From c4c56d63d5f08c9da9e41bba347b51d1cf11c0fa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=8E=E5=85=89=E6=98=A5?= Date: Tue, 5 Sep 2023 10:18:33 +0800 Subject: [PATCH] - update vendor --- go.mod | 30 +- go.sum | 631 +-- vendor/github.com/fatih/color/LICENSE.md | 20 - vendor/github.com/fatih/color/README.md | 176 - vendor/github.com/fatih/color/color.go | 616 --- .../github.com/fatih/color/color_windows.go | 19 - vendor/github.com/fatih/color/doc.go | 134 - .../fsnotify/fsnotify/.editorconfig | 12 - .../fsnotify/fsnotify/.gitattributes | 1 - .../github.com/fsnotify/fsnotify/.gitignore | 6 - vendor/github.com/fsnotify/fsnotify/.mailmap | 2 - .../github.com/fsnotify/fsnotify/CHANGELOG.md | 470 --- .../fsnotify/fsnotify/CONTRIBUTING.md | 26 - vendor/github.com/fsnotify/fsnotify/LICENSE | 25 - vendor/github.com/fsnotify/fsnotify/README.md | 161 - .../fsnotify/fsnotify/backend_fen.go | 162 - .../fsnotify/fsnotify/backend_inotify.go | 459 --- .../fsnotify/fsnotify/backend_kqueue.go | 707 ---- .../fsnotify/fsnotify/backend_other.go | 66 - .../fsnotify/fsnotify/backend_windows.go | 746 ---- .../github.com/fsnotify/fsnotify/fsnotify.go | 81 - vendor/github.com/fsnotify/fsnotify/mkdoc.zsh | 208 - .../fsnotify/fsnotify/system_bsd.go | 8 - .../fsnotify/fsnotify/system_darwin.go | 9 - vendor/github.com/go-logr/logr/.golangci.yaml | 26 - vendor/github.com/go-logr/logr/CHANGELOG.md | 6 - .../github.com/go-logr/logr/CONTRIBUTING.md | 17 - vendor/github.com/go-logr/logr/LICENSE | 201 - vendor/github.com/go-logr/logr/README.md | 282 -- vendor/github.com/go-logr/logr/discard.go | 24 - vendor/github.com/go-logr/logr/funcr/funcr.go | 804 ---- vendor/github.com/go-logr/logr/logr.go | 550 --- vendor/github.com/go-logr/stdr/LICENSE | 201 - vendor/github.com/go-logr/stdr/README.md | 6 - vendor/github.com/go-logr/stdr/stdr.go | 170 - .../go-playground/validator/v10/Makefile | 2 +- .../go-playground/validator/v10/README.md | 137 +- .../go-playground/validator/v10/baked_in.go | 2 +- 
.../go-playground/validator/v10/cache.go | 21 +- .../go-playground/validator/v10/doc.go | 2 +- .../go-playground/validator/v10/options.go | 16 + .../go-playground/validator/v10/util.go | 8 - .../go-playground/validator/v10/validator.go | 113 +- .../validator/v10/validator_instance.go | 32 +- vendor/github.com/gogf/gf/v2/LICENSE | 21 - .../gogf/gf/v2/container/garray/garray.go | 8 - .../gf/v2/container/garray/garray_func.go | 69 - .../v2/container/garray/garray_normal_any.go | 870 ----- .../v2/container/garray/garray_normal_int.go | 846 ----- .../v2/container/garray/garray_normal_str.go | 857 ----- .../v2/container/garray/garray_sorted_any.go | 842 ---- .../v2/container/garray/garray_sorted_int.go | 787 ---- .../v2/container/garray/garray_sorted_str.go | 800 ---- .../gogf/gf/v2/container/glist/glist.go | 572 --- .../gogf/gf/v2/container/gmap/gmap.go | 45 - .../container/gmap/gmap_hash_any_any_map.go | 563 --- .../container/gmap/gmap_hash_int_any_map.go | 564 --- .../container/gmap/gmap_hash_int_int_map.go | 533 --- .../container/gmap/gmap_hash_int_str_map.go | 533 --- .../container/gmap/gmap_hash_str_any_map.go | 550 --- .../container/gmap/gmap_hash_str_int_map.go | 537 --- .../container/gmap/gmap_hash_str_str_map.go | 526 --- .../gf/v2/container/gmap/gmap_list_map.go | 612 --- .../gf/v2/container/gmap/gmap_tree_map.go | 30 - .../gogf/gf/v2/container/gpool/gpool.go | 188 - .../gogf/gf/v2/container/gqueue/gqueue.go | 144 - .../gogf/gf/v2/container/gset/gset_any_set.go | 526 --- .../gogf/gf/v2/container/gset/gset_int_set.go | 489 --- .../gogf/gf/v2/container/gset/gset_str_set.go | 519 --- .../gogf/gf/v2/container/gtree/gtree.go | 10 - .../gf/v2/container/gtree/gtree_avltree.go | 816 ---- .../gogf/gf/v2/container/gtree/gtree_btree.go | 979 ----- .../v2/container/gtree/gtree_redblacktree.go | 991 ----- .../gogf/gf/v2/container/gtype/gtype.go | 14 - .../gogf/gf/v2/container/gtype/gtype_bool.go | 106 - .../gogf/gf/v2/container/gtype/gtype_byte.go | 85 - 
.../gogf/gf/v2/container/gtype/gtype_bytes.go | 96 - .../gf/v2/container/gtype/gtype_float32.go | 97 - .../gf/v2/container/gtype/gtype_float64.go | 97 - .../gogf/gf/v2/container/gtype/gtype_int.go | 85 - .../gogf/gf/v2/container/gtype/gtype_int32.go | 85 - .../gogf/gf/v2/container/gtype/gtype_int64.go | 85 - .../gf/v2/container/gtype/gtype_interface.go | 82 - .../gf/v2/container/gtype/gtype_string.go | 80 - .../gogf/gf/v2/container/gtype/gtype_uint.go | 85 - .../gf/v2/container/gtype/gtype_uint32.go | 85 - .../gf/v2/container/gtype/gtype_uint64.go | 85 - .../gogf/gf/v2/container/gvar/gvar.go | 205 - .../gogf/gf/v2/container/gvar/gvar_is.go | 51 - .../gogf/gf/v2/container/gvar/gvar_list.go | 25 - .../gogf/gf/v2/container/gvar/gvar_map.go | 91 - .../gogf/gf/v2/container/gvar/gvar_scan.go | 19 - .../gogf/gf/v2/container/gvar/gvar_slice.go | 77 - .../gogf/gf/v2/container/gvar/gvar_struct.go | 23 - .../gogf/gf/v2/container/gvar/gvar_vars.go | 131 - .../gogf/gf/v2/database/gredis/gredis.go | 78 - .../gf/v2/database/gredis/gredis_adapter.go | 78 - .../gf/v2/database/gredis/gredis_config.go | 135 - .../gf/v2/database/gredis/gredis_instance.go | 44 - .../gf/v2/database/gredis/gredis_redis.go | 137 - .../gredis/gredis_redis_group_generic.go | 62 - .../gredis/gredis_redis_group_hash.go | 32 - .../gredis/gredis_redis_group_list.go | 43 - .../gredis/gredis_redis_group_pubsub.go | 40 - .../gredis/gredis_redis_group_script.go | 30 - .../database/gredis/gredis_redis_group_set.go | 33 - .../gredis/gredis_redis_group_sorted_set.go | 85 - .../gredis/gredis_redis_group_string.go | 63 - .../gogf/gf/v2/debug/gdebug/gdebug.go | 8 - .../gogf/gf/v2/debug/gdebug/gdebug_caller.go | 196 - .../gogf/gf/v2/debug/gdebug/gdebug_grid.go | 29 - .../gogf/gf/v2/debug/gdebug/gdebug_stack.go | 77 - .../gogf/gf/v2/debug/gdebug/gdebug_version.go | 57 - .../gogf/gf/v2/encoding/gbinary/gbinary.go | 134 - .../gogf/gf/v2/encoding/gbinary/gbinary_be.go | 287 -- .../gf/v2/encoding/gbinary/gbinary_bit.go | 74 - 
.../gf/v2/encoding/gbinary/gbinary_func.go | 7 - .../gogf/gf/v2/encoding/gbinary/gbinary_le.go | 287 -- .../gf/v2/encoding/gcompress/gcompress.go | 8 - .../v2/encoding/gcompress/gcompress_gzip.go | 135 - .../gf/v2/encoding/gcompress/gcompress_zip.go | 280 -- .../v2/encoding/gcompress/gcompress_zlib.go | 59 - .../gogf/gf/v2/encoding/ghash/ghash.go | 8 - .../gogf/gf/v2/encoding/ghash/ghash_ap.go | 33 - .../gogf/gf/v2/encoding/ghash/ghash_bkdr.go | 31 - .../gogf/gf/v2/encoding/ghash/ghash_djb.go | 25 - .../gogf/gf/v2/encoding/ghash/ghash_elf.go | 39 - .../gogf/gf/v2/encoding/ghash/ghash_jshash.go | 25 - .../gogf/gf/v2/encoding/ghash/ghash_pjw.go | 45 - .../gogf/gf/v2/encoding/ghash/ghash_rs.go | 35 - .../gogf/gf/v2/encoding/ghash/ghash_sdbm.go | 27 - .../gogf/gf/v2/errors/gcode/gcode.go | 71 - .../gogf/gf/v2/errors/gcode/gcode_local.go | 43 - .../gogf/gf/v2/errors/gerror/gerror.go | 79 - .../gogf/gf/v2/errors/gerror/gerror_api.go | 110 - .../gf/v2/errors/gerror/gerror_api_code.go | 139 - .../gf/v2/errors/gerror/gerror_api_option.go | 31 - .../gf/v2/errors/gerror/gerror_api_stack.go | 118 - .../gogf/gf/v2/errors/gerror/gerror_error.go | 146 - .../gf/v2/errors/gerror/gerror_error_code.go | 31 - .../v2/errors/gerror/gerror_error_format.go | 40 - .../gf/v2/errors/gerror/gerror_error_json.go | 13 - .../gf/v2/errors/gerror/gerror_error_stack.go | 171 - .../gogf/gf/v2/internal/command/command.go | 135 - .../gogf/gf/v2/internal/consts/consts.go | 21 - .../gogf/gf/v2/internal/deepcopy/deepcopy.go | 136 - .../gogf/gf/v2/internal/empty/empty.go | 235 -- .../gogf/gf/v2/internal/intlog/intlog.go | 125 - .../gogf/gf/v2/internal/json/json.go | 85 - .../gf/v2/internal/reflection/reflection.go | 94 - .../gogf/gf/v2/internal/rwmutex/rwmutex.go | 77 - .../gogf/gf/v2/internal/tracing/tracing.go | 49 - .../gogf/gf/v2/internal/utils/utils.go | 8 - .../gogf/gf/v2/internal/utils/utils_array.go | 26 - .../gogf/gf/v2/internal/utils/utils_debug.go | 42 - 
.../gogf/gf/v2/internal/utils/utils_io.go | 48 - .../gogf/gf/v2/internal/utils/utils_is.go | 100 - .../gogf/gf/v2/internal/utils/utils_list.go | 37 - .../gogf/gf/v2/internal/utils/utils_map.go | 37 - .../gogf/gf/v2/internal/utils/utils_str.go | 168 - .../github.com/gogf/gf/v2/net/gipv4/gipv4.go | 60 - .../gogf/gf/v2/net/gipv4/gipv4_ip.go | 145 - .../gogf/gf/v2/net/gipv4/gipv4_lookup.go | 52 - .../gogf/gf/v2/net/gipv4/gipv4_mac.go | 43 - .../gogf/gf/v2/net/gtrace/gtrace.go | 180 - .../gogf/gf/v2/net/gtrace/gtrace_baggage.go | 75 - .../gogf/gf/v2/net/gtrace/gtrace_carrier.go | 62 - .../gogf/gf/v2/net/gtrace/gtrace_span.go | 26 - .../gogf/gf/v2/net/gtrace/gtrace_tracer.go | 28 - .../net/gtrace/internal/provider/provider.go | 33 - .../internal/provider/provider_idgenerator.go | 33 - .../github.com/gogf/gf/v2/os/gcache/gcache.go | 240 -- .../gogf/gf/v2/os/gcache/gcache_adapter.go | 142 - .../gf/v2/os/gcache/gcache_adapter_memory.go | 476 --- .../os/gcache/gcache_adapter_memory_data.go | 206 - .../gcache_adapter_memory_expire_sets.go | 52 - .../gcache_adapter_memory_expire_times.go | 41 - .../os/gcache/gcache_adapter_memory_item.go | 19 - .../v2/os/gcache/gcache_adapter_memory_lru.go | 100 - .../gf/v2/os/gcache/gcache_adapter_redis.go | 438 --- .../gogf/gf/v2/os/gcache/gcache_cache.go | 70 - .../gogf/gf/v2/os/gcache/gcache_cache_must.go | 113 - .../github.com/gogf/gf/v2/os/gcron/gcron.go | 122 - .../gogf/gf/v2/os/gcron/gcron_cron.go | 221 -- .../gogf/gf/v2/os/gcron/gcron_entry.go | 195 - .../gogf/gf/v2/os/gcron/gcron_schedule.go | 412 -- .../gogf/gf/v2/os/gcron/gcron_schedule_fix.go | 47 - vendor/github.com/gogf/gf/v2/os/gctx/gctx.go | 81 - .../gogf/gf/v2/os/gctx/gctx_never_done.go | 38 - .../github.com/gogf/gf/v2/os/gfile/gfile.go | 449 --- .../gogf/gf/v2/os/gfile/gfile_cache.go | 87 - .../gogf/gf/v2/os/gfile/gfile_contents.go | 213 -- .../gogf/gf/v2/os/gfile/gfile_copy.go | 138 - .../gogf/gf/v2/os/gfile/gfile_home.go | 82 - .../gogf/gf/v2/os/gfile/gfile_replace.go | 58 
- .../gogf/gf/v2/os/gfile/gfile_scan.go | 184 - .../gogf/gf/v2/os/gfile/gfile_search.go | 58 - .../gogf/gf/v2/os/gfile/gfile_size.go | 131 - .../gogf/gf/v2/os/gfile/gfile_sort.go | 40 - .../gogf/gf/v2/os/gfile/gfile_source.go | 91 - .../gogf/gf/v2/os/gfile/gfile_time.go | 39 - .../github.com/gogf/gf/v2/os/gfpool/gfpool.go | 41 - .../gogf/gf/v2/os/gfpool/gfpool_file.go | 77 - .../gogf/gf/v2/os/gfpool/gfpool_pool.go | 122 - .../gogf/gf/v2/os/gfsnotify/gfsnotify.go | 170 - .../gf/v2/os/gfsnotify/gfsnotify_event.go | 37 - .../gf/v2/os/gfsnotify/gfsnotify_filefunc.go | 134 - .../gf/v2/os/gfsnotify/gfsnotify_watcher.go | 198 - .../v2/os/gfsnotify/gfsnotify_watcher_loop.go | 186 - vendor/github.com/gogf/gf/v2/os/glog/glog.go | 75 - .../github.com/gogf/gf/v2/os/glog/glog_api.go | 109 - .../gogf/gf/v2/os/glog/glog_chaining.go | 98 - .../gogf/gf/v2/os/glog/glog_config.go | 161 - .../gogf/gf/v2/os/glog/glog_instance.go | 31 - .../gogf/gf/v2/os/glog/glog_logger.go | 415 -- .../gogf/gf/v2/os/glog/glog_logger_api.go | 146 - .../gf/v2/os/glog/glog_logger_chaining.go | 223 -- .../gogf/gf/v2/os/glog/glog_logger_color.go | 53 - .../gogf/gf/v2/os/glog/glog_logger_config.go | 293 -- .../gogf/gf/v2/os/glog/glog_logger_handler.go | 142 - .../gf/v2/os/glog/glog_logger_handler_json.go | 48 - .../gogf/gf/v2/os/glog/glog_logger_level.go | 111 - .../gogf/gf/v2/os/glog/glog_logger_rotate.go | 308 -- .../gogf/gf/v2/os/glog/glog_logger_writer.go | 19 - .../github.com/gogf/gf/v2/os/gmlock/gmlock.go | 89 - .../gogf/gf/v2/os/gmlock/gmlock_locker.go | 134 - .../github.com/gogf/gf/v2/os/grpool/grpool.go | 193 - .../gogf/gf/v2/os/grpool/grpool_supervisor.go | 30 - .../gogf/gf/v2/os/gstructs/gstructs.go | 62 - .../gogf/gf/v2/os/gstructs/gstructs_field.go | 232 -- .../gf/v2/os/gstructs/gstructs_field_tag.go | 90 - .../gogf/gf/v2/os/gstructs/gstructs_tag.go | 225 -- .../gogf/gf/v2/os/gstructs/gstructs_type.go | 75 - .../github.com/gogf/gf/v2/os/gtime/gtime.go | 452 --- 
.../gogf/gf/v2/os/gtime/gtime_format.go | 288 -- .../gogf/gf/v2/os/gtime/gtime_sql.go | 28 - .../gogf/gf/v2/os/gtime/gtime_time.go | 554 --- .../gogf/gf/v2/os/gtime/gtime_time_wrapper.go | 29 - .../gogf/gf/v2/os/gtime/gtime_time_zone.go | 120 - .../github.com/gogf/gf/v2/os/gtimer/gtimer.go | 161 - .../gogf/gf/v2/os/gtimer/gtimer_entry.go | 147 - .../gogf/gf/v2/os/gtimer/gtimer_exit.go | 15 - .../gogf/gf/v2/os/gtimer/gtimer_queue.go | 84 - .../gogf/gf/v2/os/gtimer/gtimer_queue_heap.go | 42 - .../gogf/gf/v2/os/gtimer/gtimer_timer.go | 208 - .../gogf/gf/v2/os/gtimer/gtimer_timer_loop.go | 67 - .../gogf/gf/v2/text/gregex/gregex.go | 149 - .../gogf/gf/v2/text/gregex/gregex_cache.go | 50 - .../github.com/gogf/gf/v2/text/gstr/gstr.go | 17 - .../gogf/gf/v2/text/gstr/gstr_array.go | 31 - .../gogf/gf/v2/text/gstr/gstr_case.go | 184 - .../gogf/gf/v2/text/gstr/gstr_compare.go | 21 - .../gogf/gf/v2/text/gstr/gstr_contain.go | 24 - .../gogf/gf/v2/text/gstr/gstr_convert.go | 265 -- .../gogf/gf/v2/text/gstr/gstr_count.go | 63 - .../gogf/gf/v2/text/gstr/gstr_create.go | 14 - .../gogf/gf/v2/text/gstr/gstr_domain.go | 56 - .../gogf/gf/v2/text/gstr/gstr_is.go | 14 - .../gogf/gf/v2/text/gstr/gstr_length.go | 14 - .../gogf/gf/v2/text/gstr/gstr_parse.go | 181 - .../gogf/gf/v2/text/gstr/gstr_pos.go | 140 - .../gogf/gf/v2/text/gstr/gstr_replace.go | 94 - .../gogf/gf/v2/text/gstr/gstr_similar.go | 158 - .../gogf/gf/v2/text/gstr/gstr_slashes.go | 54 - .../gogf/gf/v2/text/gstr/gstr_split_join.go | 83 - .../gogf/gf/v2/text/gstr/gstr_sub.go | 199 - .../gogf/gf/v2/text/gstr/gstr_trim.go | 114 - .../gogf/gf/v2/text/gstr/gstr_upper_lower.go | 54 - .../gogf/gf/v2/text/gstr/gstr_version.go | 189 - .../github.com/gogf/gf/v2/util/gconv/gconv.go | 286 -- .../gogf/gf/v2/util/gconv/gconv_convert.go | 320 -- .../gogf/gf/v2/util/gconv/gconv_converter.go | 155 - .../gogf/gf/v2/util/gconv/gconv_float.go | 55 - .../gogf/gf/v2/util/gconv/gconv_int.go | 136 - .../gogf/gf/v2/util/gconv/gconv_interface.go | 112 - 
.../gogf/gf/v2/util/gconv/gconv_map.go | 531 --- .../gogf/gf/v2/util/gconv/gconv_maps.go | 119 - .../gogf/gf/v2/util/gconv/gconv_maptomap.go | 149 - .../gogf/gf/v2/util/gconv/gconv_maptomaps.go | 141 - .../gogf/gf/v2/util/gconv/gconv_ptr.go | 96 - .../gogf/gf/v2/util/gconv/gconv_scan.go | 525 --- .../gogf/gf/v2/util/gconv/gconv_slice_any.go | 130 - .../gf/v2/util/gconv/gconv_slice_float.go | 282 -- .../gogf/gf/v2/util/gconv/gconv_slice_int.go | 416 -- .../gogf/gf/v2/util/gconv/gconv_slice_str.go | 144 - .../gogf/gf/v2/util/gconv/gconv_slice_uint.go | 436 --- .../gogf/gf/v2/util/gconv/gconv_struct.go | 656 ---- .../gogf/gf/v2/util/gconv/gconv_structs.go | 172 - .../gogf/gf/v2/util/gconv/gconv_time.go | 84 - .../gogf/gf/v2/util/gconv/gconv_uint.go | 119 - .../gogf/gf/v2/util/gconv/gconv_unsafe.go | 23 - .../github.com/gogf/gf/v2/util/grand/grand.go | 195 - .../gogf/gf/v2/util/grand/grand_buffer.go | 53 - .../github.com/gogf/gf/v2/util/gtag/gtag.go | 49 - .../gogf/gf/v2/util/gtag/gtag_enums.go | 37 - .../gogf/gf/v2/util/gtag/gtag_func.go | 65 - .../github.com/gogf/gf/v2/util/gutil/gutil.go | 160 - .../gogf/gf/v2/util/gutil/gutil_comparator.go | 127 - .../gogf/gf/v2/util/gutil/gutil_copy.go | 20 - .../gogf/gf/v2/util/gutil/gutil_default.go | 27 - .../gogf/gf/v2/util/gutil/gutil_dump.go | 484 --- .../gogf/gf/v2/util/gutil/gutil_list.go | 140 - .../gogf/gf/v2/util/gutil/gutil_map.go | 115 - .../gogf/gf/v2/util/gutil/gutil_reflect.go | 26 - .../gogf/gf/v2/util/gutil/gutil_slice.go | 118 - .../gogf/gf/v2/util/gutil/gutil_struct.go | 38 - vendor/github.com/lib/pq/.gitignore | 6 - vendor/github.com/lib/pq/LICENSE.md | 8 - vendor/github.com/lib/pq/README.md | 36 - vendor/github.com/lib/pq/TESTS.md | 33 - vendor/github.com/lib/pq/array.go | 895 ----- vendor/github.com/lib/pq/buf.go | 91 - vendor/github.com/lib/pq/conn.go | 2112 ----------- vendor/github.com/lib/pq/conn_go115.go | 8 - vendor/github.com/lib/pq/conn_go18.go | 247 -- vendor/github.com/lib/pq/connector.go | 120 - 
vendor/github.com/lib/pq/copy.go | 348 -- vendor/github.com/lib/pq/doc.go | 268 -- vendor/github.com/lib/pq/encode.go | 632 --- vendor/github.com/lib/pq/error.go | 523 --- vendor/github.com/lib/pq/krb.go | 27 - vendor/github.com/lib/pq/notice.go | 72 - vendor/github.com/lib/pq/notify.go | 858 ----- vendor/github.com/lib/pq/oid/doc.go | 6 - vendor/github.com/lib/pq/oid/types.go | 343 -- vendor/github.com/lib/pq/rows.go | 93 - vendor/github.com/lib/pq/scram/scram.go | 264 -- vendor/github.com/lib/pq/ssl.go | 204 - vendor/github.com/lib/pq/ssl_permissions.go | 93 - vendor/github.com/lib/pq/ssl_windows.go | 10 - vendor/github.com/lib/pq/url.go | 76 - vendor/github.com/lib/pq/user_other.go | 10 - vendor/github.com/lib/pq/user_posix.go | 25 - vendor/github.com/lib/pq/user_windows.go | 27 - vendor/github.com/lib/pq/uuid.go | 23 - vendor/github.com/mattn/go-colorable/LICENSE | 21 - .../github.com/mattn/go-colorable/README.md | 48 - .../mattn/go-colorable/colorable_appengine.go | 38 - .../mattn/go-colorable/colorable_others.go | 38 - .../mattn/go-colorable/colorable_windows.go | 1047 ----- .../mattn/go-colorable/noncolorable.go | 57 - .../pelletier/go-toml/v2/.goreleaser.yaml | 3 + .../github.com/pelletier/go-toml/v2/LICENSE | 3 +- .../github.com/pelletier/go-toml/v2/README.md | 44 +- .../github.com/pelletier/go-toml/v2/decode.go | 2 +- .../pelletier/go-toml/v2/marshaler.go | 36 +- .../pelletier/go-toml/v2/unmarshaler.go | 12 +- .../pelletier/go-toml/v2/unstable/parser.go | 6 + .../github.com/qiniu/go-sdk/v7/CHANGELOG.md | 6 + vendor/github.com/qiniu/go-sdk/v7/README.md | 2 +- .../github.com/qiniu/go-sdk/v7/conf/conf.go | 2 +- .../go-sdk/v7/internal/clientv2/client.go | 34 +- .../clientv2/interceptor_retry_simple.go | 16 +- .../qiniu/go-sdk/v7/storage/region.go | 25 +- .../github.com/qiniu/go-sdk/v7/storage/uc.go | 1 + .../qiniu/go-sdk/v7/storage/zone.go | 3 - vendor/github.com/syndtr/goleveldb/LICENSE | 24 - .../syndtr/goleveldb/leveldb/batch.go | 349 -- 
.../syndtr/goleveldb/leveldb/cache/cache.go | 704 ---- .../syndtr/goleveldb/leveldb/cache/lru.go | 195 - .../syndtr/goleveldb/leveldb/comparer.go | 67 - .../leveldb/comparer/bytes_comparer.go | 51 - .../goleveldb/leveldb/comparer/comparer.go | 57 - .../github.com/syndtr/goleveldb/leveldb/db.go | 1179 ------ .../syndtr/goleveldb/leveldb/db_compaction.go | 854 ----- .../syndtr/goleveldb/leveldb/db_iter.go | 360 -- .../syndtr/goleveldb/leveldb/db_snapshot.go | 187 - .../syndtr/goleveldb/leveldb/db_state.go | 239 -- .../goleveldb/leveldb/db_transaction.go | 329 -- .../syndtr/goleveldb/leveldb/db_util.go | 102 - .../syndtr/goleveldb/leveldb/db_write.go | 464 --- .../syndtr/goleveldb/leveldb/doc.go | 92 - .../syndtr/goleveldb/leveldb/errors.go | 20 - .../syndtr/goleveldb/leveldb/errors/errors.go | 78 - .../syndtr/goleveldb/leveldb/filter.go | 31 - .../syndtr/goleveldb/leveldb/filter/bloom.go | 116 - .../syndtr/goleveldb/leveldb/filter/filter.go | 60 - .../goleveldb/leveldb/iterator/array_iter.go | 184 - .../leveldb/iterator/indexed_iter.go | 242 -- .../syndtr/goleveldb/leveldb/iterator/iter.go | 132 - .../goleveldb/leveldb/iterator/merged_iter.go | 304 -- .../goleveldb/leveldb/journal/journal.go | 524 --- .../syndtr/goleveldb/leveldb/key.go | 143 - .../syndtr/goleveldb/leveldb/memdb/memdb.go | 479 --- .../syndtr/goleveldb/leveldb/opt/options.go | 697 ---- .../syndtr/goleveldb/leveldb/options.go | 107 - .../syndtr/goleveldb/leveldb/session.go | 210 - .../goleveldb/leveldb/session_compaction.go | 302 -- .../goleveldb/leveldb/session_record.go | 323 -- .../syndtr/goleveldb/leveldb/session_util.go | 271 -- .../syndtr/goleveldb/leveldb/storage.go | 63 - .../goleveldb/leveldb/storage/file_storage.go | 671 ---- .../leveldb/storage/file_storage_nacl.go | 34 - .../leveldb/storage/file_storage_plan9.go | 63 - .../leveldb/storage/file_storage_solaris.go | 81 - .../leveldb/storage/file_storage_unix.go | 98 - .../leveldb/storage/file_storage_windows.go | 78 - 
.../goleveldb/leveldb/storage/mem_storage.go | 222 -- .../goleveldb/leveldb/storage/storage.go | 187 - .../syndtr/goleveldb/leveldb/table.go | 531 --- .../syndtr/goleveldb/leveldb/table/reader.go | 1139 ------ .../syndtr/goleveldb/leveldb/table/table.go | 177 - .../syndtr/goleveldb/leveldb/table/writer.go | 375 -- .../syndtr/goleveldb/leveldb/util.go | 98 - .../syndtr/goleveldb/leveldb/util/buffer.go | 293 -- .../goleveldb/leveldb/util/buffer_pool.go | 239 -- .../syndtr/goleveldb/leveldb/util/crc32.go | 30 - .../syndtr/goleveldb/leveldb/util/hash.go | 48 - .../syndtr/goleveldb/leveldb/util/range.go | 32 - .../syndtr/goleveldb/leveldb/util/util.go | 73 - .../syndtr/goleveldb/leveldb/version.go | 528 --- .../go.opentelemetry.io/otel/.codespellignore | 5 - vendor/go.opentelemetry.io/otel/.codespellrc | 10 - .../go.opentelemetry.io/otel/.gitattributes | 3 - vendor/go.opentelemetry.io/otel/.gitignore | 24 - vendor/go.opentelemetry.io/otel/.gitmodules | 3 - vendor/go.opentelemetry.io/otel/.golangci.yml | 246 -- vendor/go.opentelemetry.io/otel/.lycheeignore | 6 - .../otel/.markdownlint.yaml | 29 - vendor/go.opentelemetry.io/otel/CHANGELOG.md | 2567 ------------- vendor/go.opentelemetry.io/otel/CODEOWNERS | 17 - .../go.opentelemetry.io/otel/CONTRIBUTING.md | 562 --- vendor/go.opentelemetry.io/otel/LICENSE | 201 - vendor/go.opentelemetry.io/otel/Makefile | 269 -- vendor/go.opentelemetry.io/otel/README.md | 109 - vendor/go.opentelemetry.io/otel/RELEASING.md | 126 - vendor/go.opentelemetry.io/otel/VERSIONING.md | 224 -- .../go.opentelemetry.io/otel/attribute/doc.go | 16 - .../otel/attribute/encoder.go | 146 - .../otel/attribute/iterator.go | 161 - .../go.opentelemetry.io/otel/attribute/key.go | 134 - .../go.opentelemetry.io/otel/attribute/kv.go | 86 - .../go.opentelemetry.io/otel/attribute/set.go | 436 --- .../otel/attribute/type_string.go | 31 - .../otel/attribute/value.go | 270 -- .../otel/baggage/baggage.go | 562 --- .../otel/baggage/context.go | 39 - 
.../go.opentelemetry.io/otel/baggage/doc.go | 20 - .../go.opentelemetry.io/otel/codes/codes.go | 116 - vendor/go.opentelemetry.io/otel/codes/doc.go | 21 - vendor/go.opentelemetry.io/otel/doc.go | 34 - .../go.opentelemetry.io/otel/error_handler.go | 38 - vendor/go.opentelemetry.io/otel/handler.go | 48 - .../otel/internal/attribute/attribute.go | 111 - .../otel/internal/baggage/baggage.go | 43 - .../otel/internal/baggage/context.go | 92 - .../otel/internal/global/handler.go | 103 - .../otel/internal/global/instruments.go | 359 -- .../otel/internal/global/internal_logging.go | 70 - .../otel/internal/global/meter.go | 354 -- .../otel/internal/global/propagator.go | 82 - .../otel/internal/global/state.go | 156 - .../otel/internal/global/trace.go | 192 - .../otel/internal/rawhelpers.go | 55 - .../otel/internal_logging.go | 26 - vendor/go.opentelemetry.io/otel/metric.go | 53 - .../go.opentelemetry.io/otel/metric/LICENSE | 201 - .../otel/metric/asyncfloat64.go | 271 -- .../otel/metric/asyncint64.go | 269 -- .../go.opentelemetry.io/otel/metric/config.go | 92 - vendor/go.opentelemetry.io/otel/metric/doc.go | 170 - .../otel/metric/embedded/embedded.go | 234 -- .../otel/metric/instrument.go | 332 -- .../go.opentelemetry.io/otel/metric/meter.go | 210 - .../otel/metric/syncfloat64.go | 179 - .../otel/metric/syncint64.go | 179 - .../go.opentelemetry.io/otel/propagation.go | 31 - .../otel/propagation/baggage.go | 58 - .../otel/propagation/doc.go | 24 - .../otel/propagation/propagation.go | 153 - .../otel/propagation/trace_context.go | 159 - .../go.opentelemetry.io/otel/requirements.txt | 1 - vendor/go.opentelemetry.io/otel/sdk/LICENSE | 201 - .../otel/sdk/instrumentation/doc.go | 24 - .../otel/sdk/instrumentation/library.go | 19 - .../otel/sdk/instrumentation/scope.go | 26 - .../otel/sdk/internal/env/env.go | 177 - .../otel/sdk/internal/internal.go | 28 - .../otel/sdk/resource/auto.go | 110 - .../otel/sdk/resource/builtin.go | 108 - .../otel/sdk/resource/config.go | 206 - 
.../otel/sdk/resource/container.go | 100 - .../otel/sdk/resource/doc.go | 31 - .../otel/sdk/resource/env.go | 108 - .../otel/sdk/resource/host_id.go | 120 - .../otel/sdk/resource/host_id_bsd.go | 23 - .../otel/sdk/resource/host_id_darwin.go | 19 - .../otel/sdk/resource/host_id_exec.go | 29 - .../otel/sdk/resource/host_id_linux.go | 22 - .../otel/sdk/resource/host_id_readfile.go | 28 - .../otel/sdk/resource/host_id_unsupported.go | 36 - .../otel/sdk/resource/host_id_windows.go | 48 - .../otel/sdk/resource/os.go | 97 - .../otel/sdk/resource/os_release_darwin.go | 102 - .../otel/sdk/resource/os_release_unix.go | 154 - .../otel/sdk/resource/os_unix.go | 90 - .../otel/sdk/resource/os_unsupported.go | 34 - .../otel/sdk/resource/os_windows.go | 101 - .../otel/sdk/resource/process.go | 180 - .../otel/sdk/resource/resource.go | 272 -- .../otel/sdk/trace/batch_span_processor.go | 432 --- .../go.opentelemetry.io/otel/sdk/trace/doc.go | 21 - .../otel/sdk/trace/event.go | 37 - .../otel/sdk/trace/evictedqueue.go | 44 - .../otel/sdk/trace/id_generator.go | 77 - .../otel/sdk/trace/link.go | 34 - .../otel/sdk/trace/provider.go | 500 --- .../otel/sdk/trace/sampler_env.go | 108 - .../otel/sdk/trace/sampling.go | 293 -- .../otel/sdk/trace/simple_span_processor.go | 131 - .../otel/sdk/trace/snapshot.go | 144 - .../otel/sdk/trace/span.go | 828 ---- .../otel/sdk/trace/span_exporter.go | 47 - .../otel/sdk/trace/span_limits.go | 125 - .../otel/sdk/trace/span_processor.go | 72 - .../otel/sdk/trace/tracer.go | 161 - .../otel/sdk/trace/version.go | 20 - .../go.opentelemetry.io/otel/sdk/version.go | 20 - .../otel/semconv/internal/http.go | 338 -- .../otel/semconv/v1.17.0/doc.go | 20 - .../otel/semconv/v1.17.0/event.go | 199 - .../otel/semconv/v1.17.0/exception.go | 20 - .../otel/semconv/v1.17.0/http.go | 21 - .../otel/semconv/v1.17.0/resource.go | 2010 ---------- .../otel/semconv/v1.17.0/schema.go | 20 - .../otel/semconv/v1.17.0/trace.go | 3375 ----------------- .../otel/semconv/v1.4.0/doc.go 
| 20 - .../otel/semconv/v1.4.0/exception.go | 20 - .../otel/semconv/v1.4.0/http.go | 114 - .../otel/semconv/v1.4.0/resource.go | 906 ----- .../otel/semconv/v1.4.0/schema.go | 20 - .../otel/semconv/v1.4.0/trace.go | 1378 ------- vendor/go.opentelemetry.io/otel/trace.go | 47 - vendor/go.opentelemetry.io/otel/trace/LICENSE | 201 - .../go.opentelemetry.io/otel/trace/config.go | 333 -- .../go.opentelemetry.io/otel/trace/context.go | 61 - vendor/go.opentelemetry.io/otel/trace/doc.go | 66 - .../otel/trace/nonrecording.go | 27 - vendor/go.opentelemetry.io/otel/trace/noop.go | 89 - .../go.opentelemetry.io/otel/trace/trace.go | 551 --- .../otel/trace/tracestate.go | 212 -- vendor/go.opentelemetry.io/otel/version.go | 20 - vendor/go.opentelemetry.io/otel/versions.yaml | 57 - vendor/golang.org/x/arch/x86/x86asm/gnu.go | 2 +- vendor/golang.org/x/arch/x86/x86asm/inst.go | 2 +- vendor/golang.org/x/sys/cpu/cpu.go | 5 +- vendor/golang.org/x/sys/cpu/cpu_x86.go | 7 + vendor/golang.org/x/sys/unix/syscall_linux.go | 23 + vendor/golang.org/x/sys/unix/syscall_unix.go | 3 + vendor/golang.org/x/sys/unix/zerrors_linux.go | 17 + .../golang.org/x/sys/unix/zsyscall_linux.go | 20 + vendor/golang.org/x/sys/unix/ztypes_linux.go | 15 + .../golang.org/x/sys/windows/registry/key.go | 206 - .../x/sys/windows/registry/mksyscall.go | 10 - .../x/sys/windows/registry/syscall.go | 33 - .../x/sys/windows/registry/value.go | 387 -- .../sys/windows/registry/zsyscall_windows.go | 117 - .../x/sys/windows/syscall_windows.go | 11 +- .../x/sys/windows/zsyscall_windows.go | 26 +- vendor/golang.org/x/text/unicode/norm/trie.go | 2 +- vendor/modules.txt | 148 +- vendor/xorm.io/builder/.gitignore | 1 - vendor/xorm.io/builder/LICENSE | 27 - vendor/xorm.io/builder/README.md | 217 -- vendor/xorm.io/builder/as.go | 10 - vendor/xorm.io/builder/builder.go | 330 -- vendor/xorm.io/builder/builder_delete.go | 27 - vendor/xorm.io/builder/builder_insert.go | 149 - vendor/xorm.io/builder/builder_join.go | 85 - 
vendor/xorm.io/builder/builder_limit.go | 102 - vendor/xorm.io/builder/builder_select.go | 169 - .../xorm.io/builder/builder_set_operations.go | 51 - vendor/xorm.io/builder/builder_update.go | 57 - vendor/xorm.io/builder/cond.go | 38 - vendor/xorm.io/builder/cond_and.go | 61 - vendor/xorm.io/builder/cond_between.go | 65 - vendor/xorm.io/builder/cond_compare.go | 160 - vendor/xorm.io/builder/cond_eq.go | 117 - vendor/xorm.io/builder/cond_exists.go | 49 - vendor/xorm.io/builder/cond_if.go | 49 - vendor/xorm.io/builder/cond_in.go | 326 -- vendor/xorm.io/builder/cond_like.go | 41 - vendor/xorm.io/builder/cond_neq.go | 94 - vendor/xorm.io/builder/cond_not.go | 77 - vendor/xorm.io/builder/cond_not_exists.go | 49 - vendor/xorm.io/builder/cond_notin.go | 323 -- vendor/xorm.io/builder/cond_null.go | 59 - vendor/xorm.io/builder/cond_or.go | 69 - vendor/xorm.io/builder/doc.go | 120 - vendor/xorm.io/builder/error.go | 40 - vendor/xorm.io/builder/expr.go | 57 - vendor/xorm.io/builder/sql.go | 168 - vendor/xorm.io/builder/writer.go | 42 - vendor/xorm.io/xorm/.changelog.yml | 53 - vendor/xorm.io/xorm/.drone.yml | 437 --- vendor/xorm.io/xorm/.gitignore | 40 - vendor/xorm.io/xorm/.golangci.yml | 24 - vendor/xorm.io/xorm/CHANGELOG.md | 393 -- vendor/xorm.io/xorm/CONTRIBUTING.md | 87 - vendor/xorm.io/xorm/LICENSE | 27 - vendor/xorm.io/xorm/Makefile | 281 -- vendor/xorm.io/xorm/README.md | 528 --- vendor/xorm.io/xorm/README_CN.md | 520 --- vendor/xorm.io/xorm/caches/cache.go | 99 - vendor/xorm.io/xorm/caches/encode.go | 65 - vendor/xorm.io/xorm/caches/leveldb.go | 99 - vendor/xorm.io/xorm/caches/lru.go | 278 -- vendor/xorm.io/xorm/caches/manager.go | 60 - vendor/xorm.io/xorm/caches/memory_store.go | 49 - vendor/xorm.io/xorm/contexts/context_cache.go | 30 - vendor/xorm.io/xorm/contexts/hook.go | 81 - vendor/xorm.io/xorm/convert/bool.go | 51 - vendor/xorm.io/xorm/convert/conversion.go | 389 -- vendor/xorm.io/xorm/convert/float.go | 142 - vendor/xorm.io/xorm/convert/int.go | 178 - 
vendor/xorm.io/xorm/convert/interface.go | 49 - vendor/xorm.io/xorm/convert/scanner.go | 19 - vendor/xorm.io/xorm/convert/string.go | 75 - vendor/xorm.io/xorm/convert/time.go | 127 - vendor/xorm.io/xorm/core/db.go | 308 -- vendor/xorm.io/xorm/core/error.go | 14 - vendor/xorm.io/xorm/core/interface.go | 22 - vendor/xorm.io/xorm/core/rows.go | 346 -- vendor/xorm.io/xorm/core/scan.go | 70 - vendor/xorm.io/xorm/core/stmt.go | 213 -- vendor/xorm.io/xorm/core/tx.go | 238 -- vendor/xorm.io/xorm/dialects/dameng.go | 1201 ------ vendor/xorm.io/xorm/dialects/dialect.go | 364 -- vendor/xorm.io/xorm/dialects/driver.go | 85 - vendor/xorm.io/xorm/dialects/filter.go | 76 - vendor/xorm.io/xorm/dialects/mssql.go | 733 ---- vendor/xorm.io/xorm/dialects/mysql.go | 823 ---- vendor/xorm.io/xorm/dialects/oracle.go | 934 ----- vendor/xorm.io/xorm/dialects/pg_reserved.txt | 746 ---- vendor/xorm.io/xorm/dialects/postgres.go | 1556 -------- vendor/xorm.io/xorm/dialects/quote.go | 15 - vendor/xorm.io/xorm/dialects/sqlite3.go | 578 --- vendor/xorm.io/xorm/dialects/table_name.go | 93 - vendor/xorm.io/xorm/dialects/time.go | 63 - vendor/xorm.io/xorm/doc.go | 250 -- vendor/xorm.io/xorm/engine.go | 1433 ------- vendor/xorm.io/xorm/engine_group.go | 267 -- vendor/xorm.io/xorm/engine_group_policy.go | 118 - vendor/xorm.io/xorm/error.go | 26 - vendor/xorm.io/xorm/interface.go | 134 - vendor/xorm.io/xorm/internal/json/gojson.go | 28 - vendor/xorm.io/xorm/internal/json/json.go | 31 - vendor/xorm.io/xorm/internal/json/jsoniter.go | 28 - .../xorm.io/xorm/internal/statements/cache.go | 81 - .../xorm/internal/statements/column_map.go | 66 - .../xorm.io/xorm/internal/statements/cond.go | 111 - .../xorm.io/xorm/internal/statements/expr.go | 94 - .../xorm/internal/statements/insert.go | 299 -- .../xorm.io/xorm/internal/statements/join.go | 78 - .../xorm/internal/statements/order_by.go | 90 - vendor/xorm.io/xorm/internal/statements/pk.go | 98 - .../xorm.io/xorm/internal/statements/query.go | 499 --- 
.../xorm/internal/statements/select.go | 137 - .../xorm/internal/statements/statement.go | 700 ---- .../internal/statements/statement_args.go | 56 - .../xorm/internal/statements/table_name.go | 56 - .../xorm/internal/statements/update.go | 308 -- .../xorm/internal/statements/values.go | 171 - vendor/xorm.io/xorm/internal/utils/builder.go | 27 - vendor/xorm.io/xorm/internal/utils/name.go | 20 - vendor/xorm.io/xorm/internal/utils/new.go | 25 - vendor/xorm.io/xorm/internal/utils/reflect.go | 14 - vendor/xorm.io/xorm/internal/utils/slice.go | 32 - vendor/xorm.io/xorm/internal/utils/sql.go | 20 - vendor/xorm.io/xorm/internal/utils/strings.go | 32 - vendor/xorm.io/xorm/internal/utils/zero.go | 151 - vendor/xorm.io/xorm/log/logger.go | 208 - vendor/xorm.io/xorm/log/logger_context.go | 116 - vendor/xorm.io/xorm/log/syslogger.go | 88 - vendor/xorm.io/xorm/names/mapper.go | 281 -- vendor/xorm.io/xorm/names/table_name.go | 100 - vendor/xorm.io/xorm/processors.go | 144 - vendor/xorm.io/xorm/rows.go | 150 - vendor/xorm.io/xorm/scan.go | 439 --- vendor/xorm.io/xorm/schemas/column.go | 110 - vendor/xorm.io/xorm/schemas/index.go | 72 - vendor/xorm.io/xorm/schemas/pk.go | 46 - vendor/xorm.io/xorm/schemas/quote.go | 244 -- vendor/xorm.io/xorm/schemas/table.go | 178 - vendor/xorm.io/xorm/schemas/type.go | 362 -- vendor/xorm.io/xorm/schemas/version.go | 12 - vendor/xorm.io/xorm/session.go | 796 ---- vendor/xorm.io/xorm/session_cols.go | 143 - vendor/xorm.io/xorm/session_cond.go | 55 - vendor/xorm.io/xorm/session_delete.go | 257 -- vendor/xorm.io/xorm/session_exist.go | 32 - vendor/xorm.io/xorm/session_find.go | 483 --- vendor/xorm.io/xorm/session_get.go | 366 -- vendor/xorm.io/xorm/session_insert.go | 709 ---- vendor/xorm.io/xorm/session_iterate.go | 105 - vendor/xorm.io/xorm/session_raw.go | 197 - vendor/xorm.io/xorm/session_schema.go | 536 --- vendor/xorm.io/xorm/session_stats.go | 81 - vendor/xorm.io/xorm/session_tx.go | 91 - vendor/xorm.io/xorm/session_update.go | 553 --- 
vendor/xorm.io/xorm/tags/parser.go | 379 -- vendor/xorm.io/xorm/tags/tag.go | 398 -- 702 files changed, 462 insertions(+), 129582 deletions(-) delete mode 100644 vendor/github.com/fatih/color/LICENSE.md delete mode 100644 vendor/github.com/fatih/color/README.md delete mode 100644 vendor/github.com/fatih/color/color.go delete mode 100644 vendor/github.com/fatih/color/color_windows.go delete mode 100644 vendor/github.com/fatih/color/doc.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/.editorconfig delete mode 100644 vendor/github.com/fsnotify/fsnotify/.gitattributes delete mode 100644 vendor/github.com/fsnotify/fsnotify/.gitignore delete mode 100644 vendor/github.com/fsnotify/fsnotify/.mailmap delete mode 100644 vendor/github.com/fsnotify/fsnotify/CHANGELOG.md delete mode 100644 vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md delete mode 100644 vendor/github.com/fsnotify/fsnotify/LICENSE delete mode 100644 vendor/github.com/fsnotify/fsnotify/README.md delete mode 100644 vendor/github.com/fsnotify/fsnotify/backend_fen.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/backend_inotify.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/backend_kqueue.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/backend_other.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/backend_windows.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/fsnotify.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/mkdoc.zsh delete mode 100644 vendor/github.com/fsnotify/fsnotify/system_bsd.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/system_darwin.go delete mode 100644 vendor/github.com/go-logr/logr/.golangci.yaml delete mode 100644 vendor/github.com/go-logr/logr/CHANGELOG.md delete mode 100644 vendor/github.com/go-logr/logr/CONTRIBUTING.md delete mode 100644 vendor/github.com/go-logr/logr/LICENSE delete mode 100644 vendor/github.com/go-logr/logr/README.md delete mode 100644 vendor/github.com/go-logr/logr/discard.go delete mode 
100644 vendor/github.com/go-logr/logr/funcr/funcr.go delete mode 100644 vendor/github.com/go-logr/logr/logr.go delete mode 100644 vendor/github.com/go-logr/stdr/LICENSE delete mode 100644 vendor/github.com/go-logr/stdr/README.md delete mode 100644 vendor/github.com/go-logr/stdr/stdr.go create mode 100644 vendor/github.com/go-playground/validator/v10/options.go delete mode 100644 vendor/github.com/gogf/gf/v2/LICENSE delete mode 100644 vendor/github.com/gogf/gf/v2/container/garray/garray.go delete mode 100644 vendor/github.com/gogf/gf/v2/container/garray/garray_func.go delete mode 100644 vendor/github.com/gogf/gf/v2/container/garray/garray_normal_any.go delete mode 100644 vendor/github.com/gogf/gf/v2/container/garray/garray_normal_int.go delete mode 100644 vendor/github.com/gogf/gf/v2/container/garray/garray_normal_str.go delete mode 100644 vendor/github.com/gogf/gf/v2/container/garray/garray_sorted_any.go delete mode 100644 vendor/github.com/gogf/gf/v2/container/garray/garray_sorted_int.go delete mode 100644 vendor/github.com/gogf/gf/v2/container/garray/garray_sorted_str.go delete mode 100644 vendor/github.com/gogf/gf/v2/container/glist/glist.go delete mode 100644 vendor/github.com/gogf/gf/v2/container/gmap/gmap.go delete mode 100644 vendor/github.com/gogf/gf/v2/container/gmap/gmap_hash_any_any_map.go delete mode 100644 vendor/github.com/gogf/gf/v2/container/gmap/gmap_hash_int_any_map.go delete mode 100644 vendor/github.com/gogf/gf/v2/container/gmap/gmap_hash_int_int_map.go delete mode 100644 vendor/github.com/gogf/gf/v2/container/gmap/gmap_hash_int_str_map.go delete mode 100644 vendor/github.com/gogf/gf/v2/container/gmap/gmap_hash_str_any_map.go delete mode 100644 vendor/github.com/gogf/gf/v2/container/gmap/gmap_hash_str_int_map.go delete mode 100644 vendor/github.com/gogf/gf/v2/container/gmap/gmap_hash_str_str_map.go delete mode 100644 vendor/github.com/gogf/gf/v2/container/gmap/gmap_list_map.go delete mode 100644 
vendor/github.com/gogf/gf/v2/container/gmap/gmap_tree_map.go delete mode 100644 vendor/github.com/gogf/gf/v2/container/gpool/gpool.go delete mode 100644 vendor/github.com/gogf/gf/v2/container/gqueue/gqueue.go delete mode 100644 vendor/github.com/gogf/gf/v2/container/gset/gset_any_set.go delete mode 100644 vendor/github.com/gogf/gf/v2/container/gset/gset_int_set.go delete mode 100644 vendor/github.com/gogf/gf/v2/container/gset/gset_str_set.go delete mode 100644 vendor/github.com/gogf/gf/v2/container/gtree/gtree.go delete mode 100644 vendor/github.com/gogf/gf/v2/container/gtree/gtree_avltree.go delete mode 100644 vendor/github.com/gogf/gf/v2/container/gtree/gtree_btree.go delete mode 100644 vendor/github.com/gogf/gf/v2/container/gtree/gtree_redblacktree.go delete mode 100644 vendor/github.com/gogf/gf/v2/container/gtype/gtype.go delete mode 100644 vendor/github.com/gogf/gf/v2/container/gtype/gtype_bool.go delete mode 100644 vendor/github.com/gogf/gf/v2/container/gtype/gtype_byte.go delete mode 100644 vendor/github.com/gogf/gf/v2/container/gtype/gtype_bytes.go delete mode 100644 vendor/github.com/gogf/gf/v2/container/gtype/gtype_float32.go delete mode 100644 vendor/github.com/gogf/gf/v2/container/gtype/gtype_float64.go delete mode 100644 vendor/github.com/gogf/gf/v2/container/gtype/gtype_int.go delete mode 100644 vendor/github.com/gogf/gf/v2/container/gtype/gtype_int32.go delete mode 100644 vendor/github.com/gogf/gf/v2/container/gtype/gtype_int64.go delete mode 100644 vendor/github.com/gogf/gf/v2/container/gtype/gtype_interface.go delete mode 100644 vendor/github.com/gogf/gf/v2/container/gtype/gtype_string.go delete mode 100644 vendor/github.com/gogf/gf/v2/container/gtype/gtype_uint.go delete mode 100644 vendor/github.com/gogf/gf/v2/container/gtype/gtype_uint32.go delete mode 100644 vendor/github.com/gogf/gf/v2/container/gtype/gtype_uint64.go delete mode 100644 vendor/github.com/gogf/gf/v2/container/gvar/gvar.go delete mode 100644 
vendor/github.com/gogf/gf/v2/container/gvar/gvar_is.go delete mode 100644 vendor/github.com/gogf/gf/v2/container/gvar/gvar_list.go delete mode 100644 vendor/github.com/gogf/gf/v2/container/gvar/gvar_map.go delete mode 100644 vendor/github.com/gogf/gf/v2/container/gvar/gvar_scan.go delete mode 100644 vendor/github.com/gogf/gf/v2/container/gvar/gvar_slice.go delete mode 100644 vendor/github.com/gogf/gf/v2/container/gvar/gvar_struct.go delete mode 100644 vendor/github.com/gogf/gf/v2/container/gvar/gvar_vars.go delete mode 100644 vendor/github.com/gogf/gf/v2/database/gredis/gredis.go delete mode 100644 vendor/github.com/gogf/gf/v2/database/gredis/gredis_adapter.go delete mode 100644 vendor/github.com/gogf/gf/v2/database/gredis/gredis_config.go delete mode 100644 vendor/github.com/gogf/gf/v2/database/gredis/gredis_instance.go delete mode 100644 vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis.go delete mode 100644 vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_generic.go delete mode 100644 vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_hash.go delete mode 100644 vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_list.go delete mode 100644 vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_pubsub.go delete mode 100644 vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_script.go delete mode 100644 vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_set.go delete mode 100644 vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_sorted_set.go delete mode 100644 vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_string.go delete mode 100644 vendor/github.com/gogf/gf/v2/debug/gdebug/gdebug.go delete mode 100644 vendor/github.com/gogf/gf/v2/debug/gdebug/gdebug_caller.go delete mode 100644 vendor/github.com/gogf/gf/v2/debug/gdebug/gdebug_grid.go delete mode 100644 vendor/github.com/gogf/gf/v2/debug/gdebug/gdebug_stack.go delete mode 100644 
vendor/github.com/gogf/gf/v2/debug/gdebug/gdebug_version.go delete mode 100644 vendor/github.com/gogf/gf/v2/encoding/gbinary/gbinary.go delete mode 100644 vendor/github.com/gogf/gf/v2/encoding/gbinary/gbinary_be.go delete mode 100644 vendor/github.com/gogf/gf/v2/encoding/gbinary/gbinary_bit.go delete mode 100644 vendor/github.com/gogf/gf/v2/encoding/gbinary/gbinary_func.go delete mode 100644 vendor/github.com/gogf/gf/v2/encoding/gbinary/gbinary_le.go delete mode 100644 vendor/github.com/gogf/gf/v2/encoding/gcompress/gcompress.go delete mode 100644 vendor/github.com/gogf/gf/v2/encoding/gcompress/gcompress_gzip.go delete mode 100644 vendor/github.com/gogf/gf/v2/encoding/gcompress/gcompress_zip.go delete mode 100644 vendor/github.com/gogf/gf/v2/encoding/gcompress/gcompress_zlib.go delete mode 100644 vendor/github.com/gogf/gf/v2/encoding/ghash/ghash.go delete mode 100644 vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_ap.go delete mode 100644 vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_bkdr.go delete mode 100644 vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_djb.go delete mode 100644 vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_elf.go delete mode 100644 vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_jshash.go delete mode 100644 vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_pjw.go delete mode 100644 vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_rs.go delete mode 100644 vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_sdbm.go delete mode 100644 vendor/github.com/gogf/gf/v2/errors/gcode/gcode.go delete mode 100644 vendor/github.com/gogf/gf/v2/errors/gcode/gcode_local.go delete mode 100644 vendor/github.com/gogf/gf/v2/errors/gerror/gerror.go delete mode 100644 vendor/github.com/gogf/gf/v2/errors/gerror/gerror_api.go delete mode 100644 vendor/github.com/gogf/gf/v2/errors/gerror/gerror_api_code.go delete mode 100644 vendor/github.com/gogf/gf/v2/errors/gerror/gerror_api_option.go delete mode 100644 
vendor/github.com/gogf/gf/v2/errors/gerror/gerror_api_stack.go delete mode 100644 vendor/github.com/gogf/gf/v2/errors/gerror/gerror_error.go delete mode 100644 vendor/github.com/gogf/gf/v2/errors/gerror/gerror_error_code.go delete mode 100644 vendor/github.com/gogf/gf/v2/errors/gerror/gerror_error_format.go delete mode 100644 vendor/github.com/gogf/gf/v2/errors/gerror/gerror_error_json.go delete mode 100644 vendor/github.com/gogf/gf/v2/errors/gerror/gerror_error_stack.go delete mode 100644 vendor/github.com/gogf/gf/v2/internal/command/command.go delete mode 100644 vendor/github.com/gogf/gf/v2/internal/consts/consts.go delete mode 100644 vendor/github.com/gogf/gf/v2/internal/deepcopy/deepcopy.go delete mode 100644 vendor/github.com/gogf/gf/v2/internal/empty/empty.go delete mode 100644 vendor/github.com/gogf/gf/v2/internal/intlog/intlog.go delete mode 100644 vendor/github.com/gogf/gf/v2/internal/json/json.go delete mode 100644 vendor/github.com/gogf/gf/v2/internal/reflection/reflection.go delete mode 100644 vendor/github.com/gogf/gf/v2/internal/rwmutex/rwmutex.go delete mode 100644 vendor/github.com/gogf/gf/v2/internal/tracing/tracing.go delete mode 100644 vendor/github.com/gogf/gf/v2/internal/utils/utils.go delete mode 100644 vendor/github.com/gogf/gf/v2/internal/utils/utils_array.go delete mode 100644 vendor/github.com/gogf/gf/v2/internal/utils/utils_debug.go delete mode 100644 vendor/github.com/gogf/gf/v2/internal/utils/utils_io.go delete mode 100644 vendor/github.com/gogf/gf/v2/internal/utils/utils_is.go delete mode 100644 vendor/github.com/gogf/gf/v2/internal/utils/utils_list.go delete mode 100644 vendor/github.com/gogf/gf/v2/internal/utils/utils_map.go delete mode 100644 vendor/github.com/gogf/gf/v2/internal/utils/utils_str.go delete mode 100644 vendor/github.com/gogf/gf/v2/net/gipv4/gipv4.go delete mode 100644 vendor/github.com/gogf/gf/v2/net/gipv4/gipv4_ip.go delete mode 100644 vendor/github.com/gogf/gf/v2/net/gipv4/gipv4_lookup.go delete mode 100644 
vendor/github.com/gogf/gf/v2/net/gipv4/gipv4_mac.go delete mode 100644 vendor/github.com/gogf/gf/v2/net/gtrace/gtrace.go delete mode 100644 vendor/github.com/gogf/gf/v2/net/gtrace/gtrace_baggage.go delete mode 100644 vendor/github.com/gogf/gf/v2/net/gtrace/gtrace_carrier.go delete mode 100644 vendor/github.com/gogf/gf/v2/net/gtrace/gtrace_span.go delete mode 100644 vendor/github.com/gogf/gf/v2/net/gtrace/gtrace_tracer.go delete mode 100644 vendor/github.com/gogf/gf/v2/net/gtrace/internal/provider/provider.go delete mode 100644 vendor/github.com/gogf/gf/v2/net/gtrace/internal/provider/provider_idgenerator.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/gcache/gcache.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter_memory.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter_memory_data.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter_memory_expire_sets.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter_memory_expire_times.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter_memory_item.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter_memory_lru.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter_redis.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/gcache/gcache_cache.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/gcache/gcache_cache_must.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/gcron/gcron.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/gcron/gcron_cron.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/gcron/gcron_entry.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/gcron/gcron_schedule.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/gcron/gcron_schedule_fix.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/gctx/gctx.go delete mode 100644 
vendor/github.com/gogf/gf/v2/os/gctx/gctx_never_done.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/gfile/gfile.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/gfile/gfile_cache.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/gfile/gfile_contents.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/gfile/gfile_copy.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/gfile/gfile_home.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/gfile/gfile_replace.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/gfile/gfile_scan.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/gfile/gfile_search.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/gfile/gfile_size.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/gfile/gfile_sort.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/gfile/gfile_source.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/gfile/gfile_time.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/gfpool/gfpool.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/gfpool/gfpool_file.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/gfpool/gfpool_pool.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/gfsnotify/gfsnotify.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/gfsnotify/gfsnotify_event.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/gfsnotify/gfsnotify_filefunc.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/gfsnotify/gfsnotify_watcher.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/gfsnotify/gfsnotify_watcher_loop.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/glog/glog.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/glog/glog_api.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/glog/glog_chaining.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/glog/glog_config.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/glog/glog_instance.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/glog/glog_logger.go delete mode 100644 
vendor/github.com/gogf/gf/v2/os/glog/glog_logger_api.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/glog/glog_logger_chaining.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/glog/glog_logger_color.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/glog/glog_logger_config.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/glog/glog_logger_handler.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/glog/glog_logger_handler_json.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/glog/glog_logger_level.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/glog/glog_logger_rotate.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/glog/glog_logger_writer.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/gmlock/gmlock.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/gmlock/gmlock_locker.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/grpool/grpool.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/grpool/grpool_supervisor.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/gstructs/gstructs.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/gstructs/gstructs_field.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/gstructs/gstructs_field_tag.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/gstructs/gstructs_tag.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/gstructs/gstructs_type.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/gtime/gtime.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/gtime/gtime_format.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/gtime/gtime_sql.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/gtime/gtime_time.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/gtime/gtime_time_wrapper.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/gtime/gtime_time_zone.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/gtimer/gtimer.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/gtimer/gtimer_entry.go delete mode 100644 
vendor/github.com/gogf/gf/v2/os/gtimer/gtimer_exit.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/gtimer/gtimer_queue.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/gtimer/gtimer_queue_heap.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/gtimer/gtimer_timer.go delete mode 100644 vendor/github.com/gogf/gf/v2/os/gtimer/gtimer_timer_loop.go delete mode 100644 vendor/github.com/gogf/gf/v2/text/gregex/gregex.go delete mode 100644 vendor/github.com/gogf/gf/v2/text/gregex/gregex_cache.go delete mode 100644 vendor/github.com/gogf/gf/v2/text/gstr/gstr.go delete mode 100644 vendor/github.com/gogf/gf/v2/text/gstr/gstr_array.go delete mode 100644 vendor/github.com/gogf/gf/v2/text/gstr/gstr_case.go delete mode 100644 vendor/github.com/gogf/gf/v2/text/gstr/gstr_compare.go delete mode 100644 vendor/github.com/gogf/gf/v2/text/gstr/gstr_contain.go delete mode 100644 vendor/github.com/gogf/gf/v2/text/gstr/gstr_convert.go delete mode 100644 vendor/github.com/gogf/gf/v2/text/gstr/gstr_count.go delete mode 100644 vendor/github.com/gogf/gf/v2/text/gstr/gstr_create.go delete mode 100644 vendor/github.com/gogf/gf/v2/text/gstr/gstr_domain.go delete mode 100644 vendor/github.com/gogf/gf/v2/text/gstr/gstr_is.go delete mode 100644 vendor/github.com/gogf/gf/v2/text/gstr/gstr_length.go delete mode 100644 vendor/github.com/gogf/gf/v2/text/gstr/gstr_parse.go delete mode 100644 vendor/github.com/gogf/gf/v2/text/gstr/gstr_pos.go delete mode 100644 vendor/github.com/gogf/gf/v2/text/gstr/gstr_replace.go delete mode 100644 vendor/github.com/gogf/gf/v2/text/gstr/gstr_similar.go delete mode 100644 vendor/github.com/gogf/gf/v2/text/gstr/gstr_slashes.go delete mode 100644 vendor/github.com/gogf/gf/v2/text/gstr/gstr_split_join.go delete mode 100644 vendor/github.com/gogf/gf/v2/text/gstr/gstr_sub.go delete mode 100644 vendor/github.com/gogf/gf/v2/text/gstr/gstr_trim.go delete mode 100644 vendor/github.com/gogf/gf/v2/text/gstr/gstr_upper_lower.go delete mode 100644 
vendor/github.com/gogf/gf/v2/text/gstr/gstr_version.go delete mode 100644 vendor/github.com/gogf/gf/v2/util/gconv/gconv.go delete mode 100644 vendor/github.com/gogf/gf/v2/util/gconv/gconv_convert.go delete mode 100644 vendor/github.com/gogf/gf/v2/util/gconv/gconv_converter.go delete mode 100644 vendor/github.com/gogf/gf/v2/util/gconv/gconv_float.go delete mode 100644 vendor/github.com/gogf/gf/v2/util/gconv/gconv_int.go delete mode 100644 vendor/github.com/gogf/gf/v2/util/gconv/gconv_interface.go delete mode 100644 vendor/github.com/gogf/gf/v2/util/gconv/gconv_map.go delete mode 100644 vendor/github.com/gogf/gf/v2/util/gconv/gconv_maps.go delete mode 100644 vendor/github.com/gogf/gf/v2/util/gconv/gconv_maptomap.go delete mode 100644 vendor/github.com/gogf/gf/v2/util/gconv/gconv_maptomaps.go delete mode 100644 vendor/github.com/gogf/gf/v2/util/gconv/gconv_ptr.go delete mode 100644 vendor/github.com/gogf/gf/v2/util/gconv/gconv_scan.go delete mode 100644 vendor/github.com/gogf/gf/v2/util/gconv/gconv_slice_any.go delete mode 100644 vendor/github.com/gogf/gf/v2/util/gconv/gconv_slice_float.go delete mode 100644 vendor/github.com/gogf/gf/v2/util/gconv/gconv_slice_int.go delete mode 100644 vendor/github.com/gogf/gf/v2/util/gconv/gconv_slice_str.go delete mode 100644 vendor/github.com/gogf/gf/v2/util/gconv/gconv_slice_uint.go delete mode 100644 vendor/github.com/gogf/gf/v2/util/gconv/gconv_struct.go delete mode 100644 vendor/github.com/gogf/gf/v2/util/gconv/gconv_structs.go delete mode 100644 vendor/github.com/gogf/gf/v2/util/gconv/gconv_time.go delete mode 100644 vendor/github.com/gogf/gf/v2/util/gconv/gconv_uint.go delete mode 100644 vendor/github.com/gogf/gf/v2/util/gconv/gconv_unsafe.go delete mode 100644 vendor/github.com/gogf/gf/v2/util/grand/grand.go delete mode 100644 vendor/github.com/gogf/gf/v2/util/grand/grand_buffer.go delete mode 100644 vendor/github.com/gogf/gf/v2/util/gtag/gtag.go delete mode 100644 vendor/github.com/gogf/gf/v2/util/gtag/gtag_enums.go delete 
mode 100644 vendor/github.com/gogf/gf/v2/util/gtag/gtag_func.go delete mode 100644 vendor/github.com/gogf/gf/v2/util/gutil/gutil.go delete mode 100644 vendor/github.com/gogf/gf/v2/util/gutil/gutil_comparator.go delete mode 100644 vendor/github.com/gogf/gf/v2/util/gutil/gutil_copy.go delete mode 100644 vendor/github.com/gogf/gf/v2/util/gutil/gutil_default.go delete mode 100644 vendor/github.com/gogf/gf/v2/util/gutil/gutil_dump.go delete mode 100644 vendor/github.com/gogf/gf/v2/util/gutil/gutil_list.go delete mode 100644 vendor/github.com/gogf/gf/v2/util/gutil/gutil_map.go delete mode 100644 vendor/github.com/gogf/gf/v2/util/gutil/gutil_reflect.go delete mode 100644 vendor/github.com/gogf/gf/v2/util/gutil/gutil_slice.go delete mode 100644 vendor/github.com/gogf/gf/v2/util/gutil/gutil_struct.go delete mode 100644 vendor/github.com/lib/pq/.gitignore delete mode 100644 vendor/github.com/lib/pq/LICENSE.md delete mode 100644 vendor/github.com/lib/pq/README.md delete mode 100644 vendor/github.com/lib/pq/TESTS.md delete mode 100644 vendor/github.com/lib/pq/array.go delete mode 100644 vendor/github.com/lib/pq/buf.go delete mode 100644 vendor/github.com/lib/pq/conn.go delete mode 100644 vendor/github.com/lib/pq/conn_go115.go delete mode 100644 vendor/github.com/lib/pq/conn_go18.go delete mode 100644 vendor/github.com/lib/pq/connector.go delete mode 100644 vendor/github.com/lib/pq/copy.go delete mode 100644 vendor/github.com/lib/pq/doc.go delete mode 100644 vendor/github.com/lib/pq/encode.go delete mode 100644 vendor/github.com/lib/pq/error.go delete mode 100644 vendor/github.com/lib/pq/krb.go delete mode 100644 vendor/github.com/lib/pq/notice.go delete mode 100644 vendor/github.com/lib/pq/notify.go delete mode 100644 vendor/github.com/lib/pq/oid/doc.go delete mode 100644 vendor/github.com/lib/pq/oid/types.go delete mode 100644 vendor/github.com/lib/pq/rows.go delete mode 100644 vendor/github.com/lib/pq/scram/scram.go delete mode 100644 vendor/github.com/lib/pq/ssl.go delete 
mode 100644 vendor/github.com/lib/pq/ssl_permissions.go delete mode 100644 vendor/github.com/lib/pq/ssl_windows.go delete mode 100644 vendor/github.com/lib/pq/url.go delete mode 100644 vendor/github.com/lib/pq/user_other.go delete mode 100644 vendor/github.com/lib/pq/user_posix.go delete mode 100644 vendor/github.com/lib/pq/user_windows.go delete mode 100644 vendor/github.com/lib/pq/uuid.go delete mode 100644 vendor/github.com/mattn/go-colorable/LICENSE delete mode 100644 vendor/github.com/mattn/go-colorable/README.md delete mode 100644 vendor/github.com/mattn/go-colorable/colorable_appengine.go delete mode 100644 vendor/github.com/mattn/go-colorable/colorable_others.go delete mode 100644 vendor/github.com/mattn/go-colorable/colorable_windows.go delete mode 100644 vendor/github.com/mattn/go-colorable/noncolorable.go delete mode 100644 vendor/github.com/syndtr/goleveldb/LICENSE delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/batch.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/cache/cache.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/cache/lru.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/comparer.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/db.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/db_iter.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/db_snapshot.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/db_state.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/db_transaction.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/db_util.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/db_write.go delete mode 100644 
vendor/github.com/syndtr/goleveldb/leveldb/doc.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/errors.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/errors/errors.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/filter.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/filter/bloom.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/filter/filter.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/journal/journal.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/key.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/opt/options.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/options.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/session.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/session_compaction.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/session_record.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/session_util.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/storage.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_nacl.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go delete mode 
100644 vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/storage/storage.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/table.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/table/reader.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/table/table.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/table/writer.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/util.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/util/buffer.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/util/crc32.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/util/hash.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/util/range.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/util/util.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/version.go delete mode 100644 vendor/go.opentelemetry.io/otel/.codespellignore delete mode 100644 vendor/go.opentelemetry.io/otel/.codespellrc delete mode 100644 vendor/go.opentelemetry.io/otel/.gitattributes delete mode 100644 vendor/go.opentelemetry.io/otel/.gitignore delete mode 100644 vendor/go.opentelemetry.io/otel/.gitmodules delete mode 100644 vendor/go.opentelemetry.io/otel/.golangci.yml delete mode 100644 vendor/go.opentelemetry.io/otel/.lycheeignore delete mode 100644 vendor/go.opentelemetry.io/otel/.markdownlint.yaml delete mode 100644 vendor/go.opentelemetry.io/otel/CHANGELOG.md delete mode 100644 vendor/go.opentelemetry.io/otel/CODEOWNERS delete mode 100644 vendor/go.opentelemetry.io/otel/CONTRIBUTING.md delete mode 100644 vendor/go.opentelemetry.io/otel/LICENSE delete mode 100644 vendor/go.opentelemetry.io/otel/Makefile delete 
mode 100644 vendor/go.opentelemetry.io/otel/README.md delete mode 100644 vendor/go.opentelemetry.io/otel/RELEASING.md delete mode 100644 vendor/go.opentelemetry.io/otel/VERSIONING.md delete mode 100644 vendor/go.opentelemetry.io/otel/attribute/doc.go delete mode 100644 vendor/go.opentelemetry.io/otel/attribute/encoder.go delete mode 100644 vendor/go.opentelemetry.io/otel/attribute/iterator.go delete mode 100644 vendor/go.opentelemetry.io/otel/attribute/key.go delete mode 100644 vendor/go.opentelemetry.io/otel/attribute/kv.go delete mode 100644 vendor/go.opentelemetry.io/otel/attribute/set.go delete mode 100644 vendor/go.opentelemetry.io/otel/attribute/type_string.go delete mode 100644 vendor/go.opentelemetry.io/otel/attribute/value.go delete mode 100644 vendor/go.opentelemetry.io/otel/baggage/baggage.go delete mode 100644 vendor/go.opentelemetry.io/otel/baggage/context.go delete mode 100644 vendor/go.opentelemetry.io/otel/baggage/doc.go delete mode 100644 vendor/go.opentelemetry.io/otel/codes/codes.go delete mode 100644 vendor/go.opentelemetry.io/otel/codes/doc.go delete mode 100644 vendor/go.opentelemetry.io/otel/doc.go delete mode 100644 vendor/go.opentelemetry.io/otel/error_handler.go delete mode 100644 vendor/go.opentelemetry.io/otel/handler.go delete mode 100644 vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go delete mode 100644 vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go delete mode 100644 vendor/go.opentelemetry.io/otel/internal/baggage/context.go delete mode 100644 vendor/go.opentelemetry.io/otel/internal/global/handler.go delete mode 100644 vendor/go.opentelemetry.io/otel/internal/global/instruments.go delete mode 100644 vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go delete mode 100644 vendor/go.opentelemetry.io/otel/internal/global/meter.go delete mode 100644 vendor/go.opentelemetry.io/otel/internal/global/propagator.go delete mode 100644 vendor/go.opentelemetry.io/otel/internal/global/state.go delete 
mode 100644 vendor/go.opentelemetry.io/otel/internal/global/trace.go delete mode 100644 vendor/go.opentelemetry.io/otel/internal/rawhelpers.go delete mode 100644 vendor/go.opentelemetry.io/otel/internal_logging.go delete mode 100644 vendor/go.opentelemetry.io/otel/metric.go delete mode 100644 vendor/go.opentelemetry.io/otel/metric/LICENSE delete mode 100644 vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go delete mode 100644 vendor/go.opentelemetry.io/otel/metric/asyncint64.go delete mode 100644 vendor/go.opentelemetry.io/otel/metric/config.go delete mode 100644 vendor/go.opentelemetry.io/otel/metric/doc.go delete mode 100644 vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go delete mode 100644 vendor/go.opentelemetry.io/otel/metric/instrument.go delete mode 100644 vendor/go.opentelemetry.io/otel/metric/meter.go delete mode 100644 vendor/go.opentelemetry.io/otel/metric/syncfloat64.go delete mode 100644 vendor/go.opentelemetry.io/otel/metric/syncint64.go delete mode 100644 vendor/go.opentelemetry.io/otel/propagation.go delete mode 100644 vendor/go.opentelemetry.io/otel/propagation/baggage.go delete mode 100644 vendor/go.opentelemetry.io/otel/propagation/doc.go delete mode 100644 vendor/go.opentelemetry.io/otel/propagation/propagation.go delete mode 100644 vendor/go.opentelemetry.io/otel/propagation/trace_context.go delete mode 100644 vendor/go.opentelemetry.io/otel/requirements.txt delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/LICENSE delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/internal/internal.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/auto.go delete mode 100644 
vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/config.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/container.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/doc.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/env.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/os.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/process.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/resource.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/doc.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/event.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go delete mode 100644 
vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/link.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/provider.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/span.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/version.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/version.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/internal/http.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.4.0/doc.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.4.0/exception.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.4.0/http.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.4.0/resource.go delete mode 100644 
vendor/go.opentelemetry.io/otel/semconv/v1.4.0/schema.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.4.0/trace.go delete mode 100644 vendor/go.opentelemetry.io/otel/trace.go delete mode 100644 vendor/go.opentelemetry.io/otel/trace/LICENSE delete mode 100644 vendor/go.opentelemetry.io/otel/trace/config.go delete mode 100644 vendor/go.opentelemetry.io/otel/trace/context.go delete mode 100644 vendor/go.opentelemetry.io/otel/trace/doc.go delete mode 100644 vendor/go.opentelemetry.io/otel/trace/nonrecording.go delete mode 100644 vendor/go.opentelemetry.io/otel/trace/noop.go delete mode 100644 vendor/go.opentelemetry.io/otel/trace/trace.go delete mode 100644 vendor/go.opentelemetry.io/otel/trace/tracestate.go delete mode 100644 vendor/go.opentelemetry.io/otel/version.go delete mode 100644 vendor/go.opentelemetry.io/otel/versions.yaml delete mode 100644 vendor/golang.org/x/sys/windows/registry/key.go delete mode 100644 vendor/golang.org/x/sys/windows/registry/mksyscall.go delete mode 100644 vendor/golang.org/x/sys/windows/registry/syscall.go delete mode 100644 vendor/golang.org/x/sys/windows/registry/value.go delete mode 100644 vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go delete mode 100644 vendor/xorm.io/builder/.gitignore delete mode 100644 vendor/xorm.io/builder/LICENSE delete mode 100644 vendor/xorm.io/builder/README.md delete mode 100644 vendor/xorm.io/builder/as.go delete mode 100644 vendor/xorm.io/builder/builder.go delete mode 100644 vendor/xorm.io/builder/builder_delete.go delete mode 100644 vendor/xorm.io/builder/builder_insert.go delete mode 100644 vendor/xorm.io/builder/builder_join.go delete mode 100644 vendor/xorm.io/builder/builder_limit.go delete mode 100644 vendor/xorm.io/builder/builder_select.go delete mode 100644 vendor/xorm.io/builder/builder_set_operations.go delete mode 100644 vendor/xorm.io/builder/builder_update.go delete mode 100644 vendor/xorm.io/builder/cond.go delete mode 100644 
vendor/xorm.io/builder/cond_and.go delete mode 100644 vendor/xorm.io/builder/cond_between.go delete mode 100644 vendor/xorm.io/builder/cond_compare.go delete mode 100644 vendor/xorm.io/builder/cond_eq.go delete mode 100644 vendor/xorm.io/builder/cond_exists.go delete mode 100644 vendor/xorm.io/builder/cond_if.go delete mode 100644 vendor/xorm.io/builder/cond_in.go delete mode 100644 vendor/xorm.io/builder/cond_like.go delete mode 100644 vendor/xorm.io/builder/cond_neq.go delete mode 100644 vendor/xorm.io/builder/cond_not.go delete mode 100644 vendor/xorm.io/builder/cond_not_exists.go delete mode 100644 vendor/xorm.io/builder/cond_notin.go delete mode 100644 vendor/xorm.io/builder/cond_null.go delete mode 100644 vendor/xorm.io/builder/cond_or.go delete mode 100644 vendor/xorm.io/builder/doc.go delete mode 100644 vendor/xorm.io/builder/error.go delete mode 100644 vendor/xorm.io/builder/expr.go delete mode 100644 vendor/xorm.io/builder/sql.go delete mode 100644 vendor/xorm.io/builder/writer.go delete mode 100644 vendor/xorm.io/xorm/.changelog.yml delete mode 100644 vendor/xorm.io/xorm/.drone.yml delete mode 100644 vendor/xorm.io/xorm/.gitignore delete mode 100644 vendor/xorm.io/xorm/.golangci.yml delete mode 100644 vendor/xorm.io/xorm/CHANGELOG.md delete mode 100644 vendor/xorm.io/xorm/CONTRIBUTING.md delete mode 100644 vendor/xorm.io/xorm/LICENSE delete mode 100644 vendor/xorm.io/xorm/Makefile delete mode 100644 vendor/xorm.io/xorm/README.md delete mode 100644 vendor/xorm.io/xorm/README_CN.md delete mode 100644 vendor/xorm.io/xorm/caches/cache.go delete mode 100644 vendor/xorm.io/xorm/caches/encode.go delete mode 100644 vendor/xorm.io/xorm/caches/leveldb.go delete mode 100644 vendor/xorm.io/xorm/caches/lru.go delete mode 100644 vendor/xorm.io/xorm/caches/manager.go delete mode 100644 vendor/xorm.io/xorm/caches/memory_store.go delete mode 100644 vendor/xorm.io/xorm/contexts/context_cache.go delete mode 100644 vendor/xorm.io/xorm/contexts/hook.go delete mode 100644 
vendor/xorm.io/xorm/convert/bool.go delete mode 100644 vendor/xorm.io/xorm/convert/conversion.go delete mode 100644 vendor/xorm.io/xorm/convert/float.go delete mode 100644 vendor/xorm.io/xorm/convert/int.go delete mode 100644 vendor/xorm.io/xorm/convert/interface.go delete mode 100644 vendor/xorm.io/xorm/convert/scanner.go delete mode 100644 vendor/xorm.io/xorm/convert/string.go delete mode 100644 vendor/xorm.io/xorm/convert/time.go delete mode 100644 vendor/xorm.io/xorm/core/db.go delete mode 100644 vendor/xorm.io/xorm/core/error.go delete mode 100644 vendor/xorm.io/xorm/core/interface.go delete mode 100644 vendor/xorm.io/xorm/core/rows.go delete mode 100644 vendor/xorm.io/xorm/core/scan.go delete mode 100644 vendor/xorm.io/xorm/core/stmt.go delete mode 100644 vendor/xorm.io/xorm/core/tx.go delete mode 100644 vendor/xorm.io/xorm/dialects/dameng.go delete mode 100644 vendor/xorm.io/xorm/dialects/dialect.go delete mode 100644 vendor/xorm.io/xorm/dialects/driver.go delete mode 100644 vendor/xorm.io/xorm/dialects/filter.go delete mode 100644 vendor/xorm.io/xorm/dialects/mssql.go delete mode 100644 vendor/xorm.io/xorm/dialects/mysql.go delete mode 100644 vendor/xorm.io/xorm/dialects/oracle.go delete mode 100644 vendor/xorm.io/xorm/dialects/pg_reserved.txt delete mode 100644 vendor/xorm.io/xorm/dialects/postgres.go delete mode 100644 vendor/xorm.io/xorm/dialects/quote.go delete mode 100644 vendor/xorm.io/xorm/dialects/sqlite3.go delete mode 100644 vendor/xorm.io/xorm/dialects/table_name.go delete mode 100644 vendor/xorm.io/xorm/dialects/time.go delete mode 100644 vendor/xorm.io/xorm/doc.go delete mode 100644 vendor/xorm.io/xorm/engine.go delete mode 100644 vendor/xorm.io/xorm/engine_group.go delete mode 100644 vendor/xorm.io/xorm/engine_group_policy.go delete mode 100644 vendor/xorm.io/xorm/error.go delete mode 100644 vendor/xorm.io/xorm/interface.go delete mode 100644 vendor/xorm.io/xorm/internal/json/gojson.go delete mode 100644 
vendor/xorm.io/xorm/internal/json/json.go delete mode 100644 vendor/xorm.io/xorm/internal/json/jsoniter.go delete mode 100644 vendor/xorm.io/xorm/internal/statements/cache.go delete mode 100644 vendor/xorm.io/xorm/internal/statements/column_map.go delete mode 100644 vendor/xorm.io/xorm/internal/statements/cond.go delete mode 100644 vendor/xorm.io/xorm/internal/statements/expr.go delete mode 100644 vendor/xorm.io/xorm/internal/statements/insert.go delete mode 100644 vendor/xorm.io/xorm/internal/statements/join.go delete mode 100644 vendor/xorm.io/xorm/internal/statements/order_by.go delete mode 100644 vendor/xorm.io/xorm/internal/statements/pk.go delete mode 100644 vendor/xorm.io/xorm/internal/statements/query.go delete mode 100644 vendor/xorm.io/xorm/internal/statements/select.go delete mode 100644 vendor/xorm.io/xorm/internal/statements/statement.go delete mode 100644 vendor/xorm.io/xorm/internal/statements/statement_args.go delete mode 100644 vendor/xorm.io/xorm/internal/statements/table_name.go delete mode 100644 vendor/xorm.io/xorm/internal/statements/update.go delete mode 100644 vendor/xorm.io/xorm/internal/statements/values.go delete mode 100644 vendor/xorm.io/xorm/internal/utils/builder.go delete mode 100644 vendor/xorm.io/xorm/internal/utils/name.go delete mode 100644 vendor/xorm.io/xorm/internal/utils/new.go delete mode 100644 vendor/xorm.io/xorm/internal/utils/reflect.go delete mode 100644 vendor/xorm.io/xorm/internal/utils/slice.go delete mode 100644 vendor/xorm.io/xorm/internal/utils/sql.go delete mode 100644 vendor/xorm.io/xorm/internal/utils/strings.go delete mode 100644 vendor/xorm.io/xorm/internal/utils/zero.go delete mode 100644 vendor/xorm.io/xorm/log/logger.go delete mode 100644 vendor/xorm.io/xorm/log/logger_context.go delete mode 100644 vendor/xorm.io/xorm/log/syslogger.go delete mode 100644 vendor/xorm.io/xorm/names/mapper.go delete mode 100644 vendor/xorm.io/xorm/names/table_name.go delete mode 100644 vendor/xorm.io/xorm/processors.go delete 
mode 100644 vendor/xorm.io/xorm/rows.go delete mode 100644 vendor/xorm.io/xorm/scan.go delete mode 100644 vendor/xorm.io/xorm/schemas/column.go delete mode 100644 vendor/xorm.io/xorm/schemas/index.go delete mode 100644 vendor/xorm.io/xorm/schemas/pk.go delete mode 100644 vendor/xorm.io/xorm/schemas/quote.go delete mode 100644 vendor/xorm.io/xorm/schemas/table.go delete mode 100644 vendor/xorm.io/xorm/schemas/type.go delete mode 100644 vendor/xorm.io/xorm/schemas/version.go delete mode 100644 vendor/xorm.io/xorm/session.go delete mode 100644 vendor/xorm.io/xorm/session_cols.go delete mode 100644 vendor/xorm.io/xorm/session_cond.go delete mode 100644 vendor/xorm.io/xorm/session_delete.go delete mode 100644 vendor/xorm.io/xorm/session_exist.go delete mode 100644 vendor/xorm.io/xorm/session_find.go delete mode 100644 vendor/xorm.io/xorm/session_get.go delete mode 100644 vendor/xorm.io/xorm/session_insert.go delete mode 100644 vendor/xorm.io/xorm/session_iterate.go delete mode 100644 vendor/xorm.io/xorm/session_raw.go delete mode 100644 vendor/xorm.io/xorm/session_schema.go delete mode 100644 vendor/xorm.io/xorm/session_stats.go delete mode 100644 vendor/xorm.io/xorm/session_tx.go delete mode 100644 vendor/xorm.io/xorm/session_update.go delete mode 100644 vendor/xorm.io/xorm/tags/parser.go delete mode 100644 vendor/xorm.io/xorm/tags/tag.go diff --git a/go.mod b/go.mod index 3472d4ca..9f532046 100644 --- a/go.mod +++ b/go.mod @@ -12,17 +12,14 @@ require ( github.com/gin-gonic/gin v1.9.1 github.com/go-playground/locales v0.14.1 github.com/go-playground/universal-translator v0.18.1 - github.com/go-playground/validator/v10 v10.15.1 - github.com/go-sql-driver/mysql v1.7.1 + github.com/go-playground/validator/v10 v10.15.3 github.com/goccy/go-json v0.10.2 - github.com/gogf/gf/v2 v2.5.2 github.com/json-iterator/go v1.1.12 - github.com/lib/pq v1.10.9 github.com/mitchellh/mapstructure v1.5.0 github.com/mvdan/xurls v1.1.0 github.com/natefinch/lumberjack v2.0.0+incompatible 
github.com/oschwald/geoip2-golang v1.9.0 - github.com/qiniu/go-sdk/v7 v7.17.0 + github.com/qiniu/go-sdk/v7 v7.17.1 github.com/redis/go-redis/v9 v9.1.0 github.com/robfig/cron/v3 v3.0.1 github.com/saracen/go7z v0.0.0-20191010121135-9c09b6bd7fda @@ -33,32 +30,29 @@ require ( go.uber.org/zap v1.25.0 golang.org/x/crypto v0.12.0 golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 - golang.org/x/text v0.12.0 + golang.org/x/text v0.13.0 gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df gorm.io/datatypes v1.2.0 gorm.io/driver/mysql v1.5.1 gorm.io/driver/postgres v1.5.2 gorm.io/gen v0.3.23 gorm.io/gorm v1.25.4 - xorm.io/builder v0.3.13 - xorm.io/xorm v1.3.2 ) require ( + github.com/BurntSushi/toml v1.2.0 // indirect github.com/bitly/go-simplejson v0.5.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d // indirect github.com/chenzhuoyu/iasm v0.9.0 // indirect github.com/clbanning/mxj v1.8.4 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect - github.com/fatih/color v1.15.0 // indirect - github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/gabriel-vasile/mimetype v1.4.2 // indirect github.com/gin-contrib/sse v0.1.0 // indirect - github.com/go-logr/logr v1.2.4 // indirect - github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.3.0 // indirect + github.com/go-sql-driver/mysql v1.7.1 // indirect github.com/golang/snappy v0.0.4 // indirect + github.com/google/go-cmp v0.5.9 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect @@ -68,7 +62,6 @@ require ( github.com/klauspost/compress v1.16.7 // indirect github.com/klauspost/cpuid/v2 v2.2.5 // indirect github.com/leodido/go-urn v1.2.4 // indirect - github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.19 // indirect 
github.com/mattn/go-sqlite3 v2.0.3+incompatible // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect @@ -76,11 +69,10 @@ require ( github.com/montanaflynn/stats v0.7.1 // indirect github.com/mozillazg/go-httpheader v0.4.0 // indirect github.com/oschwald/maxminddb-golang v1.12.0 // indirect - github.com/pelletier/go-toml/v2 v2.0.9 // indirect + github.com/pelletier/go-toml/v2 v2.1.0 // indirect github.com/rogpeppe/go-internal v1.8.1 // indirect github.com/saracen/go7z-fixtures v0.0.0-20190623165746-aa6b8fba1d2f // indirect github.com/saracen/solidblock v0.0.0-20190426153529-45df20abab6f // indirect - github.com/syndtr/goleveldb v1.0.0 // indirect github.com/tklauser/go-sysconf v0.3.12 // indirect github.com/tklauser/numcpus v0.6.1 // indirect github.com/twitchyliquid64/golang-asm v0.15.1 // indirect @@ -91,16 +83,12 @@ require ( github.com/xdg-go/stringprep v1.0.4 // indirect github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect - go.opentelemetry.io/otel v1.16.0 // indirect - go.opentelemetry.io/otel/metric v1.16.0 // indirect - go.opentelemetry.io/otel/sdk v1.16.0 // indirect - go.opentelemetry.io/otel/trace v1.16.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/arch v0.4.0 // indirect + golang.org/x/arch v0.5.0 // indirect golang.org/x/mod v0.12.0 // indirect golang.org/x/net v0.14.0 // indirect golang.org/x/sync v0.3.0 // indirect - golang.org/x/sys v0.11.0 // indirect + golang.org/x/sys v0.12.0 // indirect golang.org/x/time v0.3.0 // indirect golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 // indirect google.golang.org/protobuf v1.31.0 // indirect diff --git a/go.sum b/go.sum index 9f470fcd..a10ab57e 100644 --- a/go.sum +++ b/go.sum @@ -1,45 +1,17 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= 
-gitea.com/xorm/sqlfiddle v0.0.0-20180821085327-62ce714f951a h1:lSA0F4e9A2NcQSqGqTOXqu2aRi/XEQxDCBwM8yJtE6s= -gitea.com/xorm/sqlfiddle v0.0.0-20180821085327-62ce714f951a/go.mod h1:EXuID2Zs0pAQhH8yz+DNjUbjppKQzKFAn28TMYPB6IU= -gitee.com/travelliu/dm v1.8.11192/go.mod h1:DHTzyhCrM843x9VdKVbZ+GKXGRbKM2sJ4LxihRxShkE= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v1.2.0 h1:Rt8g24XnyGTyglgET/PRUNlrUeu9F5L+7FilkXfZgs0= -github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= -github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= +github.com/BurntSushi/toml v1.2.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/MercuryEngineering/CookieMonster v0.0.0-20180304172713-1584578b3403 h1:EtZwYyLbkEcIt+B//6sujwRCnHuTEK3qiSypAX5aJeM= github.com/MercuryEngineering/CookieMonster v0.0.0-20180304172713-1584578b3403/go.mod h1:mM6WvakkX2m+NgMiPCfFFjwfH4KzENC07zeGEqq9U7s= github.com/QcloudApi/qcloud_sign_golang v0.0.0-20141224014652-e4130a326409/go.mod h1:1pk82RBxDY/JZnPQrtqHlUFfCctgdorsd9M06fMynOM= -github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= -github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= 
-github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/aliyun/aliyun-oss-go-sdk v2.2.9+incompatible h1:Sg/2xHwDrioHpxTN6WMiwbXTpUEinBpHsN7mG21Rc2k= github.com/aliyun/aliyun-oss-go-sdk v2.2.9+incompatible/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= github.com/allegro/bigcache/v3 v3.1.0 h1:H2Vp8VOvxcrB91o86fUSVJFqeuz8kpyyB02eH3bSzwk= github.com/allegro/bigcache/v3 v3.1.0/go.mod h1:aPyh7jEvrog9zAwx5N7+JUQX5dZTSGpxF1LAR4dr35I= -github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= -github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= -github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/baidubce/bce-sdk-go v0.9.156 h1:f++WfptxGmSp5acsjl4kUxHpWDDccoFqkIrQKxvp/Sw= github.com/baidubce/bce-sdk-go v0.9.156/go.mod h1:zbYJMQwE4IZuyrJiFO8tO8NbtYiKTFTbwh4eIsqjVdg= github.com/basgys/goxml2json v1.1.0 h1:4ln5i4rseYfXNd86lGEB+Vi652IsIXIvggKM/BhUKVw= github.com/basgys/goxml2json v1.1.0/go.mod h1:wH7a5Np/Q4QoECFIU8zTQlZwZkrilY0itPfecMw41Dw= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= 
-github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bitly/go-simplejson v0.5.0 h1:6IH+V8/tVMab511d5bn4M7EwGXZf9Hj6i2xSwkNEM+Y= github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= github.com/bsm/ginkgo/v2 v2.9.5 h1:rtVBYPs3+TC5iLUVOis1B9tjLTup7Cj5IfzosKtvTJ0= @@ -48,10 +20,6 @@ github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1 github.com/bytedance/sonic v1.10.0-rc/go.mod h1:ElCzW+ufi8qKqNW0FY314xriJhyJhuoJ3gFZdAHF7NM= github.com/bytedance/sonic v1.10.0 h1:qtNZduETEIWJVIyDl01BeNxur2rW9OwTQ/yBqFRkKEk= github.com/bytedance/sonic v1.10.0/go.mod h1:iZcSUejdk5aukTND/Eu/ivjQuEL0Cu9/rf50Hi0u/g4= -github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= -github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY= @@ -62,62 +30,18 @@ github.com/chenzhuoyu/iasm v0.9.0 h1:9fhXjVzq5hUy2gkhhgHl95zG2cEAhw9OSGs8toWWAwo github.com/chenzhuoyu/iasm v0.9.0/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog= github.com/clbanning/mxj v1.8.4 h1:HuhwZtbyvyOw+3Z1AowPkU87JkJUSv751ELWaiTpj8I= github.com/clbanning/mxj v1.8.4/go.mod h1:BVjHeAH+rl9rs6f+QIpeRl0tfu10SXn1pUSa5PVGJng= -github.com/clbanning/mxj/v2 v2.7.0 
h1:WA/La7UGCanFe5NpHF0Q3DNtnCsVoxbPKuyBNHWRyME= -github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= -github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/denisenkom/go-mssqldb v0.10.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f 
h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= -github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= -github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= -github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= -github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= -github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= -github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= -github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= -github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU= 
github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg= github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= -github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= @@ -132,163 +56,43 @@ github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= github.com/go-playground/universal-translator v0.18.1/go.mod 
h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= github.com/go-playground/validator/v10 v10.8.0/go.mod h1:9JhgTzTaE31GZDpH/HSvHiRJrJ3iKAgqqH0Bl/Ocjdk= -github.com/go-playground/validator/v10 v10.15.1 h1:BSe8uhN+xQ4r5guV/ywQI4gO59C2raYcGffYWZEjZzM= -github.com/go-playground/validator/v10 v10.15.1/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU= -github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-playground/validator/v10 v10.15.3 h1:S+sSpunYjNPDuXkWbK+x+bA7iXiW296KG4dL3X7xUZo= +github.com/go-playground/validator/v10 v10.15.3/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU= github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI= github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/goccy/go-json v0.8.1/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= -github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= -github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= -github.com/gogf/gf/v2 v2.5.2 h1:fACJE7DJH6iTGHGhgiNY1uuZIZtr2IqQkJ52E+wBnt8= -github.com/gogf/gf/v2 v2.5.2/go.mod h1:7yf5qp0BznfsYx7Sw49m3mQvBsHpwAjJk3Q9ZnKoUEc= -github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= 
-github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 h1:au07oEsX2xN0ktxqI+Sida1w446QrXBRJ0nee3SNZlA= github.com/golang-sql/sqlexp v0.1.0 h1:ZCD6MBpcuOVfGVqsEmY5/4FtYiKz6tSyUv9LPEDei6A= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod 
h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= -github.com/grokify/html-strip-tags-go v0.0.1 h1:0fThFwLbW7P/kOiTBs03FsJSV9RM2M/Q/MOnCQxKMo0= -github.com/grpc-ecosystem/go-grpc-middleware 
v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= -github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.0/go.mod 
h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= -github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= -github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= -github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= -github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= -github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA= -github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE= -github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s= -github.com/jackc/pgconn v1.4.0/go.mod h1:Y2O3ZDF0q4mMacyWV3AstPJpeHXWGEetiFttmq5lahk= -github.com/jackc/pgconn v1.5.0/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI= -github.com/jackc/pgconn v1.5.1-0.20200601181101-fa742c524853/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI= -github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o= -github.com/jackc/pgconn v1.8.1/go.mod h1:JV6m6b6jhjdmzchES0drzCcYcAHS1OPD5xu3OZ/lE2g= -github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY= -github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= 
-github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= -github.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= -github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78= -github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA= -github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= -github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= -github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= -github.com/jackc/pgproto3/v2 v2.0.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= -github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk= github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= -github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= -github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= -github.com/jackc/pgtype 
v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= -github.com/jackc/pgtype v1.2.0/go.mod h1:5m2OfMh1wTK7x+Fk952IDmI4nw3nPrvtQdM0ZT4WpC0= -github.com/jackc/pgtype v1.3.1-0.20200510190516-8cd94a14c75a/go.mod h1:vaogEUkALtxZMCH411K+tKzNpwzCKU+AnPzBKZ+I+Po= -github.com/jackc/pgtype v1.3.1-0.20200606141011-f6355165a91c/go.mod h1:cvk9Bgu/VzJ9/lxTO5R5sf80p0DiucVtN7ZxvaC4GmQ= -github.com/jackc/pgtype v1.7.0/go.mod h1:ZnHF+rMePVqDKaOfJVI4Q8IVvAQMryDlDkZnKOI75BE= -github.com/jackc/pgtype v1.8.0/go.mod h1:PqDKcEBtllAtk/2p6z6SHdXW5UB+MhE75tUol2OKexE= -github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= -github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= -github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= -github.com/jackc/pgx/v4 v4.5.0/go.mod h1:EpAKPLdnTorwmPUUsqrPxy5fphV18j9q3wrfRXgo+kA= -github.com/jackc/pgx/v4 v4.6.1-0.20200510190926-94ba730bb1e9/go.mod h1:t3/cdRQl6fOLDxqtlyhe9UWgfIi9R8+8v8GKV5TRA/o= -github.com/jackc/pgx/v4 v4.6.1-0.20200606145419-4e5062306904/go.mod h1:ZDaNWkt9sW1JMiNn0kdYBaLelIhw7Pg4qd+Vk6tw7Hg= -github.com/jackc/pgx/v4 v4.11.0/go.mod h1:i62xJgdrtVDsnL3U8ekyrQXEwGNTRoG7/8r+CIdYfcc= -github.com/jackc/pgx/v4 v4.12.0/go.mod h1:fE547h6VulLPA3kySjfnSG/e2D861g/50JlVUa/ub60= github.com/jackc/pgx/v5 v5.4.3 h1:cxFyXhxlvAifxnkKKdlxv8XqUf59tDlYjnV5YYfsJJY= github.com/jackc/pgx/v5 v5.4.3/go.mod h1:Ig06C2Vu0t5qXC60W8sqIthScaEnFvojjj9dSljmHRA= -github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v1.1.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v1.1.1/go.mod 
h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= github.com/jinzhu/now v1.1.4/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ= github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= -github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.16.7 
h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I= github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= @@ -296,73 +100,30 @@ github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa02 github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY= github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q= github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4= -github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= 
-github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= -github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= -github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= -github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= -github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= -github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= -github.com/mattn/go-isatty v0.0.12/go.mod 
h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= -github.com/mattn/go-sqlite3 v1.14.9/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/mattn/go-sqlite3 v2.0.3+incompatible h1:gXHsfypPkaMZrKbD5209QV9jbUTJKjyR5WD3HYQSd+U= github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/microsoft/go-mssqldb v0.17.0 h1:Fto83dMZPnYv1Zwx5vHHxpNraeEaUlQ/hhHLgZiaenE= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.4.3/go.mod 
h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= @@ -373,130 +134,43 @@ github.com/mozillazg/go-httpheader v0.4.0 h1:aBn6aRXtFzyDLZ4VIRLsZbbJloagQfMnCiY github.com/mozillazg/go-httpheader v0.4.0/go.mod h1:PuT8h0pw6efvp8ZeUec1Rs7dwjK08bt6gKSReGMqtdA= github.com/mvdan/xurls v1.1.0 h1:OpuDelGQ1R1ueQ6sSryzi6P+1RtBpfQHM8fJwlE45ww= github.com/mvdan/xurls v1.1.0/go.mod h1:tQlNn3BED8bE/15hnSL2HLkDeLWpNPAwtw7wkEq44oU= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/natefinch/lumberjack v2.0.0+incompatible h1:4QJd3OLAMgj7ph+yZTuX13Ld4UpgHp07nNdFX7mqFfM= github.com/natefinch/lumberjack v2.0.0+incompatible/go.mod h1:Wi9p2TTF5DG5oU+6YfsmYQpsTIOm0B1VNzQg9Mw6nPk= -github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= -github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= 
-github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= -github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= -github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= -github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= -github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= -github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= -github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= -github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= -github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= -github.com/openzipkin/zipkin-go v0.1.6/go.mod 
h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= -github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/oschwald/geoip2-golang v1.9.0 h1:uvD3O6fXAXs+usU+UGExshpdP13GAqp4GBrzN7IgKZc= github.com/oschwald/geoip2-golang v1.9.0/go.mod h1:BHK6TvDyATVQhKNbQBdrj9eAvuwOMi2zSFXizL3K81Y= github.com/oschwald/maxminddb-golang v1.12.0 h1:9FnTOD0YOhP7DGxGsq4glzpGy5+w7pq50AS6wALUMYs= github.com/oschwald/maxminddb-golang v1.12.0/go.mod h1:q0Nob5lTCqyQ8WT6FYgS1L7PXKVVbgiymefNwIjPzgY= -github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pelletier/go-toml/v2 v2.0.9 h1:uH2qQXheeefCCkuBBSLi7jCiSmj3VRh2+Goq2N7Xxu0= -github.com/pelletier/go-toml/v2 v2.0.9/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= -github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= -github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= +github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= 
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= 
github.com/qiniu/dyn v1.3.0/go.mod h1:E8oERcm8TtwJiZvkQPbcAh0RL8jO1G0VXJMW3FAWdkk= -github.com/qiniu/go-sdk/v7 v7.17.0 h1:sF05b0NFdlUEz1SnJStrOn+PVUPu76lYCoHZCZyNYgs= -github.com/qiniu/go-sdk/v7 v7.17.0/go.mod h1:nqoYCNo53ZlGA521RvRethvxUDvXKt4gtYXOwye868w= +github.com/qiniu/go-sdk/v7 v7.17.1 h1:UoQv7fBKtzAiD1qZPIvTy62Se48YLKxcCYP9nAwWMa0= +github.com/qiniu/go-sdk/v7 v7.17.1/go.mod h1:nqoYCNo53ZlGA521RvRethvxUDvXKt4gtYXOwye868w= github.com/qiniu/x v1.10.5/go.mod h1:03Ni9tj+N2h2aKnAz+6N0Xfl8FwMEDRC2PAlxekASDs= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/redis/go-redis/v9 v9.1.0 h1:137FnGdk+EQdCbye1FW+qOEcY5S+SpY9T0NiuqvtfMY= github.com/redis/go-redis/v9 v9.1.0/go.mod h1:urWj3He21Dj5k4TK1y59xH8Uj6ATueP8AH1cY3lZl4c= -github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk= -github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= -github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg= github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= -github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= -github.com/rs/zerolog 
v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= -github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/saracen/go7z v0.0.0-20191010121135-9c09b6bd7fda h1:h+YpzUB/bGVJcLqW+d5GghcCmE/A25KbzjXvWJQi/+o= github.com/saracen/go7z v0.0.0-20191010121135-9c09b6bd7fda/go.mod h1:MSotTrCv1PwoR8QgU1JurEx+lNNbtr25I+m0zbLyAGw= github.com/saracen/go7z-fixtures v0.0.0-20190623165746-aa6b8fba1d2f h1:PF9WV5j/x6MT+x/sauUHd4objCvJbZb0wdxZkHSdd5A= github.com/saracen/go7z-fixtures v0.0.0-20190623165746-aa6b8fba1d2f/go.mod h1:6Ff0ADODZ6S3gYepgZ2w7OqFrTqtFcfwDUhmm8jsUhs= github.com/saracen/solidblock v0.0.0-20190426153529-45df20abab6f h1:1cJITU3JUI8qNS5T0BlXwANsVdyoJQHQ4hvOxbunPCw= github.com/saracen/solidblock v0.0.0-20190426153529-45df20abab6f/go.mod h1:LyBTue+RWeyIfN3ZJ4wVxvDuvlGJtDgCLgCb6HCPgps= -github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= -github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= -github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod 
h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod 
h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= @@ -505,8 +179,6 @@ github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE= -github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.563/go.mod h1:7sCQWVkxcsR38nffDW057DRGk8mUjK1Ing/EFOK8s8Y= github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/kms v1.0.563/go.mod h1:uom4Nvi9W+Qkom0exYiJ9VWJjXwyxtPYTkKkaLMlfE0= github.com/tencentyun/cos-go-sdk-v5 v0.7.42 h1:Up1704BJjI5orycXKjpVpvuOInt9GC5pqY4knyE9Uds= @@ -515,178 +187,76 @@ github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFA github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= 
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU= github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8= github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY= github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8= github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a h1:fZHgsYlfvtyqToslyjUt3VOPF4J7aK/3MPcK7xp3PDk= github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a/go.mod h1:ul22v+Nro/R083muKhosV54bj5niojjWZvU8xrevuH4= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= -github.com/zenazn/goji v0.9.0/go.mod 
h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= -github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= -go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.mongodb.org/mongo-driver v1.12.1 h1:nLkghSU8fQNaK7oUmDhQFsnrtcoNy7Z6LVFKsEecqgE= go.mongodb.org/mongo-driver v1.12.1/go.mod h1:/rGBTebI3XYboVmgz+Wv3Bcbl3aD0QF9zl6kDDw18rQ= -go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s= -go.opentelemetry.io/otel v1.16.0/go.mod h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4= -go.opentelemetry.io/otel/metric v1.16.0 h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo= -go.opentelemetry.io/otel/metric v1.16.0/go.mod h1:QE47cpOmkwipPiefDwo2wDzwJrlfxxNYodqc4xnGCo4= -go.opentelemetry.io/otel/sdk v1.16.0 h1:Z1Ok1YsijYL0CSJpHt4cS3wDDh7p572grzNrBMiMWgE= -go.opentelemetry.io/otel/sdk v1.16.0/go.mod h1:tMsIuKXuuIWPBAOrH+eHtvhTL+SntFtXF9QD68aP6p4= -go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs= -go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.3.0/go.mod 
h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= -go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= -go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.25.0 h1:4Hvk6GtkucQ790dqmj7l1eEnRdKm3k3ZUrUMS2d5+5c= go.uber.org/zap v1.25.0/go.mod h1:JIAUzQIH94IC4fOJQm7gMmBJP5k7wQfdcnYdPoEXJYk= golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= -golang.org/x/arch v0.4.0 h1:A8WCeEWhLwPBKNbFi5Wv5UTCBx5zzubnXDlMOFAzFMc= -golang.org/x/arch v0.4.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/arch v0.5.0 h1:jpGode6huXQxcskEIpOCvrU+tzo81b6+oFLUYXWtH/Y= +golang.org/x/arch v0.5.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 
-golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk= golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 h1:m64FZMko/V45gv0bNmrNYoDEq8U5YUhetc9cBWKS1TQ= golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63/go.mod h1:0v4NqG35kSWCMzLaMeX+IQrlSnVE/bqGSyC2cz/9Le8= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= 
-golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net 
v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14= golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync 
v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201126233918-771906719818/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210902050250-f475640dd07b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -694,89 +264,36 @@ golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= -golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod 
h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 h1:Vve/L0v7CXXuxUmaMGIEK/dEeq7uiqb5qBgQrZzIE7E= golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= -golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= 
-google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod 
h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc h1:2gGKlE2+asNV9m7xrywl36YYNnBG5ZQ0r/BOOxqPpmk= gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df h1:n7WqCuqOuCbNr617RXOY0AWRXxgwEyPp2z+p0+hgMuE= gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df/go.mod h1:LRQQ+SO6ZHR7tOkpBDuZnXENFzX8qRjMDMyPD6BRkCw= -gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/warnings.v0 v0.1.2/go.mod 
h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= @@ -806,125 +323,5 @@ gorm.io/hints v1.1.2 h1:b5j0kwk5p4+3BtDtYqqfY+ATSxjj+6ptPgVveuynn9o= gorm.io/hints v1.1.2/go.mod h1:/ARdpUHAtyEMCh5NNi3tI7FsGh+Cj/MIUlvNxCNCFWg= gorm.io/plugin/dbresolver v1.4.7 h1:ZwtwmJQxTx9us7o6zEHFvH1q4OeEo1pooU7efmnunJA= gorm.io/plugin/dbresolver v1.4.7/go.mod h1:l4Cn87EHLEYuqUncpEeTC2tTJQkjngPSD+lo8hIvcT0= -honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -lukechampine.com/uint128 v1.1.1 h1:pnxCASz787iMf+02ssImqk6OLt+Z5QHMoZyUXR4z6JU= -lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= -modernc.org/cc/v3 v3.33.6/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= -modernc.org/cc/v3 v3.33.9/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= -modernc.org/cc/v3 v3.33.11/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= -modernc.org/cc/v3 v3.34.0/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= -modernc.org/cc/v3 v3.35.0/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= -modernc.org/cc/v3 v3.35.4/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= 
-modernc.org/cc/v3 v3.35.5/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= -modernc.org/cc/v3 v3.35.7/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= -modernc.org/cc/v3 v3.35.8/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= -modernc.org/cc/v3 v3.35.10/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= -modernc.org/cc/v3 v3.35.15/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= -modernc.org/cc/v3 v3.35.16/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= -modernc.org/cc/v3 v3.35.17/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= -modernc.org/cc/v3 v3.35.18 h1:rMZhRcWrba0y3nVmdiQ7kxAgOOSq2m2f2VzjHLgEs6U= -modernc.org/cc/v3 v3.35.18/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= -modernc.org/ccgo/v3 v3.9.5/go.mod h1:umuo2EP2oDSBnD3ckjaVUXMrmeAw8C8OSICVa0iFf60= -modernc.org/ccgo/v3 v3.10.0/go.mod h1:c0yBmkRFi7uW4J7fwx/JiijwOjeAeR2NoSaRVFPmjMw= -modernc.org/ccgo/v3 v3.11.0/go.mod h1:dGNposbDp9TOZ/1KBxghxtUp/bzErD0/0QW4hhSaBMI= -modernc.org/ccgo/v3 v3.11.1/go.mod h1:lWHxfsn13L3f7hgGsGlU28D9eUOf6y3ZYHKoPaKU0ag= -modernc.org/ccgo/v3 v3.11.3/go.mod h1:0oHunRBMBiXOKdaglfMlRPBALQqsfrCKXgw9okQ3GEw= -modernc.org/ccgo/v3 v3.12.4/go.mod h1:Bk+m6m2tsooJchP/Yk5ji56cClmN6R1cqc9o/YtbgBQ= -modernc.org/ccgo/v3 v3.12.6/go.mod h1:0Ji3ruvpFPpz+yu+1m0wk68pdr/LENABhTrDkMDWH6c= -modernc.org/ccgo/v3 v3.12.8/go.mod h1:Hq9keM4ZfjCDuDXxaHptpv9N24JhgBZmUG5q60iLgUo= -modernc.org/ccgo/v3 v3.12.11/go.mod h1:0jVcmyDwDKDGWbcrzQ+xwJjbhZruHtouiBEvDfoIsdg= -modernc.org/ccgo/v3 v3.12.14/go.mod h1:GhTu1k0YCpJSuWwtRAEHAol5W7g1/RRfS4/9hc9vF5I= -modernc.org/ccgo/v3 v3.12.18/go.mod h1:jvg/xVdWWmZACSgOiAhpWpwHWylbJaSzayCqNOJKIhs= -modernc.org/ccgo/v3 v3.12.20/go.mod h1:aKEdssiu7gVgSy/jjMastnv/q6wWGRbszbheXgWRHc8= -modernc.org/ccgo/v3 v3.12.21/go.mod h1:ydgg2tEprnyMn159ZO/N4pLBqpL7NOkJ88GT5zNU2dE= -modernc.org/ccgo/v3 v3.12.22/go.mod h1:nyDVFMmMWhMsgQw+5JH6B6o4MnZ+UQNw1pp52XYFPRk= -modernc.org/ccgo/v3 v3.12.25/go.mod 
h1:UaLyWI26TwyIT4+ZFNjkyTbsPsY3plAEB6E7L/vZV3w= -modernc.org/ccgo/v3 v3.12.29/go.mod h1:FXVjG7YLf9FetsS2OOYcwNhcdOLGt8S9bQ48+OP75cE= -modernc.org/ccgo/v3 v3.12.36/go.mod h1:uP3/Fiezp/Ga8onfvMLpREq+KUjUmYMxXPO8tETHtA8= -modernc.org/ccgo/v3 v3.12.38/go.mod h1:93O0G7baRST1vNj4wnZ49b1kLxt0xCW5Hsa2qRaZPqc= -modernc.org/ccgo/v3 v3.12.43/go.mod h1:k+DqGXd3o7W+inNujK15S5ZYuPoWYLpF5PYougCmthU= -modernc.org/ccgo/v3 v3.12.46/go.mod h1:UZe6EvMSqOxaJ4sznY7b23/k13R8XNlyWsO5bAmSgOE= -modernc.org/ccgo/v3 v3.12.47/go.mod h1:m8d6p0zNps187fhBwzY/ii6gxfjob1VxWb919Nk1HUk= -modernc.org/ccgo/v3 v3.12.50/go.mod h1:bu9YIwtg+HXQxBhsRDE+cJjQRuINuT9PUK4orOco/JI= -modernc.org/ccgo/v3 v3.12.51/go.mod h1:gaIIlx4YpmGO2bLye04/yeblmvWEmE4BBBls4aJXFiE= -modernc.org/ccgo/v3 v3.12.53/go.mod h1:8xWGGTFkdFEWBEsUmi+DBjwu/WLy3SSOrqEmKUjMeEg= -modernc.org/ccgo/v3 v3.12.54/go.mod h1:yANKFTm9llTFVX1FqNKHE0aMcQb1fuPJx6p8AcUx+74= -modernc.org/ccgo/v3 v3.12.55/go.mod h1:rsXiIyJi9psOwiBkplOaHye5L4MOOaCjHg1Fxkj7IeU= -modernc.org/ccgo/v3 v3.12.56/go.mod h1:ljeFks3faDseCkr60JMpeDb2GSO3TKAmrzm7q9YOcMU= -modernc.org/ccgo/v3 v3.12.57/go.mod h1:hNSF4DNVgBl8wYHpMvPqQWDQx8luqxDnNGCMM4NFNMc= -modernc.org/ccgo/v3 v3.12.60/go.mod h1:k/Nn0zdO1xHVWjPYVshDeWKqbRWIfif5dtsIOCUVMqM= -modernc.org/ccgo/v3 v3.12.65/go.mod h1:D6hQtKxPNZiY6wDBtehSGKFKmyXn53F8nGTpH+POmS4= -modernc.org/ccgo/v3 v3.12.66/go.mod h1:jUuxlCFZTUZLMV08s7B1ekHX5+LIAurKTTaugUr/EhQ= -modernc.org/ccgo/v3 v3.12.67/go.mod h1:Bll3KwKvGROizP2Xj17GEGOTrlvB1XcVaBrC90ORO84= -modernc.org/ccgo/v3 v3.12.73/go.mod h1:hngkB+nUUqzOf3iqsM48Gf1FZhY599qzVg1iX+BT3cQ= -modernc.org/ccgo/v3 v3.12.81/go.mod h1:p2A1duHoBBg1mFtYvnhAnQyI6vL0uw5PGYLSIgF6rYY= -modernc.org/ccgo/v3 v3.12.82 h1:wudcnJyjLj1aQQCXF3IM9Gz2X6UNjw+afIghzdtn0v8= -modernc.org/ccgo/v3 v3.12.82/go.mod h1:ApbflUfa5BKadjHynCficldU1ghjen84tuM5jRynB7w= -modernc.org/ccorpus v1.11.1/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= -modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= 
-modernc.org/libc v1.9.8/go.mod h1:U1eq8YWr/Kc1RWCMFUWEdkTg8OTcfLw2kY8EDwl039w= -modernc.org/libc v1.9.11/go.mod h1:NyF3tsA5ArIjJ83XB0JlqhjTabTCHm9aX4XMPHyQn0Q= -modernc.org/libc v1.11.0/go.mod h1:2lOfPmj7cz+g1MrPNmX65QCzVxgNq2C5o0jdLY2gAYg= -modernc.org/libc v1.11.2/go.mod h1:ioIyrl3ETkugDO3SGZ+6EOKvlP3zSOycUETe4XM4n8M= -modernc.org/libc v1.11.5/go.mod h1:k3HDCP95A6U111Q5TmG3nAyUcp3kR5YFZTeDS9v8vSU= -modernc.org/libc v1.11.6/go.mod h1:ddqmzR6p5i4jIGK1d/EiSw97LBcE3dK24QEwCFvgNgE= -modernc.org/libc v1.11.11/go.mod h1:lXEp9QOOk4qAYOtL3BmMve99S5Owz7Qyowzvg6LiZso= -modernc.org/libc v1.11.13/go.mod h1:ZYawJWlXIzXy2Pzghaf7YfM8OKacP3eZQI81PDLFdY8= -modernc.org/libc v1.11.16/go.mod h1:+DJquzYi+DMRUtWI1YNxrlQO6TcA5+dRRiq8HWBWRC8= -modernc.org/libc v1.11.19/go.mod h1:e0dgEame6mkydy19KKaVPBeEnyJB4LGNb0bBH1EtQ3I= -modernc.org/libc v1.11.24/go.mod h1:FOSzE0UwookyT1TtCJrRkvsOrX2k38HoInhw+cSCUGk= -modernc.org/libc v1.11.26/go.mod h1:SFjnYi9OSd2W7f4ct622o/PAYqk7KHv6GS8NZULIjKY= -modernc.org/libc v1.11.27/go.mod h1:zmWm6kcFXt/jpzeCgfvUNswM0qke8qVwxqZrnddlDiE= -modernc.org/libc v1.11.28/go.mod h1:Ii4V0fTFcbq3qrv3CNn+OGHAvzqMBvC7dBNyC4vHZlg= -modernc.org/libc v1.11.31/go.mod h1:FpBncUkEAtopRNJj8aRo29qUiyx5AvAlAxzlx9GNaVM= -modernc.org/libc v1.11.34/go.mod h1:+Tzc4hnb1iaX/SKAutJmfzES6awxfU1BPvrrJO0pYLg= -modernc.org/libc v1.11.37/go.mod h1:dCQebOwoO1046yTrfUE5nX1f3YpGZQKNcITUYWlrAWo= -modernc.org/libc v1.11.39/go.mod h1:mV8lJMo2S5A31uD0k1cMu7vrJbSA3J3waQJxpV4iqx8= -modernc.org/libc v1.11.42/go.mod h1:yzrLDU+sSjLE+D4bIhS7q1L5UwXDOw99PLSX0BlZvSQ= -modernc.org/libc v1.11.44/go.mod h1:KFq33jsma7F5WXiYelU8quMJasCCTnHK0mkri4yPHgA= -modernc.org/libc v1.11.45/go.mod h1:Y192orvfVQQYFzCNsn+Xt0Hxt4DiO4USpLNXBlXg/tM= -modernc.org/libc v1.11.47/go.mod h1:tPkE4PzCTW27E6AIKIR5IwHAQKCAtudEIeAV1/SiyBg= -modernc.org/libc v1.11.49/go.mod h1:9JrJuK5WTtoTWIFQ7QjX2Mb/bagYdZdscI3xrvHbXjE= -modernc.org/libc v1.11.51/go.mod h1:R9I8u9TS+meaWLdbfQhq2kFknTW0O3aw3kEMqDDxMaM= -modernc.org/libc v1.11.53/go.mod 
h1:5ip5vWYPAoMulkQ5XlSJTy12Sz5U6blOQiYasilVPsU= -modernc.org/libc v1.11.54/go.mod h1:S/FVnskbzVUrjfBqlGFIPA5m7UwB3n9fojHhCNfSsnw= -modernc.org/libc v1.11.55/go.mod h1:j2A5YBRm6HjNkoSs/fzZrSxCuwWqcMYTDPLNx0URn3M= -modernc.org/libc v1.11.56/go.mod h1:pakHkg5JdMLt2OgRadpPOTnyRXm/uzu+Yyg/LSLdi18= -modernc.org/libc v1.11.58/go.mod h1:ns94Rxv0OWyoQrDqMFfWwka2BcaF6/61CqJRK9LP7S8= -modernc.org/libc v1.11.70/go.mod h1:DUOmMYe+IvKi9n6Mycyx3DbjfzSKrdr/0Vgt3j7P5gw= -modernc.org/libc v1.11.71/go.mod h1:DUOmMYe+IvKi9n6Mycyx3DbjfzSKrdr/0Vgt3j7P5gw= -modernc.org/libc v1.11.75/go.mod h1:dGRVugT6edz361wmD9gk6ax1AbDSe0x5vji0dGJiPT0= -modernc.org/libc v1.11.82/go.mod h1:NF+Ek1BOl2jeC7lw3a7Jj5PWyHPwWD4aq3wVKxqV1fI= -modernc.org/libc v1.11.86/go.mod h1:ePuYgoQLmvxdNT06RpGnaDKJmDNEkV7ZPKI2jnsvZoE= -modernc.org/libc v1.11.87 h1:PzIzOqtlzMDDcCzJ5cUP6h/Ku6Fa9iyflP2ccTY64aE= -modernc.org/libc v1.11.87/go.mod h1:Qvd5iXTeLhI5PS0XSyqMY99282y+3euapQFxM7jYnpY= -modernc.org/mathutil v1.1.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= -modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= -modernc.org/mathutil v1.4.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= -modernc.org/mathutil v1.4.1 h1:ij3fYGe8zBF4Vu+g0oT7mB06r8sqGWKuJu1yXeR4by8= -modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= -modernc.org/memory v1.0.4/go.mod h1:nV2OApxradM3/OVbs2/0OsP6nPfakXpi50C7dcoHXlc= -modernc.org/memory v1.0.5 h1:XRch8trV7GgvTec2i7jc33YlUI0RKVDBvZ5eZ5m8y14= -modernc.org/memory v1.0.5/go.mod h1:B7OYswTRnfGg+4tDH1t1OeUNnsy2viGTdME4tzd+IjM= -modernc.org/opt v0.1.1 h1:/0RX92k9vwVeDXj+Xn23DKp2VJubL7k8qNffND6qn3A= -modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= -modernc.org/sqlite v1.14.2 h1:ohsW2+e+Qe2To1W6GNezzKGwjXwSax6R+CrhRxVaFbE= -modernc.org/sqlite v1.14.2/go.mod h1:yqfn85u8wVOE6ub5UT8VI9JjhrwBUUCNyTACN0h6Sx8= -modernc.org/strutil v1.1.1 h1:xv+J1BXY3Opl2ALrBwyfEikFAj8pmqcpnfmuwUwcozs= 
-modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw= -modernc.org/tcl v1.8.13/go.mod h1:V+q/Ef0IJaNUSECieLU4o+8IScapxnMyFV6i/7uQlAY= -modernc.org/token v1.0.0 h1:a0jaWiNMDhDUtqOj09wvjWWAqd3q7WpBulmL9H2egsk= -modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= -modernc.org/z v1.2.19/go.mod h1:+ZpP0pc4zz97eukOzW3xagV/lS82IpPN9NGG5pNF9vY= nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= -sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= -xorm.io/builder v0.3.11-0.20220531020008-1bd24a7dc978/go.mod h1:aUW0S9eb9VCaPohFCH3j7czOx1PMW3i1HrSzbLYGBSE= -xorm.io/builder v0.3.13 h1:a3jmiVVL19psGeXx8GIurTp7p0IIgqeDmwhcR6BAOAo= -xorm.io/builder v0.3.13/go.mod h1:aUW0S9eb9VCaPohFCH3j7czOx1PMW3i1HrSzbLYGBSE= -xorm.io/xorm v1.3.2 h1:uTRRKF2jYzbZ5nsofXVUx6ncMaek+SHjWYtCXyZo1oM= -xorm.io/xorm v1.3.2/go.mod h1:9NbjqdnjX6eyjRRhh01GHm64r6N9shTb/8Ak3YRt8Nw= diff --git a/vendor/github.com/fatih/color/LICENSE.md b/vendor/github.com/fatih/color/LICENSE.md deleted file mode 100644 index 25fdaf63..00000000 --- a/vendor/github.com/fatih/color/LICENSE.md +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Fatih Arslan - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or 
substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/fatih/color/README.md b/vendor/github.com/fatih/color/README.md deleted file mode 100644 index be82827c..00000000 --- a/vendor/github.com/fatih/color/README.md +++ /dev/null @@ -1,176 +0,0 @@ -# color [![](https://github.com/fatih/color/workflows/build/badge.svg)](https://github.com/fatih/color/actions) [![PkgGoDev](https://pkg.go.dev/badge/github.com/fatih/color)](https://pkg.go.dev/github.com/fatih/color) - -Color lets you use colorized outputs in terms of [ANSI Escape -Codes](http://en.wikipedia.org/wiki/ANSI_escape_code#Colors) in Go (Golang). It -has support for Windows too! The API can be used in several ways, pick one that -suits you. 
- -![Color](https://user-images.githubusercontent.com/438920/96832689-03b3e000-13f4-11eb-9803-46f4c4de3406.jpg) - -## Install - -```bash -go get github.com/fatih/color -``` - -## Examples - -### Standard colors - -```go -// Print with default helper functions -color.Cyan("Prints text in cyan.") - -// A newline will be appended automatically -color.Blue("Prints %s in blue.", "text") - -// These are using the default foreground colors -color.Red("We have red") -color.Magenta("And many others ..") - -``` - -### Mix and reuse colors - -```go -// Create a new color object -c := color.New(color.FgCyan).Add(color.Underline) -c.Println("Prints cyan text with an underline.") - -// Or just add them to New() -d := color.New(color.FgCyan, color.Bold) -d.Printf("This prints bold cyan %s\n", "too!.") - -// Mix up foreground and background colors, create new mixes! -red := color.New(color.FgRed) - -boldRed := red.Add(color.Bold) -boldRed.Println("This will print text in bold red.") - -whiteBackground := red.Add(color.BgWhite) -whiteBackground.Println("Red text with white background.") -``` - -### Use your own output (io.Writer) - -```go -// Use your own io.Writer output -color.New(color.FgBlue).Fprintln(myWriter, "blue color!") - -blue := color.New(color.FgBlue) -blue.Fprint(writer, "This will print text in blue.") -``` - -### Custom print functions (PrintFunc) - -```go -// Create a custom print function for convenience -red := color.New(color.FgRed).PrintfFunc() -red("Warning") -red("Error: %s", err) - -// Mix up multiple attributes -notice := color.New(color.Bold, color.FgGreen).PrintlnFunc() -notice("Don't forget this...") -``` - -### Custom fprint functions (FprintFunc) - -```go -blue := color.New(color.FgBlue).FprintfFunc() -blue(myWriter, "important notice: %s", stars) - -// Mix up with multiple attributes -success := color.New(color.Bold, color.FgGreen).FprintlnFunc() -success(myWriter, "Don't forget this...") -``` - -### Insert into noncolor strings (SprintFunc) - -```go 
-// Create SprintXxx functions to mix strings with other non-colorized strings: -yellow := color.New(color.FgYellow).SprintFunc() -red := color.New(color.FgRed).SprintFunc() -fmt.Printf("This is a %s and this is %s.\n", yellow("warning"), red("error")) - -info := color.New(color.FgWhite, color.BgGreen).SprintFunc() -fmt.Printf("This %s rocks!\n", info("package")) - -// Use helper functions -fmt.Println("This", color.RedString("warning"), "should be not neglected.") -fmt.Printf("%v %v\n", color.GreenString("Info:"), "an important message.") - -// Windows supported too! Just don't forget to change the output to color.Output -fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS")) -``` - -### Plug into existing code - -```go -// Use handy standard colors -color.Set(color.FgYellow) - -fmt.Println("Existing text will now be in yellow") -fmt.Printf("This one %s\n", "too") - -color.Unset() // Don't forget to unset - -// You can mix up parameters -color.Set(color.FgMagenta, color.Bold) -defer color.Unset() // Use it in your function - -fmt.Println("All text will now be bold magenta.") -``` - -### Disable/Enable color - -There might be a case where you want to explicitly disable/enable color output. the -`go-isatty` package will automatically disable color output for non-tty output streams -(for example if the output were piped directly to `less`). - -The `color` package also disables color output if the [`NO_COLOR`](https://no-color.org) environment -variable is set to a non-empty string. - -`Color` has support to disable/enable colors programmatically both globally and -for single color definitions. For example suppose you have a CLI app and a -`-no-color` bool flag. You can easily disable the color output with: - -```go -var flagNoColor = flag.Bool("no-color", false, "Disable color output") - -if *flagNoColor { - color.NoColor = true // disables colorized output -} -``` - -It also has support for single color definitions (local). 
You can -disable/enable color output on the fly: - -```go -c := color.New(color.FgCyan) -c.Println("Prints cyan text") - -c.DisableColor() -c.Println("This is printed without any color") - -c.EnableColor() -c.Println("This prints again cyan...") -``` - -## GitHub Actions - -To output color in GitHub Actions (or other CI systems that support ANSI colors), make sure to set `color.NoColor = false` so that it bypasses the check for non-tty output streams. - -## Todo - -* Save/Return previous values -* Evaluate fmt.Formatter interface - -## Credits - -* [Fatih Arslan](https://github.com/fatih) -* Windows support via @mattn: [colorable](https://github.com/mattn/go-colorable) - -## License - -The MIT License (MIT) - see [`LICENSE.md`](https://github.com/fatih/color/blob/master/LICENSE.md) for more details diff --git a/vendor/github.com/fatih/color/color.go b/vendor/github.com/fatih/color/color.go deleted file mode 100644 index 889f9e77..00000000 --- a/vendor/github.com/fatih/color/color.go +++ /dev/null @@ -1,616 +0,0 @@ -package color - -import ( - "fmt" - "io" - "os" - "strconv" - "strings" - "sync" - - "github.com/mattn/go-colorable" - "github.com/mattn/go-isatty" -) - -var ( - // NoColor defines if the output is colorized or not. It's dynamically set to - // false or true based on the stdout's file descriptor referring to a terminal - // or not. It's also set to true if the NO_COLOR environment variable is - // set (regardless of its value). This is a global option and affects all - // colors. For more control over each color block use the methods - // DisableColor() individually. - NoColor = noColorIsSet() || os.Getenv("TERM") == "dumb" || - (!isatty.IsTerminal(os.Stdout.Fd()) && !isatty.IsCygwinTerminal(os.Stdout.Fd())) - - // Output defines the standard output of the print functions. By default, - // os.Stdout is used. - Output = colorable.NewColorableStdout() - - // Error defines a color supporting writer for os.Stderr. 
- Error = colorable.NewColorableStderr() - - // colorsCache is used to reduce the count of created Color objects and - // allows to reuse already created objects with required Attribute. - colorsCache = make(map[Attribute]*Color) - colorsCacheMu sync.Mutex // protects colorsCache -) - -// noColorIsSet returns true if the environment variable NO_COLOR is set to a non-empty string. -func noColorIsSet() bool { - return os.Getenv("NO_COLOR") != "" -} - -// Color defines a custom color object which is defined by SGR parameters. -type Color struct { - params []Attribute - noColor *bool -} - -// Attribute defines a single SGR Code -type Attribute int - -const escape = "\x1b" - -// Base attributes -const ( - Reset Attribute = iota - Bold - Faint - Italic - Underline - BlinkSlow - BlinkRapid - ReverseVideo - Concealed - CrossedOut -) - -// Foreground text colors -const ( - FgBlack Attribute = iota + 30 - FgRed - FgGreen - FgYellow - FgBlue - FgMagenta - FgCyan - FgWhite -) - -// Foreground Hi-Intensity text colors -const ( - FgHiBlack Attribute = iota + 90 - FgHiRed - FgHiGreen - FgHiYellow - FgHiBlue - FgHiMagenta - FgHiCyan - FgHiWhite -) - -// Background text colors -const ( - BgBlack Attribute = iota + 40 - BgRed - BgGreen - BgYellow - BgBlue - BgMagenta - BgCyan - BgWhite -) - -// Background Hi-Intensity text colors -const ( - BgHiBlack Attribute = iota + 100 - BgHiRed - BgHiGreen - BgHiYellow - BgHiBlue - BgHiMagenta - BgHiCyan - BgHiWhite -) - -// New returns a newly created color object. -func New(value ...Attribute) *Color { - c := &Color{ - params: make([]Attribute, 0), - } - - if noColorIsSet() { - c.noColor = boolPtr(true) - } - - c.Add(value...) - return c -} - -// Set sets the given parameters immediately. It will change the color of -// output with the given SGR parameters until color.Unset() is called. -func Set(p ...Attribute) *Color { - c := New(p...) - c.Set() - return c -} - -// Unset resets all escape attributes and clears the output. 
Usually should -// be called after Set(). -func Unset() { - if NoColor { - return - } - - fmt.Fprintf(Output, "%s[%dm", escape, Reset) -} - -// Set sets the SGR sequence. -func (c *Color) Set() *Color { - if c.isNoColorSet() { - return c - } - - fmt.Fprint(Output, c.format()) - return c -} - -func (c *Color) unset() { - if c.isNoColorSet() { - return - } - - Unset() -} - -// SetWriter is used to set the SGR sequence with the given io.Writer. This is -// a low-level function, and users should use the higher-level functions, such -// as color.Fprint, color.Print, etc. -func (c *Color) SetWriter(w io.Writer) *Color { - if c.isNoColorSet() { - return c - } - - fmt.Fprint(w, c.format()) - return c -} - -// UnsetWriter resets all escape attributes and clears the output with the give -// io.Writer. Usually should be called after SetWriter(). -func (c *Color) UnsetWriter(w io.Writer) { - if c.isNoColorSet() { - return - } - - if NoColor { - return - } - - fmt.Fprintf(w, "%s[%dm", escape, Reset) -} - -// Add is used to chain SGR parameters. Use as many as parameters to combine -// and create custom color objects. Example: Add(color.FgRed, color.Underline). -func (c *Color) Add(value ...Attribute) *Color { - c.params = append(c.params, value...) - return c -} - -// Fprint formats using the default formats for its operands and writes to w. -// Spaces are added between operands when neither is a string. -// It returns the number of bytes written and any write error encountered. -// On Windows, users should wrap w with colorable.NewColorable() if w is of -// type *os.File. -func (c *Color) Fprint(w io.Writer, a ...interface{}) (n int, err error) { - c.SetWriter(w) - defer c.UnsetWriter(w) - - return fmt.Fprint(w, a...) -} - -// Print formats using the default formats for its operands and writes to -// standard output. Spaces are added between operands when neither is a -// string. It returns the number of bytes written and any write error -// encountered. 
This is the standard fmt.Print() method wrapped with the given -// color. -func (c *Color) Print(a ...interface{}) (n int, err error) { - c.Set() - defer c.unset() - - return fmt.Fprint(Output, a...) -} - -// Fprintf formats according to a format specifier and writes to w. -// It returns the number of bytes written and any write error encountered. -// On Windows, users should wrap w with colorable.NewColorable() if w is of -// type *os.File. -func (c *Color) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { - c.SetWriter(w) - defer c.UnsetWriter(w) - - return fmt.Fprintf(w, format, a...) -} - -// Printf formats according to a format specifier and writes to standard output. -// It returns the number of bytes written and any write error encountered. -// This is the standard fmt.Printf() method wrapped with the given color. -func (c *Color) Printf(format string, a ...interface{}) (n int, err error) { - c.Set() - defer c.unset() - - return fmt.Fprintf(Output, format, a...) -} - -// Fprintln formats using the default formats for its operands and writes to w. -// Spaces are always added between operands and a newline is appended. -// On Windows, users should wrap w with colorable.NewColorable() if w is of -// type *os.File. -func (c *Color) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { - c.SetWriter(w) - defer c.UnsetWriter(w) - - return fmt.Fprintln(w, a...) -} - -// Println formats using the default formats for its operands and writes to -// standard output. Spaces are always added between operands and a newline is -// appended. It returns the number of bytes written and any write error -// encountered. This is the standard fmt.Print() method wrapped with the given -// color. -func (c *Color) Println(a ...interface{}) (n int, err error) { - c.Set() - defer c.unset() - - return fmt.Fprintln(Output, a...) -} - -// Sprint is just like Print, but returns a string instead of printing it. 
-func (c *Color) Sprint(a ...interface{}) string { - return c.wrap(fmt.Sprint(a...)) -} - -// Sprintln is just like Println, but returns a string instead of printing it. -func (c *Color) Sprintln(a ...interface{}) string { - return c.wrap(fmt.Sprintln(a...)) -} - -// Sprintf is just like Printf, but returns a string instead of printing it. -func (c *Color) Sprintf(format string, a ...interface{}) string { - return c.wrap(fmt.Sprintf(format, a...)) -} - -// FprintFunc returns a new function that prints the passed arguments as -// colorized with color.Fprint(). -func (c *Color) FprintFunc() func(w io.Writer, a ...interface{}) { - return func(w io.Writer, a ...interface{}) { - c.Fprint(w, a...) - } -} - -// PrintFunc returns a new function that prints the passed arguments as -// colorized with color.Print(). -func (c *Color) PrintFunc() func(a ...interface{}) { - return func(a ...interface{}) { - c.Print(a...) - } -} - -// FprintfFunc returns a new function that prints the passed arguments as -// colorized with color.Fprintf(). -func (c *Color) FprintfFunc() func(w io.Writer, format string, a ...interface{}) { - return func(w io.Writer, format string, a ...interface{}) { - c.Fprintf(w, format, a...) - } -} - -// PrintfFunc returns a new function that prints the passed arguments as -// colorized with color.Printf(). -func (c *Color) PrintfFunc() func(format string, a ...interface{}) { - return func(format string, a ...interface{}) { - c.Printf(format, a...) - } -} - -// FprintlnFunc returns a new function that prints the passed arguments as -// colorized with color.Fprintln(). -func (c *Color) FprintlnFunc() func(w io.Writer, a ...interface{}) { - return func(w io.Writer, a ...interface{}) { - c.Fprintln(w, a...) - } -} - -// PrintlnFunc returns a new function that prints the passed arguments as -// colorized with color.Println(). -func (c *Color) PrintlnFunc() func(a ...interface{}) { - return func(a ...interface{}) { - c.Println(a...) 
- } -} - -// SprintFunc returns a new function that returns colorized strings for the -// given arguments with fmt.Sprint(). Useful to put into or mix into other -// string. Windows users should use this in conjunction with color.Output, example: -// -// put := New(FgYellow).SprintFunc() -// fmt.Fprintf(color.Output, "This is a %s", put("warning")) -func (c *Color) SprintFunc() func(a ...interface{}) string { - return func(a ...interface{}) string { - return c.wrap(fmt.Sprint(a...)) - } -} - -// SprintfFunc returns a new function that returns colorized strings for the -// given arguments with fmt.Sprintf(). Useful to put into or mix into other -// string. Windows users should use this in conjunction with color.Output. -func (c *Color) SprintfFunc() func(format string, a ...interface{}) string { - return func(format string, a ...interface{}) string { - return c.wrap(fmt.Sprintf(format, a...)) - } -} - -// SprintlnFunc returns a new function that returns colorized strings for the -// given arguments with fmt.Sprintln(). Useful to put into or mix into other -// string. Windows users should use this in conjunction with color.Output. -func (c *Color) SprintlnFunc() func(a ...interface{}) string { - return func(a ...interface{}) string { - return c.wrap(fmt.Sprintln(a...)) - } -} - -// sequence returns a formatted SGR sequence to be plugged into a "\x1b[...m" -// an example output might be: "1;36" -> bold cyan -func (c *Color) sequence() string { - format := make([]string, len(c.params)) - for i, v := range c.params { - format[i] = strconv.Itoa(int(v)) - } - - return strings.Join(format, ";") -} - -// wrap wraps the s string with the colors attributes. The string is ready to -// be printed. 
-func (c *Color) wrap(s string) string { - if c.isNoColorSet() { - return s - } - - return c.format() + s + c.unformat() -} - -func (c *Color) format() string { - return fmt.Sprintf("%s[%sm", escape, c.sequence()) -} - -func (c *Color) unformat() string { - return fmt.Sprintf("%s[%dm", escape, Reset) -} - -// DisableColor disables the color output. Useful to not change any existing -// code and still being able to output. Can be used for flags like -// "--no-color". To enable back use EnableColor() method. -func (c *Color) DisableColor() { - c.noColor = boolPtr(true) -} - -// EnableColor enables the color output. Use it in conjunction with -// DisableColor(). Otherwise, this method has no side effects. -func (c *Color) EnableColor() { - c.noColor = boolPtr(false) -} - -func (c *Color) isNoColorSet() bool { - // check first if we have user set action - if c.noColor != nil { - return *c.noColor - } - - // if not return the global option, which is disabled by default - return NoColor -} - -// Equals returns a boolean value indicating whether two colors are equal. -func (c *Color) Equals(c2 *Color) bool { - if len(c.params) != len(c2.params) { - return false - } - - for _, attr := range c.params { - if !c2.attrExists(attr) { - return false - } - } - - return true -} - -func (c *Color) attrExists(a Attribute) bool { - for _, attr := range c.params { - if attr == a { - return true - } - } - - return false -} - -func boolPtr(v bool) *bool { - return &v -} - -func getCachedColor(p Attribute) *Color { - colorsCacheMu.Lock() - defer colorsCacheMu.Unlock() - - c, ok := colorsCache[p] - if !ok { - c = New(p) - colorsCache[p] = c - } - - return c -} - -func colorPrint(format string, p Attribute, a ...interface{}) { - c := getCachedColor(p) - - if !strings.HasSuffix(format, "\n") { - format += "\n" - } - - if len(a) == 0 { - c.Print(format) - } else { - c.Printf(format, a...) 
- } -} - -func colorString(format string, p Attribute, a ...interface{}) string { - c := getCachedColor(p) - - if len(a) == 0 { - return c.SprintFunc()(format) - } - - return c.SprintfFunc()(format, a...) -} - -// Black is a convenient helper function to print with black foreground. A -// newline is appended to format by default. -func Black(format string, a ...interface{}) { colorPrint(format, FgBlack, a...) } - -// Red is a convenient helper function to print with red foreground. A -// newline is appended to format by default. -func Red(format string, a ...interface{}) { colorPrint(format, FgRed, a...) } - -// Green is a convenient helper function to print with green foreground. A -// newline is appended to format by default. -func Green(format string, a ...interface{}) { colorPrint(format, FgGreen, a...) } - -// Yellow is a convenient helper function to print with yellow foreground. -// A newline is appended to format by default. -func Yellow(format string, a ...interface{}) { colorPrint(format, FgYellow, a...) } - -// Blue is a convenient helper function to print with blue foreground. A -// newline is appended to format by default. -func Blue(format string, a ...interface{}) { colorPrint(format, FgBlue, a...) } - -// Magenta is a convenient helper function to print with magenta foreground. -// A newline is appended to format by default. -func Magenta(format string, a ...interface{}) { colorPrint(format, FgMagenta, a...) } - -// Cyan is a convenient helper function to print with cyan foreground. A -// newline is appended to format by default. -func Cyan(format string, a ...interface{}) { colorPrint(format, FgCyan, a...) } - -// White is a convenient helper function to print with white foreground. A -// newline is appended to format by default. -func White(format string, a ...interface{}) { colorPrint(format, FgWhite, a...) } - -// BlackString is a convenient helper function to return a string with black -// foreground. 
-func BlackString(format string, a ...interface{}) string { return colorString(format, FgBlack, a...) } - -// RedString is a convenient helper function to return a string with red -// foreground. -func RedString(format string, a ...interface{}) string { return colorString(format, FgRed, a...) } - -// GreenString is a convenient helper function to return a string with green -// foreground. -func GreenString(format string, a ...interface{}) string { return colorString(format, FgGreen, a...) } - -// YellowString is a convenient helper function to return a string with yellow -// foreground. -func YellowString(format string, a ...interface{}) string { return colorString(format, FgYellow, a...) } - -// BlueString is a convenient helper function to return a string with blue -// foreground. -func BlueString(format string, a ...interface{}) string { return colorString(format, FgBlue, a...) } - -// MagentaString is a convenient helper function to return a string with magenta -// foreground. -func MagentaString(format string, a ...interface{}) string { - return colorString(format, FgMagenta, a...) -} - -// CyanString is a convenient helper function to return a string with cyan -// foreground. -func CyanString(format string, a ...interface{}) string { return colorString(format, FgCyan, a...) } - -// WhiteString is a convenient helper function to return a string with white -// foreground. -func WhiteString(format string, a ...interface{}) string { return colorString(format, FgWhite, a...) } - -// HiBlack is a convenient helper function to print with hi-intensity black foreground. A -// newline is appended to format by default. -func HiBlack(format string, a ...interface{}) { colorPrint(format, FgHiBlack, a...) } - -// HiRed is a convenient helper function to print with hi-intensity red foreground. A -// newline is appended to format by default. -func HiRed(format string, a ...interface{}) { colorPrint(format, FgHiRed, a...) 
} - -// HiGreen is a convenient helper function to print with hi-intensity green foreground. A -// newline is appended to format by default. -func HiGreen(format string, a ...interface{}) { colorPrint(format, FgHiGreen, a...) } - -// HiYellow is a convenient helper function to print with hi-intensity yellow foreground. -// A newline is appended to format by default. -func HiYellow(format string, a ...interface{}) { colorPrint(format, FgHiYellow, a...) } - -// HiBlue is a convenient helper function to print with hi-intensity blue foreground. A -// newline is appended to format by default. -func HiBlue(format string, a ...interface{}) { colorPrint(format, FgHiBlue, a...) } - -// HiMagenta is a convenient helper function to print with hi-intensity magenta foreground. -// A newline is appended to format by default. -func HiMagenta(format string, a ...interface{}) { colorPrint(format, FgHiMagenta, a...) } - -// HiCyan is a convenient helper function to print with hi-intensity cyan foreground. A -// newline is appended to format by default. -func HiCyan(format string, a ...interface{}) { colorPrint(format, FgHiCyan, a...) } - -// HiWhite is a convenient helper function to print with hi-intensity white foreground. A -// newline is appended to format by default. -func HiWhite(format string, a ...interface{}) { colorPrint(format, FgHiWhite, a...) } - -// HiBlackString is a convenient helper function to return a string with hi-intensity black -// foreground. -func HiBlackString(format string, a ...interface{}) string { - return colorString(format, FgHiBlack, a...) -} - -// HiRedString is a convenient helper function to return a string with hi-intensity red -// foreground. -func HiRedString(format string, a ...interface{}) string { return colorString(format, FgHiRed, a...) } - -// HiGreenString is a convenient helper function to return a string with hi-intensity green -// foreground. 
-func HiGreenString(format string, a ...interface{}) string { - return colorString(format, FgHiGreen, a...) -} - -// HiYellowString is a convenient helper function to return a string with hi-intensity yellow -// foreground. -func HiYellowString(format string, a ...interface{}) string { - return colorString(format, FgHiYellow, a...) -} - -// HiBlueString is a convenient helper function to return a string with hi-intensity blue -// foreground. -func HiBlueString(format string, a ...interface{}) string { return colorString(format, FgHiBlue, a...) } - -// HiMagentaString is a convenient helper function to return a string with hi-intensity magenta -// foreground. -func HiMagentaString(format string, a ...interface{}) string { - return colorString(format, FgHiMagenta, a...) -} - -// HiCyanString is a convenient helper function to return a string with hi-intensity cyan -// foreground. -func HiCyanString(format string, a ...interface{}) string { return colorString(format, FgHiCyan, a...) } - -// HiWhiteString is a convenient helper function to return a string with hi-intensity white -// foreground. -func HiWhiteString(format string, a ...interface{}) string { - return colorString(format, FgHiWhite, a...) -} diff --git a/vendor/github.com/fatih/color/color_windows.go b/vendor/github.com/fatih/color/color_windows.go deleted file mode 100644 index be01c558..00000000 --- a/vendor/github.com/fatih/color/color_windows.go +++ /dev/null @@ -1,19 +0,0 @@ -package color - -import ( - "os" - - "golang.org/x/sys/windows" -) - -func init() { - // Opt-in for ansi color support for current process. 
- // https://learn.microsoft.com/en-us/windows/console/console-virtual-terminal-sequences#output-sequences - var outMode uint32 - out := windows.Handle(os.Stdout.Fd()) - if err := windows.GetConsoleMode(out, &outMode); err != nil { - return - } - outMode |= windows.ENABLE_PROCESSED_OUTPUT | windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING - _ = windows.SetConsoleMode(out, outMode) -} diff --git a/vendor/github.com/fatih/color/doc.go b/vendor/github.com/fatih/color/doc.go deleted file mode 100644 index 9491ad54..00000000 --- a/vendor/github.com/fatih/color/doc.go +++ /dev/null @@ -1,134 +0,0 @@ -/* -Package color is an ANSI color package to output colorized or SGR defined -output to the standard output. The API can be used in several way, pick one -that suits you. - -Use simple and default helper functions with predefined foreground colors: - - color.Cyan("Prints text in cyan.") - - // a newline will be appended automatically - color.Blue("Prints %s in blue.", "text") - - // More default foreground colors.. - color.Red("We have red") - color.Yellow("Yellow color too!") - color.Magenta("And many others ..") - - // Hi-intensity colors - color.HiGreen("Bright green color.") - color.HiBlack("Bright black means gray..") - color.HiWhite("Shiny white color!") - -However, there are times when custom color mixes are required. Below are some -examples to create custom color objects and use the print functions of each -separate color object. - - // Create a new color object - c := color.New(color.FgCyan).Add(color.Underline) - c.Println("Prints cyan text with an underline.") - - // Or just add them to New() - d := color.New(color.FgCyan, color.Bold) - d.Printf("This prints bold cyan %s\n", "too!.") - - - // Mix up foreground and background colors, create new mixes! 
- red := color.New(color.FgRed) - - boldRed := red.Add(color.Bold) - boldRed.Println("This will print text in bold red.") - - whiteBackground := red.Add(color.BgWhite) - whiteBackground.Println("Red text with White background.") - - // Use your own io.Writer output - color.New(color.FgBlue).Fprintln(myWriter, "blue color!") - - blue := color.New(color.FgBlue) - blue.Fprint(myWriter, "This will print text in blue.") - -You can create PrintXxx functions to simplify even more: - - // Create a custom print function for convenient - red := color.New(color.FgRed).PrintfFunc() - red("warning") - red("error: %s", err) - - // Mix up multiple attributes - notice := color.New(color.Bold, color.FgGreen).PrintlnFunc() - notice("don't forget this...") - -You can also FprintXxx functions to pass your own io.Writer: - - blue := color.New(FgBlue).FprintfFunc() - blue(myWriter, "important notice: %s", stars) - - // Mix up with multiple attributes - success := color.New(color.Bold, color.FgGreen).FprintlnFunc() - success(myWriter, don't forget this...") - -Or create SprintXxx functions to mix strings with other non-colorized strings: - - yellow := New(FgYellow).SprintFunc() - red := New(FgRed).SprintFunc() - - fmt.Printf("this is a %s and this is %s.\n", yellow("warning"), red("error")) - - info := New(FgWhite, BgGreen).SprintFunc() - fmt.Printf("this %s rocks!\n", info("package")) - -Windows support is enabled by default. All Print functions work as intended. -However, only for color.SprintXXX functions, user should use fmt.FprintXXX and -set the output to color.Output: - - fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS")) - - info := New(FgWhite, BgGreen).SprintFunc() - fmt.Fprintf(color.Output, "this %s rocks!\n", info("package")) - -Using with existing code is possible. Just use the Set() method to set the -standard output to the given parameters. That way a rewrite of an existing -code is not required. - - // Use handy standard colors. 
- color.Set(color.FgYellow) - - fmt.Println("Existing text will be now in Yellow") - fmt.Printf("This one %s\n", "too") - - color.Unset() // don't forget to unset - - // You can mix up parameters - color.Set(color.FgMagenta, color.Bold) - defer color.Unset() // use it in your function - - fmt.Println("All text will be now bold magenta.") - -There might be a case where you want to disable color output (for example to -pipe the standard output of your app to somewhere else). `Color` has support to -disable colors both globally and for single color definition. For example -suppose you have a CLI app and a `--no-color` bool flag. You can easily disable -the color output with: - - var flagNoColor = flag.Bool("no-color", false, "Disable color output") - - if *flagNoColor { - color.NoColor = true // disables colorized output - } - -You can also disable the color by setting the NO_COLOR environment variable to any value. - -It also has support for single color definitions (local). You can -disable/enable color output on the fly: - - c := color.New(color.FgCyan) - c.Println("Prints cyan text") - - c.DisableColor() - c.Println("This is printed without any color") - - c.EnableColor() - c.Println("This prints again cyan...") -*/ -package color diff --git a/vendor/github.com/fsnotify/fsnotify/.editorconfig b/vendor/github.com/fsnotify/fsnotify/.editorconfig deleted file mode 100644 index fad89585..00000000 --- a/vendor/github.com/fsnotify/fsnotify/.editorconfig +++ /dev/null @@ -1,12 +0,0 @@ -root = true - -[*.go] -indent_style = tab -indent_size = 4 -insert_final_newline = true - -[*.{yml,yaml}] -indent_style = space -indent_size = 2 -insert_final_newline = true -trim_trailing_whitespace = true diff --git a/vendor/github.com/fsnotify/fsnotify/.gitattributes b/vendor/github.com/fsnotify/fsnotify/.gitattributes deleted file mode 100644 index 32f1001b..00000000 --- a/vendor/github.com/fsnotify/fsnotify/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -go.sum linguist-generated diff 
--git a/vendor/github.com/fsnotify/fsnotify/.gitignore b/vendor/github.com/fsnotify/fsnotify/.gitignore deleted file mode 100644 index 1d89d85c..00000000 --- a/vendor/github.com/fsnotify/fsnotify/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -# go test -c output -*.test -*.test.exe - -# Output of go build ./cmd/fsnotify -/fsnotify diff --git a/vendor/github.com/fsnotify/fsnotify/.mailmap b/vendor/github.com/fsnotify/fsnotify/.mailmap deleted file mode 100644 index a04f2907..00000000 --- a/vendor/github.com/fsnotify/fsnotify/.mailmap +++ /dev/null @@ -1,2 +0,0 @@ -Chris Howey -Nathan Youngman <4566+nathany@users.noreply.github.com> diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md deleted file mode 100644 index 77f9593b..00000000 --- a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md +++ /dev/null @@ -1,470 +0,0 @@ -# Changelog - -All notable changes to this project will be documented in this file. - -The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), -and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). - -## [Unreleased] - -Nothing yet. - -## [1.6.0] - 2022-10-13 - -This version of fsnotify needs Go 1.16 (this was already the case since 1.5.1, -but not documented). It also increases the minimum Linux version to 2.6.32. - -### Additions - -- all: add `Event.Has()` and `Op.Has()` ([#477]) - - This makes checking events a lot easier; for example: - - if event.Op&Write == Write && !(event.Op&Remove == Remove) { - } - - Becomes: - - if event.Has(Write) && !event.Has(Remove) { - } - -- all: add cmd/fsnotify ([#463]) - - A command-line utility for testing and some examples. - -### Changes and fixes - -- inotify: don't ignore events for files that don't exist ([#260], [#470]) - - Previously the inotify watcher would call `os.Lstat()` to check if a file - still exists before emitting events. 
- - This was inconsistent with other platforms and resulted in inconsistent event - reporting (e.g. when a file is quickly removed and re-created), and generally - a source of confusion. It was added in 2013 to fix a memory leak that no - longer exists. - -- all: return `ErrNonExistentWatch` when `Remove()` is called on a path that's - not watched ([#460]) - -- inotify: replace epoll() with non-blocking inotify ([#434]) - - Non-blocking inotify was not generally available at the time this library was - written in 2014, but now it is. As a result, the minimum Linux version is - bumped from 2.6.27 to 2.6.32. This hugely simplifies the code and is faster. - -- kqueue: don't check for events every 100ms ([#480]) - - The watcher would wake up every 100ms, even when there was nothing to do. Now - it waits until there is something to do. - -- macos: retry opening files on EINTR ([#475]) - -- kqueue: skip unreadable files ([#479]) - - kqueue requires a file descriptor for every file in a directory; this would - fail if a file was unreadable by the current user. Now these files are simply - skipped. - -- windows: fix renaming a watched directory if the parent is also watched ([#370]) - -- windows: increase buffer size from 4K to 64K ([#485]) - -- windows: close file handle on Remove() ([#288]) - -- kqueue: put pathname in the error if watching a file fails ([#471]) - -- inotify, windows: calling Close() more than once could race ([#465]) - -- kqueue: improve Close() performance ([#233]) - -- all: various documentation additions and clarifications. 
- -[#233]: https://github.com/fsnotify/fsnotify/pull/233 -[#260]: https://github.com/fsnotify/fsnotify/pull/260 -[#288]: https://github.com/fsnotify/fsnotify/pull/288 -[#370]: https://github.com/fsnotify/fsnotify/pull/370 -[#434]: https://github.com/fsnotify/fsnotify/pull/434 -[#460]: https://github.com/fsnotify/fsnotify/pull/460 -[#463]: https://github.com/fsnotify/fsnotify/pull/463 -[#465]: https://github.com/fsnotify/fsnotify/pull/465 -[#470]: https://github.com/fsnotify/fsnotify/pull/470 -[#471]: https://github.com/fsnotify/fsnotify/pull/471 -[#475]: https://github.com/fsnotify/fsnotify/pull/475 -[#477]: https://github.com/fsnotify/fsnotify/pull/477 -[#479]: https://github.com/fsnotify/fsnotify/pull/479 -[#480]: https://github.com/fsnotify/fsnotify/pull/480 -[#485]: https://github.com/fsnotify/fsnotify/pull/485 - -## [1.5.4] - 2022-04-25 - -* Windows: add missing defer to `Watcher.WatchList` [#447](https://github.com/fsnotify/fsnotify/pull/447) -* go.mod: use latest x/sys [#444](https://github.com/fsnotify/fsnotify/pull/444) -* Fix compilation for OpenBSD [#443](https://github.com/fsnotify/fsnotify/pull/443) - -## [1.5.3] - 2022-04-22 - -* This version is retracted. 
An incorrect branch is published accidentally [#445](https://github.com/fsnotify/fsnotify/issues/445) - -## [1.5.2] - 2022-04-21 - -* Add a feature to return the directories and files that are being monitored [#374](https://github.com/fsnotify/fsnotify/pull/374) -* Fix potential crash on windows if `raw.FileNameLength` exceeds `syscall.MAX_PATH` [#361](https://github.com/fsnotify/fsnotify/pull/361) -* Allow build on unsupported GOOS [#424](https://github.com/fsnotify/fsnotify/pull/424) -* Don't set `poller.fd` twice in `newFdPoller` [#406](https://github.com/fsnotify/fsnotify/pull/406) -* fix go vet warnings: call to `(*T).Fatalf` from a non-test goroutine [#416](https://github.com/fsnotify/fsnotify/pull/416) - -## [1.5.1] - 2021-08-24 - -* Revert Add AddRaw to not follow symlinks [#394](https://github.com/fsnotify/fsnotify/pull/394) - -## [1.5.0] - 2021-08-20 - -* Go: Increase minimum required version to Go 1.12 [#381](https://github.com/fsnotify/fsnotify/pull/381) -* Feature: Add AddRaw method which does not follow symlinks when adding a watch [#289](https://github.com/fsnotify/fsnotify/pull/298) -* Windows: Follow symlinks by default like on all other systems [#289](https://github.com/fsnotify/fsnotify/pull/289) -* CI: Use GitHub Actions for CI and cover go 1.12-1.17 - [#378](https://github.com/fsnotify/fsnotify/pull/378) - [#381](https://github.com/fsnotify/fsnotify/pull/381) - [#385](https://github.com/fsnotify/fsnotify/pull/385) -* Go 1.14+: Fix unsafe pointer conversion [#325](https://github.com/fsnotify/fsnotify/pull/325) - -## [1.4.9] - 2020-03-11 - -* Move example usage to the readme #329. This may resolve #328. 
- -## [1.4.8] - 2020-03-10 - -* CI: test more go versions (@nathany 1d13583d846ea9d66dcabbfefbfb9d8e6fb05216) -* Tests: Queued inotify events could have been read by the test before max_queued_events was hit (@matthias-stone #265) -* Tests: t.Fatalf -> t.Errorf in go routines (@gdey #266) -* CI: Less verbosity (@nathany #267) -* Tests: Darwin: Exchangedata is deprecated on 10.13 (@nathany #267) -* Tests: Check if channels are closed in the example (@alexeykazakov #244) -* CI: Only run golint on latest version of go and fix issues (@cpuguy83 #284) -* CI: Add windows to travis matrix (@cpuguy83 #284) -* Docs: Remover appveyor badge (@nathany 11844c0959f6fff69ba325d097fce35bd85a8e93) -* Linux: create epoll and pipe fds with close-on-exec (@JohannesEbke #219) -* Linux: open files with close-on-exec (@linxiulei #273) -* Docs: Plan to support fanotify (@nathany ab058b44498e8b7566a799372a39d150d9ea0119 ) -* Project: Add go.mod (@nathany #309) -* Project: Revise editor config (@nathany #309) -* Project: Update copyright for 2019 (@nathany #309) -* CI: Drop go1.8 from CI matrix (@nathany #309) -* Docs: Updating the FAQ section for supportability with NFS & FUSE filesystems (@Pratik32 4bf2d1fec78374803a39307bfb8d340688f4f28e ) - -## [1.4.7] - 2018-01-09 - -* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine) -* Tests: Fix missing verb on format string (thanks @rchiossi) -* Linux: Fix deadlock in Remove (thanks @aarondl) -* Linux: Watch.Add improvements (avoid race, fix consistency, reduce garbage) (thanks @twpayne) -* Docs: Moved FAQ into the README (thanks @vahe) -* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich) -* Docs: replace references to OS X with macOS - -## [1.4.2] - 2016-10-10 - -* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack) - -## [1.4.1] - 2016-10-04 
- -* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack) - -## [1.4.0] - 2016-10-01 - -* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie) - -## [1.3.1] - 2016-06-28 - -* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc) - -## [1.3.0] - 2016-04-19 - -* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135) - -## [1.2.10] - 2016-03-02 - -* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj) - -## [1.2.9] - 2016-01-13 - -kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep) - -## [1.2.8] - 2015-12-17 - -* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test) -* inotify: fix race in test -* enable race detection for continuous integration (Linux, Mac, Windows) - -## [1.2.5] - 2015-10-17 - -* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki) -* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken) -* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie) -* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion) - -## [1.2.1] - 2015-10-14 - -* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx) - -## [1.2.0] - 2015-02-08 - -* inotify: use epoll to wake up readEvents 
[#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD) -* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD) -* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59) - -## [1.1.1] - 2015-02-05 - -* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD) - -## [1.1.0] - 2014-12-12 - -* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43) - * add low-level functions - * only need to store flags on directories - * less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13) - * done can be an unbuffered channel - * remove calls to os.NewSyscallError -* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher) -* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48) -* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) - -## [1.0.4] - 2014-09-07 - -* kqueue: add dragonfly to the build tags. -* Rename source code files, rearrange code so exported APIs are at the top. -* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang) - -## [1.0.3] - 2014-08-19 - -* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36) - -## [1.0.2] - 2014-08-17 - -* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) -* [Fix] Make ./path and path equivalent. (thanks @zhsso) - -## [1.0.0] - 2014-08-15 - -* [API] Remove AddWatch on Windows, use Add. -* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30) -* Minor updates based on feedback from golint. 
- -## dev / 2014-07-09 - -* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify). -* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno) - -## dev / 2014-07-04 - -* kqueue: fix incorrect mutex used in Close() -* Update example to demonstrate usage of Op. - -## dev / 2014-06-28 - -* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4) -* Fix for String() method on Event (thanks Alex Brainman) -* Don't build on Plan 9 or Solaris (thanks @4ad) - -## dev / 2014-06-21 - -* Events channel of type Event rather than *Event. -* [internal] use syscall constants directly for inotify and kqueue. -* [internal] kqueue: rename events to kevents and fileEvent to event. - -## dev / 2014-06-19 - -* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally). -* [internal] remove cookie from Event struct (unused). -* [internal] Event struct has the same definition across every OS. -* [internal] remove internal watch and removeWatch methods. - -## dev / 2014-06-12 - -* [API] Renamed Watch() to Add() and RemoveWatch() to Remove(). -* [API] Pluralized channel names: Events and Errors. -* [API] Renamed FileEvent struct to Event. -* [API] Op constants replace methods like IsCreate(). - -## dev / 2014-06-12 - -* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) - -## dev / 2014-05-23 - -* [API] Remove current implementation of WatchFlags. 
- * current implementation doesn't take advantage of OS for efficiency - * provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes - * no tests for the current implementation - * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195) - -## [0.9.3] - 2014-12-31 - -* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) - -## [0.9.2] - 2014-08-17 - -* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) - -## [0.9.1] - 2014-06-12 - -* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) - -## [0.9.0] - 2014-01-17 - -* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany) -* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare) -* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library. 
- -## [0.8.12] - 2013-11-13 - -* [API] Remove FD_SET and friends from Linux adapter - -## [0.8.11] - 2013-11-02 - -* [Doc] Add Changelog [#72][] (thanks @nathany) -* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond) - -## [0.8.10] - 2013-10-19 - -* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott) -* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer) -* [Doc] specify OS-specific limits in README (thanks @debrando) - -## [0.8.9] - 2013-09-08 - -* [Doc] Contributing (thanks @nathany) -* [Doc] update package path in example code [#63][] (thanks @paulhammond) -* [Doc] GoCI badge in README (Linux only) [#60][] -* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany) - -## [0.8.8] - 2013-06-17 - -* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie) - -## [0.8.7] - 2013-06-03 - -* [API] Make syscall flags internal -* [Fix] inotify: ignore event changes -* [Fix] race in symlink test [#45][] (reported by @srid) -* [Fix] tests on Windows -* lower case error messages - -## [0.8.6] - 2013-05-23 - -* kqueue: Use EVT_ONLY flag on Darwin -* [Doc] Update README with full example - -## [0.8.5] - 2013-05-09 - -* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg) - -## [0.8.4] - 2013-04-07 - -* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz) - -## [0.8.3] - 2013-03-13 - -* [Fix] inoitfy/kqueue memory leak [#36][] (reported by @nbkolchin) -* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin) - -## [0.8.2] - 2013-02-07 - -* [Doc] add Authors -* [Fix] fix data races for map access [#29][] (thanks @fsouza) - -## [0.8.1] - 2013-01-09 - -* [Fix] Windows path separators -* [Doc] BSD License - -## [0.8.0] - 2012-11-09 - -* kqueue: directory watching improvements (thanks @vmirage) -* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto) -* [Fix] kqueue: 
deleting watched directory [#24][] (reported by @jakerr) - -## [0.7.4] - 2012-10-09 - -* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji) -* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig) -* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig) -* [Fix] kqueue: modify after recreation of file - -## [0.7.3] - 2012-09-27 - -* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage) -* [Fix] kqueue: no longer get duplicate CREATE events - -## [0.7.2] - 2012-09-01 - -* kqueue: events for created directories - -## [0.7.1] - 2012-07-14 - -* [Fix] for renaming files - -## [0.7.0] - 2012-07-02 - -* [Feature] FSNotify flags -* [Fix] inotify: Added file name back to event path - -## [0.6.0] - 2012-06-06 - -* kqueue: watch files after directory created (thanks @tmc) - -## [0.5.1] - 2012-05-22 - -* [Fix] inotify: remove all watches before Close() - -## [0.5.0] - 2012-05-03 - -* [API] kqueue: return errors during watch instead of sending over channel -* kqueue: match symlink behavior on Linux -* inotify: add `DELETE_SELF` (requested by @taralx) -* [Fix] kqueue: handle EINTR (reported by @robfig) -* [Doc] Godoc example [#1][] (thanks @davecheney) - -## [0.4.0] - 2012-03-30 - -* Go 1 released: build with go tool -* [Feature] Windows support using winfsnotify -* Windows does not have attribute change notifications -* Roll attribute notifications into IsModify - -## [0.3.0] - 2012-02-19 - -* kqueue: add files when watch directory - -## [0.2.0] - 2011-12-30 - -* update to latest Go weekly code - -## [0.1.0] - 2011-10-19 - -* kqueue: add watch on file creation to match inotify -* kqueue: create file event -* inotify: ignore `IN_IGNORED` events -* event String() -* linux: common FileEvent functions -* initial commit - -[#79]: https://github.com/howeyc/fsnotify/pull/79 -[#77]: https://github.com/howeyc/fsnotify/pull/77 -[#72]: 
https://github.com/howeyc/fsnotify/issues/72 -[#71]: https://github.com/howeyc/fsnotify/issues/71 -[#70]: https://github.com/howeyc/fsnotify/issues/70 -[#63]: https://github.com/howeyc/fsnotify/issues/63 -[#62]: https://github.com/howeyc/fsnotify/issues/62 -[#60]: https://github.com/howeyc/fsnotify/issues/60 -[#59]: https://github.com/howeyc/fsnotify/issues/59 -[#49]: https://github.com/howeyc/fsnotify/issues/49 -[#45]: https://github.com/howeyc/fsnotify/issues/45 -[#40]: https://github.com/howeyc/fsnotify/issues/40 -[#36]: https://github.com/howeyc/fsnotify/issues/36 -[#33]: https://github.com/howeyc/fsnotify/issues/33 -[#29]: https://github.com/howeyc/fsnotify/issues/29 -[#25]: https://github.com/howeyc/fsnotify/issues/25 -[#24]: https://github.com/howeyc/fsnotify/issues/24 -[#21]: https://github.com/howeyc/fsnotify/issues/21 diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md deleted file mode 100644 index ea379759..00000000 --- a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md +++ /dev/null @@ -1,26 +0,0 @@ -Thank you for your interest in contributing to fsnotify! We try to review and -merge PRs in a reasonable timeframe, but please be aware that: - -- To avoid "wasted" work, please discus changes on the issue tracker first. You - can just send PRs, but they may end up being rejected for one reason or the - other. - -- fsnotify is a cross-platform library, and changes must work reasonably well on - all supported platforms. - -- Changes will need to be compatible; old code should still compile, and the - runtime behaviour can't change in ways that are likely to lead to problems for - users. - -Testing -------- -Just `go test ./...` runs all the tests; the CI runs this on all supported -platforms. Testing different platforms locally can be done with something like -[goon] or [Vagrant], but this isn't super-easy to set up at the moment. 
- -Use the `-short` flag to make the "stress test" run faster. - - -[goon]: https://github.com/arp242/goon -[Vagrant]: https://www.vagrantup.com/ -[integration_test.go]: /integration_test.go diff --git a/vendor/github.com/fsnotify/fsnotify/LICENSE b/vendor/github.com/fsnotify/fsnotify/LICENSE deleted file mode 100644 index fb03ade7..00000000 --- a/vendor/github.com/fsnotify/fsnotify/LICENSE +++ /dev/null @@ -1,25 +0,0 @@ -Copyright © 2012 The Go Authors. All rights reserved. -Copyright © fsnotify Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. -* Redistributions in binary form must reproduce the above copyright notice, this - list of conditions and the following disclaimer in the documentation and/or - other materials provided with the distribution. -* Neither the name of Google Inc. nor the names of its contributors may be used - to endorse or promote products derived from this software without specific - prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/fsnotify/fsnotify/README.md b/vendor/github.com/fsnotify/fsnotify/README.md deleted file mode 100644 index d4e6080f..00000000 --- a/vendor/github.com/fsnotify/fsnotify/README.md +++ /dev/null @@ -1,161 +0,0 @@ -fsnotify is a Go library to provide cross-platform filesystem notifications on -Windows, Linux, macOS, and BSD systems. - -Go 1.16 or newer is required; the full documentation is at -https://pkg.go.dev/github.com/fsnotify/fsnotify - -**It's best to read the documentation at pkg.go.dev, as it's pinned to the last -released version, whereas this README is for the last development version which -may include additions/changes.** - ---- - -Platform support: - -| Adapter | OS | Status | -| --------------------- | ---------------| -------------------------------------------------------------| -| inotify | Linux 2.6.32+ | Supported | -| kqueue | BSD, macOS | Supported | -| ReadDirectoryChangesW | Windows | Supported | -| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) | -| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/pull/371) | -| fanotify | Linux 5.9+ | [Maybe](https://github.com/fsnotify/fsnotify/issues/114) | -| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) | -| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) | - -Linux and macOS should include Android and iOS, but these are currently untested. - -Usage ------ -A basic example: - -```go -package main - -import ( - "log" - - "github.com/fsnotify/fsnotify" -) - -func main() { - // Create new watcher. - watcher, err := fsnotify.NewWatcher() - if err != nil { - log.Fatal(err) - } - defer watcher.Close() - - // Start listening for events. 
- go func() { - for { - select { - case event, ok := <-watcher.Events: - if !ok { - return - } - log.Println("event:", event) - if event.Has(fsnotify.Write) { - log.Println("modified file:", event.Name) - } - case err, ok := <-watcher.Errors: - if !ok { - return - } - log.Println("error:", err) - } - } - }() - - // Add a path. - err = watcher.Add("/tmp") - if err != nil { - log.Fatal(err) - } - - // Block main goroutine forever. - <-make(chan struct{}) -} -``` - -Some more examples can be found in [cmd/fsnotify](cmd/fsnotify), which can be -run with: - - % go run ./cmd/fsnotify - -FAQ ---- -### Will a file still be watched when it's moved to another directory? -No, not unless you are watching the location it was moved to. - -### Are subdirectories watched too? -No, you must add watches for any directory you want to watch (a recursive -watcher is on the roadmap: [#18]). - -[#18]: https://github.com/fsnotify/fsnotify/issues/18 - -### Do I have to watch the Error and Event channels in a goroutine? -As of now, yes (you can read both channels in the same goroutine using `select`, -you don't need a separate goroutine for both channels; see the example). - -### Why don't notifications work with NFS, SMB, FUSE, /proc, or /sys? -fsnotify requires support from underlying OS to work. The current NFS and SMB -protocols does not provide network level support for file notifications, and -neither do the /proc and /sys virtual filesystems. - -This could be fixed with a polling watcher ([#9]), but it's not yet implemented. - -[#9]: https://github.com/fsnotify/fsnotify/issues/9 - -Platform-specific notes ------------------------ -### Linux -When a file is removed a REMOVE event won't be emitted until all file -descriptors are closed; it will emit a CHMOD instead: - - fp := os.Open("file") - os.Remove("file") // CHMOD - fp.Close() // REMOVE - -This is the event that inotify sends, so not much can be changed about this. 
- -The `fs.inotify.max_user_watches` sysctl variable specifies the upper limit for -the number of watches per user, and `fs.inotify.max_user_instances` specifies -the maximum number of inotify instances per user. Every Watcher you create is an -"instance", and every path you add is a "watch". - -These are also exposed in `/proc` as `/proc/sys/fs/inotify/max_user_watches` and -`/proc/sys/fs/inotify/max_user_instances` - -To increase them you can use `sysctl` or write the value to proc file: - - # The default values on Linux 5.18 - sysctl fs.inotify.max_user_watches=124983 - sysctl fs.inotify.max_user_instances=128 - -To make the changes persist on reboot edit `/etc/sysctl.conf` or -`/usr/lib/sysctl.d/50-default.conf` (details differ per Linux distro; check your -distro's documentation): - - fs.inotify.max_user_watches=124983 - fs.inotify.max_user_instances=128 - -Reaching the limit will result in a "no space left on device" or "too many open -files" error. - -### kqueue (macOS, all BSD systems) -kqueue requires opening a file descriptor for every file that's being watched; -so if you're watching a directory with five files then that's six file -descriptors. You will run in to your system's "max open files" limit faster on -these platforms. - -The sysctl variables `kern.maxfiles` and `kern.maxfilesperproc` can be used to -control the maximum number of open files. - -### macOS -Spotlight indexing on macOS can result in multiple events (see [#15]). A temporary -workaround is to add your folder(s) to the *Spotlight Privacy settings* until we -have a native FSEvents implementation (see [#11]). 
- -[#11]: https://github.com/fsnotify/fsnotify/issues/11 -[#15]: https://github.com/fsnotify/fsnotify/issues/15 diff --git a/vendor/github.com/fsnotify/fsnotify/backend_fen.go b/vendor/github.com/fsnotify/fsnotify/backend_fen.go deleted file mode 100644 index 1a95ad8e..00000000 --- a/vendor/github.com/fsnotify/fsnotify/backend_fen.go +++ /dev/null @@ -1,162 +0,0 @@ -//go:build solaris -// +build solaris - -package fsnotify - -import ( - "errors" -) - -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. 
-// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # macOS notes -// -// Spotlight indexing on macOS can result in multiple events (see [#15]). A -// temporary workaround is to add your folder(s) to the "Spotlight Privacy -// Settings" until we have a native FSEvents implementation (see [#11]). -// -// [#11]: https://github.com/fsnotify/fsnotify/issues/11 -// [#15]: https://github.com/fsnotify/fsnotify/issues/15 -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. 
For example when compiling a large Go program - // you may get hundreds of Write events, so you - // probably want to wait until you've stopped receiving - // them (see the dedup example in cmd/fsnotify). - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // and on kqueue when a file is truncated. On Windows - // it's never sent. - Events chan Event - - // Errors sends any errors. - Errors chan error -} - -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return nil, errors.New("FEN based watcher not yet supported for fsnotify\n") -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - return nil -} - -// Add starts monitoring the path for changes. -// -// A path can only be watched once; attempting to watch it more than once will -// return an error. Paths that do not yet exist on the filesystem cannot be -// added. A watch will be automatically removed if the path is deleted. -// -// A path will remain watched if it gets renamed to somewhere else on the same -// filesystem, but the monitor will get removed if the path gets deleted and -// re-created, or if it's moved to a different filesystem. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many tools update files atomically. 
Instead of "just" writing -// to the file a temporary file will be written to first, and if successful the -// temporary file is moved to to destination removing the original, or some -// variant thereof. The watcher on the original file is now lost, as it no -// longer exists. -// -// Instead, watch the parent directory and use Event.Name to filter out files -// you're not interested in. There is an example of this in [cmd/fsnotify/file.go]. -func (w *Watcher) Add(name string) error { - return nil -} - -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -func (w *Watcher) Remove(name string) error { - return nil -} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go deleted file mode 100644 index 54c77fbb..00000000 --- a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go +++ /dev/null @@ -1,459 +0,0 @@ -//go:build linux -// +build linux - -package fsnotify - -import ( - "errors" - "fmt" - "io" - "os" - "path/filepath" - "strings" - "sync" - "unsafe" - - "golang.org/x/sys/unix" -) - -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. 
-// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # macOS notes -// -// Spotlight indexing on macOS can result in multiple events (see [#15]). A -// temporary workaround is to add your folder(s) to the "Spotlight Privacy -// Settings" until we have a native FSEvents implementation (see [#11]). -// -// [#11]: https://github.com/fsnotify/fsnotify/issues/11 -// [#15]: https://github.com/fsnotify/fsnotify/issues/15 -type Watcher struct { - // Events sends the filesystem change events. 
- // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, so you - // probably want to wait until you've stopped receiving - // them (see the dedup example in cmd/fsnotify). - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // and on kqueue when a file is truncated. On Windows - // it's never sent. - Events chan Event - - // Errors sends any errors. - Errors chan error - - // Store fd here as os.File.Read() will no longer return on close after - // calling Fd(). 
See: https://github.com/golang/go/issues/26439 - fd int - mu sync.Mutex // Map access - inotifyFile *os.File - watches map[string]*watch // Map of inotify watches (key: path) - paths map[int]string // Map of watched paths (key: watch descriptor) - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - doneResp chan struct{} // Channel to respond to Close -} - -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - // Create inotify fd - // Need to set the FD to nonblocking mode in order for SetDeadline methods to work - // Otherwise, blocking i/o operations won't terminate on close - fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK) - if fd == -1 { - return nil, errno - } - - w := &Watcher{ - fd: fd, - inotifyFile: os.NewFile(uintptr(fd), ""), - watches: make(map[string]*watch), - paths: make(map[int]string), - Events: make(chan Event), - Errors: make(chan error), - done: make(chan struct{}), - doneResp: make(chan struct{}), - } - - go w.readEvents() - return w, nil -} - -// Returns true if the event was sent, or false if watcher is closed. -func (w *Watcher) sendEvent(e Event) bool { - select { - case w.Events <- e: - return true - case <-w.done: - } - return false -} - -// Returns true if the error was sent, or false if watcher is closed. -func (w *Watcher) sendError(err error) bool { - select { - case w.Errors <- err: - return true - case <-w.done: - return false - } -} - -func (w *Watcher) isClosed() bool { - select { - case <-w.done: - return true - default: - return false - } -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - w.mu.Lock() - if w.isClosed() { - w.mu.Unlock() - return nil - } - - // Send 'close' signal to goroutine, and set the Watcher to closed. - close(w.done) - w.mu.Unlock() - - // Causes any blocking reads to return with an error, provided the file - // still supports deadline operations. 
- err := w.inotifyFile.Close() - if err != nil { - return err - } - - // Wait for goroutine to close - <-w.doneResp - - return nil -} - -// Add starts monitoring the path for changes. -// -// A path can only be watched once; attempting to watch it more than once will -// return an error. Paths that do not yet exist on the filesystem cannot be -// added. A watch will be automatically removed if the path is deleted. -// -// A path will remain watched if it gets renamed to somewhere else on the same -// filesystem, but the monitor will get removed if the path gets deleted and -// re-created, or if it's moved to a different filesystem. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many tools update files atomically. Instead of "just" writing -// to the file a temporary file will be written to first, and if successful the -// temporary file is moved to to destination removing the original, or some -// variant thereof. The watcher on the original file is now lost, as it no -// longer exists. -// -// Instead, watch the parent directory and use Event.Name to filter out files -// you're not interested in. There is an example of this in [cmd/fsnotify/file.go]. 
-func (w *Watcher) Add(name string) error { - name = filepath.Clean(name) - if w.isClosed() { - return errors.New("inotify instance already closed") - } - - var flags uint32 = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | - unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | - unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF - - w.mu.Lock() - defer w.mu.Unlock() - watchEntry := w.watches[name] - if watchEntry != nil { - flags |= watchEntry.flags | unix.IN_MASK_ADD - } - wd, errno := unix.InotifyAddWatch(w.fd, name, flags) - if wd == -1 { - return errno - } - - if watchEntry == nil { - w.watches[name] = &watch{wd: uint32(wd), flags: flags} - w.paths[wd] = name - } else { - watchEntry.wd = uint32(wd) - watchEntry.flags = flags - } - - return nil -} - -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -func (w *Watcher) Remove(name string) error { - name = filepath.Clean(name) - - // Fetch the watch. - w.mu.Lock() - defer w.mu.Unlock() - watch, ok := w.watches[name] - - // Remove it from inotify. - if !ok { - return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) - } - - // We successfully removed the watch if InotifyRmWatch doesn't return an - // error, we need to clean up our internal state to ensure it matches - // inotify's kernel state. - delete(w.paths, int(watch.wd)) - delete(w.watches, name) - - // inotify_rm_watch will return EINVAL if the file has been deleted; - // the inotify will already have been removed. - // watches and pathes are deleted in ignoreLinux() implicitly and asynchronously - // by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE - // so that EINVAL means that the wd is being rm_watch()ed or its file removed - // by another thread and we have not received IN_IGNORE event. 
- success, errno := unix.InotifyRmWatch(w.fd, watch.wd) - if success == -1 { - // TODO: Perhaps it's not helpful to return an error here in every case; - // The only two possible errors are: - // - // - EBADF, which happens when w.fd is not a valid file descriptor - // of any kind. - // - EINVAL, which is when fd is not an inotify descriptor or wd - // is not a valid watch descriptor. Watch descriptors are - // invalidated when they are removed explicitly or implicitly; - // explicitly by inotify_rm_watch, implicitly when the file they - // are watching is deleted. - return errno - } - - return nil -} - -// WatchList returns all paths added with [Add] (and are not yet removed). -func (w *Watcher) WatchList() []string { - w.mu.Lock() - defer w.mu.Unlock() - - entries := make([]string, 0, len(w.watches)) - for pathname := range w.watches { - entries = append(entries, pathname) - } - - return entries -} - -type watch struct { - wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) - flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) -} - -// readEvents reads from the inotify file descriptor, converts the -// received events into Event objects and sends them via the Events channel -func (w *Watcher) readEvents() { - defer func() { - close(w.doneResp) - close(w.Errors) - close(w.Events) - }() - - var ( - buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events - errno error // Syscall errno - ) - for { - // See if we have been closed. - if w.isClosed() { - return - } - - n, err := w.inotifyFile.Read(buf[:]) - switch { - case errors.Unwrap(err) == os.ErrClosed: - return - case err != nil: - if !w.sendError(err) { - return - } - continue - } - - if n < unix.SizeofInotifyEvent { - var err error - if n == 0 { - // If EOF is received. This should really never happen. - err = io.EOF - } else if n < 0 { - // If an error occurred while reading. 
- err = errno - } else { - // Read was too short. - err = errors.New("notify: short read in readEvents()") - } - if !w.sendError(err) { - return - } - continue - } - - var offset uint32 - // We don't know how many events we just read into the buffer - // While the offset points to at least one whole event... - for offset <= uint32(n-unix.SizeofInotifyEvent) { - var ( - // Point "raw" to the event in the buffer - raw = (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) - mask = uint32(raw.Mask) - nameLen = uint32(raw.Len) - ) - - if mask&unix.IN_Q_OVERFLOW != 0 { - if !w.sendError(ErrEventOverflow) { - return - } - } - - // If the event happened to the watched directory or the watched file, the kernel - // doesn't append the filename to the event, but we would like to always fill the - // the "Name" field with a valid filename. We retrieve the path of the watch from - // the "paths" map. - w.mu.Lock() - name, ok := w.paths[int(raw.Wd)] - // IN_DELETE_SELF occurs when the file/directory being watched is removed. - // This is a sign to clean up the maps, otherwise we are no longer in sync - // with the inotify kernel state which has already deleted the watch - // automatically. - if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { - delete(w.paths, int(raw.Wd)) - delete(w.watches, name) - } - w.mu.Unlock() - - if nameLen > 0 { - // Point "bytes" at the first byte of the filename - bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] - // The filename is padded with NULL bytes. TrimRight() gets rid of those. 
- name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") - } - - event := w.newEvent(name, mask) - - // Send the events that are not ignored on the events channel - if mask&unix.IN_IGNORED == 0 { - if !w.sendEvent(event) { - return - } - } - - // Move to the next event in the buffer - offset += unix.SizeofInotifyEvent + nameLen - } - } -} - -// newEvent returns an platform-independent Event based on an inotify mask. -func (w *Watcher) newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { - e.Op |= Create - } - if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE { - e.Op |= Remove - } - if mask&unix.IN_MODIFY == unix.IN_MODIFY { - e.Op |= Write - } - if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { - e.Op |= Rename - } - if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { - e.Op |= Chmod - } - return e -} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go deleted file mode 100644 index 29087469..00000000 --- a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go +++ /dev/null @@ -1,707 +0,0 @@ -//go:build freebsd || openbsd || netbsd || dragonfly || darwin -// +build freebsd openbsd netbsd dragonfly darwin - -package fsnotify - -import ( - "errors" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "sync" - - "golang.org/x/sys/unix" -) - -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. 
For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # macOS notes -// -// Spotlight indexing on macOS can result in multiple events (see [#15]). A -// temporary workaround is to add your folder(s) to the "Spotlight Privacy -// Settings" until we have a native FSEvents implementation (see [#11]). 
-// -// [#11]: https://github.com/fsnotify/fsnotify/issues/11 -// [#15]: https://github.com/fsnotify/fsnotify/issues/15 -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, so you - // probably want to wait until you've stopped receiving - // them (see the dedup example in cmd/fsnotify). - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // and on kqueue when a file is truncated. On Windows - // it's never sent. - Events chan Event - - // Errors sends any errors. - Errors chan error - - done chan struct{} - kq int // File descriptor (as returned by the kqueue() syscall). - closepipe [2]int // Pipe used for closing. 
- mu sync.Mutex // Protects access to watcher data - watches map[string]int // Watched file descriptors (key: path). - watchesByDir map[string]map[int]struct{} // Watched file descriptors indexed by the parent directory (key: dirname(path)). - userWatches map[string]struct{} // Watches added with Watcher.Add() - dirFlags map[string]uint32 // Watched directories to fflags used in kqueue. - paths map[int]pathInfo // File descriptors to path names for processing kqueue events. - fileExists map[string]struct{} // Keep track of if we know this file exists (to stop duplicate create events). - isClosed bool // Set to true when Close() is first called -} - -type pathInfo struct { - name string - isDir bool -} - -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - kq, closepipe, err := newKqueue() - if err != nil { - return nil, err - } - - w := &Watcher{ - kq: kq, - closepipe: closepipe, - watches: make(map[string]int), - watchesByDir: make(map[string]map[int]struct{}), - dirFlags: make(map[string]uint32), - paths: make(map[int]pathInfo), - fileExists: make(map[string]struct{}), - userWatches: make(map[string]struct{}), - Events: make(chan Event), - Errors: make(chan error), - done: make(chan struct{}), - } - - go w.readEvents() - return w, nil -} - -// newKqueue creates a new kernel event queue and returns a descriptor. -// -// This registers a new event on closepipe, which will trigger an event when -// it's closed. This way we can use kevent() without timeout/polling; without -// the closepipe, it would block forever and we wouldn't be able to stop it at -// all. -func newKqueue() (kq int, closepipe [2]int, err error) { - kq, err = unix.Kqueue() - if kq == -1 { - return kq, closepipe, err - } - - // Register the close pipe. - err = unix.Pipe(closepipe[:]) - if err != nil { - unix.Close(kq) - return kq, closepipe, err - } - - // Register changes to listen on the closepipe. 
- changes := make([]unix.Kevent_t, 1) - // SetKevent converts int to the platform-specific types. - unix.SetKevent(&changes[0], closepipe[0], unix.EVFILT_READ, - unix.EV_ADD|unix.EV_ENABLE|unix.EV_ONESHOT) - - ok, err := unix.Kevent(kq, changes, nil, nil) - if ok == -1 { - unix.Close(kq) - unix.Close(closepipe[0]) - unix.Close(closepipe[1]) - return kq, closepipe, err - } - return kq, closepipe, nil -} - -// Returns true if the event was sent, or false if watcher is closed. -func (w *Watcher) sendEvent(e Event) bool { - select { - case w.Events <- e: - return true - case <-w.done: - } - return false -} - -// Returns true if the error was sent, or false if watcher is closed. -func (w *Watcher) sendError(err error) bool { - select { - case w.Errors <- err: - return true - case <-w.done: - } - return false -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() - return nil - } - w.isClosed = true - - // copy paths to remove while locked - pathsToRemove := make([]string, 0, len(w.watches)) - for name := range w.watches { - pathsToRemove = append(pathsToRemove, name) - } - w.mu.Unlock() // Unlock before calling Remove, which also locks - for _, name := range pathsToRemove { - w.Remove(name) - } - - // Send "quit" message to the reader goroutine. - unix.Close(w.closepipe[1]) - close(w.done) - - return nil -} - -// Add starts monitoring the path for changes. -// -// A path can only be watched once; attempting to watch it more than once will -// return an error. Paths that do not yet exist on the filesystem cannot be -// added. A watch will be automatically removed if the path is deleted. -// -// A path will remain watched if it gets renamed to somewhere else on the same -// filesystem, but the monitor will get removed if the path gets deleted and -// re-created, or if it's moved to a different filesystem. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) 
or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many tools update files atomically. Instead of "just" writing -// to the file a temporary file will be written to first, and if successful the -// temporary file is moved to to destination removing the original, or some -// variant thereof. The watcher on the original file is now lost, as it no -// longer exists. -// -// Instead, watch the parent directory and use Event.Name to filter out files -// you're not interested in. There is an example of this in [cmd/fsnotify/file.go]. -func (w *Watcher) Add(name string) error { - w.mu.Lock() - w.userWatches[name] = struct{}{} - w.mu.Unlock() - _, err := w.addWatch(name, noteAllEvents) - return err -} - -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. 
-func (w *Watcher) Remove(name string) error { - name = filepath.Clean(name) - w.mu.Lock() - watchfd, ok := w.watches[name] - w.mu.Unlock() - if !ok { - return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) - } - - err := w.register([]int{watchfd}, unix.EV_DELETE, 0) - if err != nil { - return err - } - - unix.Close(watchfd) - - w.mu.Lock() - isDir := w.paths[watchfd].isDir - delete(w.watches, name) - delete(w.userWatches, name) - - parentName := filepath.Dir(name) - delete(w.watchesByDir[parentName], watchfd) - - if len(w.watchesByDir[parentName]) == 0 { - delete(w.watchesByDir, parentName) - } - - delete(w.paths, watchfd) - delete(w.dirFlags, name) - delete(w.fileExists, name) - w.mu.Unlock() - - // Find all watched paths that are in this directory that are not external. - if isDir { - var pathsToRemove []string - w.mu.Lock() - for fd := range w.watchesByDir[name] { - path := w.paths[fd] - if _, ok := w.userWatches[path.name]; !ok { - pathsToRemove = append(pathsToRemove, path.name) - } - } - w.mu.Unlock() - for _, name := range pathsToRemove { - // Since these are internal, not much sense in propagating error - // to the user, as that will just confuse them with an error about - // a path they did not explicitly watch themselves. - w.Remove(name) - } - } - - return nil -} - -// WatchList returns all paths added with [Add] (and are not yet removed). -func (w *Watcher) WatchList() []string { - w.mu.Lock() - defer w.mu.Unlock() - - entries := make([]string, 0, len(w.userWatches)) - for pathname := range w.userWatches { - entries = append(entries, pathname) - } - - return entries -} - -// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) -const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME - -// addWatch adds name to the watched file set. -// The flags are interpreted as described in kevent(2). 
-// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks. -func (w *Watcher) addWatch(name string, flags uint32) (string, error) { - var isDir bool - // Make ./name and name equivalent - name = filepath.Clean(name) - - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() - return "", errors.New("kevent instance already closed") - } - watchfd, alreadyWatching := w.watches[name] - // We already have a watch, but we can still override flags. - if alreadyWatching { - isDir = w.paths[watchfd].isDir - } - w.mu.Unlock() - - if !alreadyWatching { - fi, err := os.Lstat(name) - if err != nil { - return "", err - } - - // Don't watch sockets or named pipes - if (fi.Mode()&os.ModeSocket == os.ModeSocket) || (fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe) { - return "", nil - } - - // Follow Symlinks - // - // Linux can add unresolvable symlinks to the watch list without issue, - // and Windows can't do symlinks period. To maintain consistency, we - // will act like everything is fine if the link can't be resolved. - // There will simply be no file events for broken symlinks. Hence the - // returns of nil on errors. - if fi.Mode()&os.ModeSymlink == os.ModeSymlink { - name, err = filepath.EvalSymlinks(name) - if err != nil { - return "", nil - } - - w.mu.Lock() - _, alreadyWatching = w.watches[name] - w.mu.Unlock() - - if alreadyWatching { - return name, nil - } - - fi, err = os.Lstat(name) - if err != nil { - return "", nil - } - } - - // Retry on EINTR; open() can return EINTR in practice on macOS. - // See #354, and go issues 11180 and 39237. 
- for { - watchfd, err = unix.Open(name, openMode, 0) - if err == nil { - break - } - if errors.Is(err, unix.EINTR) { - continue - } - - return "", err - } - - isDir = fi.IsDir() - } - - err := w.register([]int{watchfd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags) - if err != nil { - unix.Close(watchfd) - return "", err - } - - if !alreadyWatching { - w.mu.Lock() - parentName := filepath.Dir(name) - w.watches[name] = watchfd - - watchesByDir, ok := w.watchesByDir[parentName] - if !ok { - watchesByDir = make(map[int]struct{}, 1) - w.watchesByDir[parentName] = watchesByDir - } - watchesByDir[watchfd] = struct{}{} - - w.paths[watchfd] = pathInfo{name: name, isDir: isDir} - w.mu.Unlock() - } - - if isDir { - // Watch the directory if it has not been watched before, - // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) - w.mu.Lock() - - watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && - (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) - // Store flags so this watch can be updated later - w.dirFlags[name] = flags - w.mu.Unlock() - - if watchDir { - if err := w.watchDirectoryFiles(name); err != nil { - return "", err - } - } - } - return name, nil -} - -// readEvents reads from kqueue and converts the received kevents into -// Event values that it sends down the Events channel. -func (w *Watcher) readEvents() { - defer func() { - err := unix.Close(w.kq) - if err != nil { - w.Errors <- err - } - unix.Close(w.closepipe[0]) - close(w.Events) - close(w.Errors) - }() - - eventBuffer := make([]unix.Kevent_t, 10) - for closed := false; !closed; { - kevents, err := w.read(eventBuffer) - // EINTR is okay, the syscall was interrupted before timeout expired. 
- if err != nil && err != unix.EINTR { - if !w.sendError(fmt.Errorf("fsnotify.readEvents: %w", err)) { - closed = true - } - continue - } - - // Flush the events we received to the Events channel - for _, kevent := range kevents { - var ( - watchfd = int(kevent.Ident) - mask = uint32(kevent.Fflags) - ) - - // Shut down the loop when the pipe is closed, but only after all - // other events have been processed. - if watchfd == w.closepipe[0] { - closed = true - continue - } - - w.mu.Lock() - path := w.paths[watchfd] - w.mu.Unlock() - - event := w.newEvent(path.name, mask) - - if path.isDir && !event.Has(Remove) { - // Double check to make sure the directory exists. This can - // happen when we do a rm -fr on a recursively watched folders - // and we receive a modification event first but the folder has - // been deleted and later receive the delete event. - if _, err := os.Lstat(event.Name); os.IsNotExist(err) { - event.Op |= Remove - } - } - - if event.Has(Rename) || event.Has(Remove) { - w.Remove(event.Name) - w.mu.Lock() - delete(w.fileExists, event.Name) - w.mu.Unlock() - } - - if path.isDir && event.Has(Write) && !event.Has(Remove) { - w.sendDirectoryChangeEvents(event.Name) - } else { - if !w.sendEvent(event) { - closed = true - continue - } - } - - if event.Has(Remove) { - // Look for a file that may have overwritten this. - // For example, mv f1 f2 will delete f2, then create f2. - if path.isDir { - fileDir := filepath.Clean(event.Name) - w.mu.Lock() - _, found := w.watches[fileDir] - w.mu.Unlock() - if found { - // make sure the directory exists before we watch for changes. When we - // do a recursive watch and perform rm -fr, the parent directory might - // have gone missing, ignore the missing directory and let the - // upcoming delete event remove the watch from the parent directory. 
- if _, err := os.Lstat(fileDir); err == nil { - w.sendDirectoryChangeEvents(fileDir) - } - } - } else { - filePath := filepath.Clean(event.Name) - if fileInfo, err := os.Lstat(filePath); err == nil { - w.sendFileCreatedEventIfNew(filePath, fileInfo) - } - } - } - } - } -} - -// newEvent returns an platform-independent Event based on kqueue Fflags. -func (w *Watcher) newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { - e.Op |= Remove - } - if mask&unix.NOTE_WRITE == unix.NOTE_WRITE { - e.Op |= Write - } - if mask&unix.NOTE_RENAME == unix.NOTE_RENAME { - e.Op |= Rename - } - if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB { - e.Op |= Chmod - } - return e -} - -// watchDirectoryFiles to mimic inotify when adding a watch on a directory -func (w *Watcher) watchDirectoryFiles(dirPath string) error { - // Get all files - files, err := ioutil.ReadDir(dirPath) - if err != nil { - return err - } - - for _, fileInfo := range files { - path := filepath.Join(dirPath, fileInfo.Name()) - - cleanPath, err := w.internalWatch(path, fileInfo) - if err != nil { - // No permission to read the file; that's not a problem: just skip. - // But do add it to w.fileExists to prevent it from being picked up - // as a "new" file later (it still shows up in the directory - // listing). - switch { - case errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM): - cleanPath = filepath.Clean(path) - default: - return fmt.Errorf("%q: %w", filepath.Join(dirPath, fileInfo.Name()), err) - } - } - - w.mu.Lock() - w.fileExists[cleanPath] = struct{}{} - w.mu.Unlock() - } - - return nil -} - -// Search the directory for new files and send an event for them. -// -// This functionality is to have the BSD watcher match the inotify, which sends -// a create event for files created in a watched directory. 
-func (w *Watcher) sendDirectoryChangeEvents(dir string) { - // Get all files - files, err := ioutil.ReadDir(dir) - if err != nil { - if !w.sendError(fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err)) { - return - } - } - - // Search for new files - for _, fi := range files { - err := w.sendFileCreatedEventIfNew(filepath.Join(dir, fi.Name()), fi) - if err != nil { - return - } - } -} - -// sendFileCreatedEvent sends a create event if the file isn't already being tracked. -func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) { - w.mu.Lock() - _, doesExist := w.fileExists[filePath] - w.mu.Unlock() - if !doesExist { - if !w.sendEvent(Event{Name: filePath, Op: Create}) { - return - } - } - - // like watchDirectoryFiles (but without doing another ReadDir) - filePath, err = w.internalWatch(filePath, fileInfo) - if err != nil { - return err - } - - w.mu.Lock() - w.fileExists[filePath] = struct{}{} - w.mu.Unlock() - - return nil -} - -func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) { - if fileInfo.IsDir() { - // mimic Linux providing delete events for subdirectories - // but preserve the flags used if currently watching subdirectory - w.mu.Lock() - flags := w.dirFlags[name] - w.mu.Unlock() - - flags |= unix.NOTE_DELETE | unix.NOTE_RENAME - return w.addWatch(name, flags) - } - - // watch file to mimic Linux inotify - return w.addWatch(name, noteAllEvents) -} - -// Register events with the queue. -func (w *Watcher) register(fds []int, flags int, fflags uint32) error { - changes := make([]unix.Kevent_t, len(fds)) - for i, fd := range fds { - // SetKevent converts int to the platform-specific types. - unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags) - changes[i].Fflags = fflags - } - - // Register the events. - success, err := unix.Kevent(w.kq, changes, nil, nil) - if success == -1 { - return err - } - return nil -} - -// read retrieves pending events, or waits until an event occurs. 
-func (w *Watcher) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) { - n, err := unix.Kevent(w.kq, nil, events, nil) - if err != nil { - return nil, err - } - return events[0:n], nil -} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_other.go b/vendor/github.com/fsnotify/fsnotify/backend_other.go deleted file mode 100644 index a9bb1c3c..00000000 --- a/vendor/github.com/fsnotify/fsnotify/backend_other.go +++ /dev/null @@ -1,66 +0,0 @@ -//go:build !darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows -// +build !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows - -package fsnotify - -import ( - "fmt" - "runtime" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct{} - -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return nil, fmt.Errorf("fsnotify not supported on %s", runtime.GOOS) -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - return nil -} - -// Add starts monitoring the path for changes. -// -// A path can only be watched once; attempting to watch it more than once will -// return an error. Paths that do not yet exist on the filesystem cannot be -// added. A watch will be automatically removed if the path is deleted. -// -// A path will remain watched if it gets renamed to somewhere else on the same -// filesystem, but the monitor will get removed if the path gets deleted and -// re-created, or if it's moved to a different filesystem. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). 
-// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many tools update files atomically. Instead of "just" writing -// to the file a temporary file will be written to first, and if successful the -// temporary file is moved to to destination removing the original, or some -// variant thereof. The watcher on the original file is now lost, as it no -// longer exists. -// -// Instead, watch the parent directory and use Event.Name to filter out files -// you're not interested in. There is an example of this in [cmd/fsnotify/file.go]. -func (w *Watcher) Add(name string) error { - return nil -} - -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -func (w *Watcher) Remove(name string) error { - return nil -} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_windows.go b/vendor/github.com/fsnotify/fsnotify/backend_windows.go deleted file mode 100644 index ae392867..00000000 --- a/vendor/github.com/fsnotify/fsnotify/backend_windows.go +++ /dev/null @@ -1,746 +0,0 @@ -//go:build windows -// +build windows - -package fsnotify - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "reflect" - "runtime" - "strings" - "sync" - "unsafe" - - "golang.org/x/sys/windows" -) - -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. 
For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # macOS notes -// -// Spotlight indexing on macOS can result in multiple events (see [#15]). A -// temporary workaround is to add your folder(s) to the "Spotlight Privacy -// Settings" until we have a native FSEvents implementation (see [#11]). 
-// -// [#11]: https://github.com/fsnotify/fsnotify/issues/11 -// [#15]: https://github.com/fsnotify/fsnotify/issues/15 -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, so you - // probably want to wait until you've stopped receiving - // them (see the dedup example in cmd/fsnotify). - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // and on kqueue when a file is truncated. On Windows - // it's never sent. - Events chan Event - - // Errors sends any errors. 
- Errors chan error - - port windows.Handle // Handle to completion port - input chan *input // Inputs to the reader are sent on this channel - quit chan chan<- error - - mu sync.Mutex // Protects access to watches, isClosed - watches watchMap // Map of watches (key: i-number) - isClosed bool // Set to true when Close() is first called -} - -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0) - if err != nil { - return nil, os.NewSyscallError("CreateIoCompletionPort", err) - } - w := &Watcher{ - port: port, - watches: make(watchMap), - input: make(chan *input, 1), - Events: make(chan Event, 50), - Errors: make(chan error), - quit: make(chan chan<- error, 1), - } - go w.readEvents() - return w, nil -} - -func (w *Watcher) sendEvent(name string, mask uint64) bool { - if mask == 0 { - return false - } - - event := w.newEvent(name, uint32(mask)) - select { - case ch := <-w.quit: - w.quit <- ch - case w.Events <- event: - } - return true -} - -// Returns true if the error was sent, or false if watcher is closed. -func (w *Watcher) sendError(err error) bool { - select { - case w.Errors <- err: - return true - case <-w.quit: - } - return false -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() - return nil - } - w.isClosed = true - w.mu.Unlock() - - // Send "quit" message to the reader goroutine - ch := make(chan error) - w.quit <- ch - if err := w.wakeupReader(); err != nil { - return err - } - return <-ch -} - -// Add starts monitoring the path for changes. -// -// A path can only be watched once; attempting to watch it more than once will -// return an error. Paths that do not yet exist on the filesystem cannot be -// added. A watch will be automatically removed if the path is deleted. 
-// -// A path will remain watched if it gets renamed to somewhere else on the same -// filesystem, but the monitor will get removed if the path gets deleted and -// re-created, or if it's moved to a different filesystem. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many tools update files atomically. Instead of "just" writing -// to the file a temporary file will be written to first, and if successful the -// temporary file is moved to to destination removing the original, or some -// variant thereof. The watcher on the original file is now lost, as it no -// longer exists. -// -// Instead, watch the parent directory and use Event.Name to filter out files -// you're not interested in. There is an example of this in [cmd/fsnotify/file.go]. -func (w *Watcher) Add(name string) error { - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() - return errors.New("watcher already closed") - } - w.mu.Unlock() - - in := &input{ - op: opAddWatch, - path: filepath.Clean(name), - flags: sysFSALLEVENTS, - reply: make(chan error), - } - w.input <- in - if err := w.wakeupReader(); err != nil { - return err - } - return <-in.reply -} - -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. 
-func (w *Watcher) Remove(name string) error { - in := &input{ - op: opRemoveWatch, - path: filepath.Clean(name), - reply: make(chan error), - } - w.input <- in - if err := w.wakeupReader(); err != nil { - return err - } - return <-in.reply -} - -// WatchList returns all paths added with [Add] (and are not yet removed). -func (w *Watcher) WatchList() []string { - w.mu.Lock() - defer w.mu.Unlock() - - entries := make([]string, 0, len(w.watches)) - for _, entry := range w.watches { - for _, watchEntry := range entry { - entries = append(entries, watchEntry.path) - } - } - - return entries -} - -// These options are from the old golang.org/x/exp/winfsnotify, where you could -// add various options to the watch. This has long since been removed. -// -// The "sys" in the name is misleading as they're not part of any "system". -// -// This should all be removed at some point, and just use windows.FILE_NOTIFY_* -const ( - sysFSALLEVENTS = 0xfff - sysFSATTRIB = 0x4 - sysFSCREATE = 0x100 - sysFSDELETE = 0x200 - sysFSDELETESELF = 0x400 - sysFSMODIFY = 0x2 - sysFSMOVE = 0xc0 - sysFSMOVEDFROM = 0x40 - sysFSMOVEDTO = 0x80 - sysFSMOVESELF = 0x800 - sysFSIGNORED = 0x8000 -) - -func (w *Watcher) newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { - e.Op |= Create - } - if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF { - e.Op |= Remove - } - if mask&sysFSMODIFY == sysFSMODIFY { - e.Op |= Write - } - if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM { - e.Op |= Rename - } - if mask&sysFSATTRIB == sysFSATTRIB { - e.Op |= Chmod - } - return e -} - -const ( - opAddWatch = iota - opRemoveWatch -) - -const ( - provisional uint64 = 1 << (32 + iota) -) - -type input struct { - op int - path string - flags uint32 - reply chan error -} - -type inode struct { - handle windows.Handle - volume uint32 - index uint64 
-} - -type watch struct { - ov windows.Overlapped - ino *inode // i-number - path string // Directory path - mask uint64 // Directory itself is being watched with these notify flags - names map[string]uint64 // Map of names being watched and their notify flags - rename string // Remembers the old name while renaming a file - buf [65536]byte // 64K buffer -} - -type ( - indexMap map[uint64]*watch - watchMap map[uint32]indexMap -) - -func (w *Watcher) wakeupReader() error { - err := windows.PostQueuedCompletionStatus(w.port, 0, 0, nil) - if err != nil { - return os.NewSyscallError("PostQueuedCompletionStatus", err) - } - return nil -} - -func (w *Watcher) getDir(pathname string) (dir string, err error) { - attr, err := windows.GetFileAttributes(windows.StringToUTF16Ptr(pathname)) - if err != nil { - return "", os.NewSyscallError("GetFileAttributes", err) - } - if attr&windows.FILE_ATTRIBUTE_DIRECTORY != 0 { - dir = pathname - } else { - dir, _ = filepath.Split(pathname) - dir = filepath.Clean(dir) - } - return -} - -func (w *Watcher) getIno(path string) (ino *inode, err error) { - h, err := windows.CreateFile(windows.StringToUTF16Ptr(path), - windows.FILE_LIST_DIRECTORY, - windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE, - nil, windows.OPEN_EXISTING, - windows.FILE_FLAG_BACKUP_SEMANTICS|windows.FILE_FLAG_OVERLAPPED, 0) - if err != nil { - return nil, os.NewSyscallError("CreateFile", err) - } - - var fi windows.ByHandleFileInformation - err = windows.GetFileInformationByHandle(h, &fi) - if err != nil { - windows.CloseHandle(h) - return nil, os.NewSyscallError("GetFileInformationByHandle", err) - } - ino = &inode{ - handle: h, - volume: fi.VolumeSerialNumber, - index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), - } - return ino, nil -} - -// Must run within the I/O thread. -func (m watchMap) get(ino *inode) *watch { - if i := m[ino.volume]; i != nil { - return i[ino.index] - } - return nil -} - -// Must run within the I/O thread. 
-func (m watchMap) set(ino *inode, watch *watch) { - i := m[ino.volume] - if i == nil { - i = make(indexMap) - m[ino.volume] = i - } - i[ino.index] = watch -} - -// Must run within the I/O thread. -func (w *Watcher) addWatch(pathname string, flags uint64) error { - dir, err := w.getDir(pathname) - if err != nil { - return err - } - - ino, err := w.getIno(dir) - if err != nil { - return err - } - w.mu.Lock() - watchEntry := w.watches.get(ino) - w.mu.Unlock() - if watchEntry == nil { - _, err := windows.CreateIoCompletionPort(ino.handle, w.port, 0, 0) - if err != nil { - windows.CloseHandle(ino.handle) - return os.NewSyscallError("CreateIoCompletionPort", err) - } - watchEntry = &watch{ - ino: ino, - path: dir, - names: make(map[string]uint64), - } - w.mu.Lock() - w.watches.set(ino, watchEntry) - w.mu.Unlock() - flags |= provisional - } else { - windows.CloseHandle(ino.handle) - } - if pathname == dir { - watchEntry.mask |= flags - } else { - watchEntry.names[filepath.Base(pathname)] |= flags - } - - err = w.startRead(watchEntry) - if err != nil { - return err - } - - if pathname == dir { - watchEntry.mask &= ^provisional - } else { - watchEntry.names[filepath.Base(pathname)] &= ^provisional - } - return nil -} - -// Must run within the I/O thread. 
-func (w *Watcher) remWatch(pathname string) error { - dir, err := w.getDir(pathname) - if err != nil { - return err - } - ino, err := w.getIno(dir) - if err != nil { - return err - } - - w.mu.Lock() - watch := w.watches.get(ino) - w.mu.Unlock() - - err = windows.CloseHandle(ino.handle) - if err != nil { - w.sendError(os.NewSyscallError("CloseHandle", err)) - } - if watch == nil { - return fmt.Errorf("%w: %s", ErrNonExistentWatch, pathname) - } - if pathname == dir { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) - watch.mask = 0 - } else { - name := filepath.Base(pathname) - w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) - delete(watch.names, name) - } - - return w.startRead(watch) -} - -// Must run within the I/O thread. -func (w *Watcher) deleteWatch(watch *watch) { - for name, mask := range watch.names { - if mask&provisional == 0 { - w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) - } - delete(watch.names, name) - } - if watch.mask != 0 { - if watch.mask&provisional == 0 { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) - } - watch.mask = 0 - } -} - -// Must run within the I/O thread. 
-func (w *Watcher) startRead(watch *watch) error { - err := windows.CancelIo(watch.ino.handle) - if err != nil { - w.sendError(os.NewSyscallError("CancelIo", err)) - w.deleteWatch(watch) - } - mask := w.toWindowsFlags(watch.mask) - for _, m := range watch.names { - mask |= w.toWindowsFlags(m) - } - if mask == 0 { - err := windows.CloseHandle(watch.ino.handle) - if err != nil { - w.sendError(os.NewSyscallError("CloseHandle", err)) - } - w.mu.Lock() - delete(w.watches[watch.ino.volume], watch.ino.index) - w.mu.Unlock() - return nil - } - - rdErr := windows.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0], - uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0) - if rdErr != nil { - err := os.NewSyscallError("ReadDirectoryChanges", rdErr) - if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { - // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) - err = nil - } - w.deleteWatch(watch) - w.startRead(watch) - return err - } - return nil -} - -// readEvents reads from the I/O completion port, converts the -// received events into Event objects and sends them via the Events channel. -// Entry point to the I/O thread. -func (w *Watcher) readEvents() { - var ( - n uint32 - key uintptr - ov *windows.Overlapped - ) - runtime.LockOSThread() - - for { - qErr := windows.GetQueuedCompletionStatus(w.port, &n, &key, &ov, windows.INFINITE) - // This error is handled after the watch == nil check below. NOTE: this - // seems odd, note sure if it's correct. 
- - watch := (*watch)(unsafe.Pointer(ov)) - if watch == nil { - select { - case ch := <-w.quit: - w.mu.Lock() - var indexes []indexMap - for _, index := range w.watches { - indexes = append(indexes, index) - } - w.mu.Unlock() - for _, index := range indexes { - for _, watch := range index { - w.deleteWatch(watch) - w.startRead(watch) - } - } - - err := windows.CloseHandle(w.port) - if err != nil { - err = os.NewSyscallError("CloseHandle", err) - } - close(w.Events) - close(w.Errors) - ch <- err - return - case in := <-w.input: - switch in.op { - case opAddWatch: - in.reply <- w.addWatch(in.path, uint64(in.flags)) - case opRemoveWatch: - in.reply <- w.remWatch(in.path) - } - default: - } - continue - } - - switch qErr { - case windows.ERROR_MORE_DATA: - if watch == nil { - w.sendError(errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")) - } else { - // The i/o succeeded but the buffer is full. - // In theory we should be building up a full packet. - // In practice we can get away with just carrying on. 
- n = uint32(unsafe.Sizeof(watch.buf)) - } - case windows.ERROR_ACCESS_DENIED: - // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) - w.deleteWatch(watch) - w.startRead(watch) - continue - case windows.ERROR_OPERATION_ABORTED: - // CancelIo was called on this handle - continue - default: - w.sendError(os.NewSyscallError("GetQueuedCompletionPort", qErr)) - continue - case nil: - } - - var offset uint32 - for { - if n == 0 { - w.sendError(errors.New("short read in readEvents()")) - break - } - - // Point "raw" to the event in the buffer - raw := (*windows.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset])) - - // Create a buf that is the size of the path name - size := int(raw.FileNameLength / 2) - var buf []uint16 - // TODO: Use unsafe.Slice in Go 1.17; https://stackoverflow.com/questions/51187973 - sh := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - sh.Data = uintptr(unsafe.Pointer(&raw.FileName)) - sh.Len = size - sh.Cap = size - name := windows.UTF16ToString(buf) - fullname := filepath.Join(watch.path, name) - - var mask uint64 - switch raw.Action { - case windows.FILE_ACTION_REMOVED: - mask = sysFSDELETESELF - case windows.FILE_ACTION_MODIFIED: - mask = sysFSMODIFY - case windows.FILE_ACTION_RENAMED_OLD_NAME: - watch.rename = name - case windows.FILE_ACTION_RENAMED_NEW_NAME: - // Update saved path of all sub-watches. 
- old := filepath.Join(watch.path, watch.rename) - w.mu.Lock() - for _, watchMap := range w.watches { - for _, ww := range watchMap { - if strings.HasPrefix(ww.path, old) { - ww.path = filepath.Join(fullname, strings.TrimPrefix(ww.path, old)) - } - } - } - w.mu.Unlock() - - if watch.names[watch.rename] != 0 { - watch.names[name] |= watch.names[watch.rename] - delete(watch.names, watch.rename) - mask = sysFSMOVESELF - } - } - - sendNameEvent := func() { - w.sendEvent(fullname, watch.names[name]&mask) - } - if raw.Action != windows.FILE_ACTION_RENAMED_NEW_NAME { - sendNameEvent() - } - if raw.Action == windows.FILE_ACTION_REMOVED { - w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) - delete(watch.names, name) - } - - w.sendEvent(fullname, watch.mask&w.toFSnotifyFlags(raw.Action)) - if raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME { - fullname = filepath.Join(watch.path, watch.rename) - sendNameEvent() - } - - // Move to the next event in the buffer - if raw.NextEntryOffset == 0 { - break - } - offset += raw.NextEntryOffset - - // Error! 
- if offset >= n { - w.sendError(errors.New( - "Windows system assumed buffer larger than it is, events have likely been missed.")) - break - } - } - - if err := w.startRead(watch); err != nil { - w.sendError(err) - } - } -} - -func (w *Watcher) toWindowsFlags(mask uint64) uint32 { - var m uint32 - if mask&sysFSMODIFY != 0 { - m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE - } - if mask&sysFSATTRIB != 0 { - m |= windows.FILE_NOTIFY_CHANGE_ATTRIBUTES - } - if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 { - m |= windows.FILE_NOTIFY_CHANGE_FILE_NAME | windows.FILE_NOTIFY_CHANGE_DIR_NAME - } - return m -} - -func (w *Watcher) toFSnotifyFlags(action uint32) uint64 { - switch action { - case windows.FILE_ACTION_ADDED: - return sysFSCREATE - case windows.FILE_ACTION_REMOVED: - return sysFSDELETE - case windows.FILE_ACTION_MODIFIED: - return sysFSMODIFY - case windows.FILE_ACTION_RENAMED_OLD_NAME: - return sysFSMOVEDFROM - case windows.FILE_ACTION_RENAMED_NEW_NAME: - return sysFSMOVEDTO - } - return 0 -} diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go deleted file mode 100644 index 30a5bf0f..00000000 --- a/vendor/github.com/fsnotify/fsnotify/fsnotify.go +++ /dev/null @@ -1,81 +0,0 @@ -//go:build !plan9 -// +build !plan9 - -// Package fsnotify provides a cross-platform interface for file system -// notifications. -package fsnotify - -import ( - "errors" - "fmt" - "strings" -) - -// Event represents a file system notification. -type Event struct { - // Path to the file or directory. - // - // Paths are relative to the input; for example with Add("dir") the Name - // will be set to "dir/file" if you create that file, but if you use - // Add("/path/to/dir") it will be "/path/to/dir/file". - Name string - - // File operation that triggered the event. - // - // This is a bitmask and some systems may send multiple operations at once. - // Use the Event.Has() method instead of comparing with ==. 
- Op Op -} - -// Op describes a set of file operations. -type Op uint32 - -// The operations fsnotify can trigger; see the documentation on [Watcher] for a -// full description, and check them with [Event.Has]. -const ( - Create Op = 1 << iota - Write - Remove - Rename - Chmod -) - -// Common errors that can be reported by a watcher -var ( - ErrNonExistentWatch = errors.New("can't remove non-existent watcher") - ErrEventOverflow = errors.New("fsnotify queue overflow") -) - -func (op Op) String() string { - var b strings.Builder - if op.Has(Create) { - b.WriteString("|CREATE") - } - if op.Has(Remove) { - b.WriteString("|REMOVE") - } - if op.Has(Write) { - b.WriteString("|WRITE") - } - if op.Has(Rename) { - b.WriteString("|RENAME") - } - if op.Has(Chmod) { - b.WriteString("|CHMOD") - } - if b.Len() == 0 { - return "[no events]" - } - return b.String()[1:] -} - -// Has reports if this operation has the given operation. -func (o Op) Has(h Op) bool { return o&h == h } - -// Has reports if this event has the given operation. -func (e Event) Has(op Op) bool { return e.Op.Has(op) } - -// String returns a string representation of the event with their path. -func (e Event) String() string { - return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name) -} diff --git a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh b/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh deleted file mode 100644 index b09ef768..00000000 --- a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh +++ /dev/null @@ -1,208 +0,0 @@ -#!/usr/bin/env zsh -[ "${ZSH_VERSION:-}" = "" ] && echo >&2 "Only works with zsh" && exit 1 -setopt err_exit no_unset pipefail extended_glob - -# Simple script to update the godoc comments on all watchers. 
Probably took me -# more time to write this than doing it manually, but ah well 🙃 - -watcher=$(</tmp/x - print -r -- $cmt >>/tmp/x - tail -n+$(( end + 1 )) $file >>/tmp/x - mv /tmp/x $file - done -} - -set-cmt '^type Watcher struct ' $watcher -set-cmt '^func NewWatcher(' $new -set-cmt '^func (w \*Watcher) Add(' $add -set-cmt '^func (w \*Watcher) Remove(' $remove -set-cmt '^func (w \*Watcher) Close(' $close -set-cmt '^func (w \*Watcher) WatchList(' $watchlist -set-cmt '^[[:space:]]*Events *chan Event$' $events -set-cmt '^[[:space:]]*Errors *chan error$' $errors diff --git a/vendor/github.com/fsnotify/fsnotify/system_bsd.go b/vendor/github.com/fsnotify/fsnotify/system_bsd.go deleted file mode 100644 index 4322b0b8..00000000 --- a/vendor/github.com/fsnotify/fsnotify/system_bsd.go +++ /dev/null @@ -1,8 +0,0 @@ -//go:build freebsd || openbsd || netbsd || dragonfly -// +build freebsd openbsd netbsd dragonfly - -package fsnotify - -import "golang.org/x/sys/unix" - -const openMode = unix.O_NONBLOCK | unix.O_RDONLY | unix.O_CLOEXEC diff --git a/vendor/github.com/fsnotify/fsnotify/system_darwin.go b/vendor/github.com/fsnotify/fsnotify/system_darwin.go deleted file mode 100644 index 5da5ffa7..00000000 --- a/vendor/github.com/fsnotify/fsnotify/system_darwin.go +++ /dev/null @@ -1,9 +0,0 @@ -//go:build darwin -// +build darwin - -package fsnotify - -import "golang.org/x/sys/unix" - -// note: this constant is not defined on BSD -const openMode = unix.O_EVTONLY | unix.O_CLOEXEC diff --git a/vendor/github.com/go-logr/logr/.golangci.yaml b/vendor/github.com/go-logr/logr/.golangci.yaml deleted file mode 100644 index 0cffafa7..00000000 --- a/vendor/github.com/go-logr/logr/.golangci.yaml +++ /dev/null @@ -1,26 +0,0 @@ -run: - timeout: 1m - tests: true - -linters: - disable-all: true - enable: - - asciicheck - - errcheck - - forcetypeassert - - gocritic - - gofmt - - goimports - - gosimple - - govet - - ineffassign - - misspell - - revive - - staticcheck - - typecheck - - unused - 
-issues: - exclude-use-default: false - max-issues-per-linter: 0 - max-same-issues: 10 diff --git a/vendor/github.com/go-logr/logr/CHANGELOG.md b/vendor/github.com/go-logr/logr/CHANGELOG.md deleted file mode 100644 index c3569600..00000000 --- a/vendor/github.com/go-logr/logr/CHANGELOG.md +++ /dev/null @@ -1,6 +0,0 @@ -# CHANGELOG - -## v1.0.0-rc1 - -This is the first logged release. Major changes (including breaking changes) -have occurred since earlier tags. diff --git a/vendor/github.com/go-logr/logr/CONTRIBUTING.md b/vendor/github.com/go-logr/logr/CONTRIBUTING.md deleted file mode 100644 index 5d37e294..00000000 --- a/vendor/github.com/go-logr/logr/CONTRIBUTING.md +++ /dev/null @@ -1,17 +0,0 @@ -# Contributing - -Logr is open to pull-requests, provided they fit within the intended scope of -the project. Specifically, this library aims to be VERY small and minimalist, -with no external dependencies. - -## Compatibility - -This project intends to follow [semantic versioning](http://semver.org) and -is very strict about compatibility. Any proposed changes MUST follow those -rules. - -## Performance - -As a logging library, logr must be as light-weight as possible. Any proposed -code change must include results of running the [benchmark](./benchmark) -before and after the change. diff --git a/vendor/github.com/go-logr/logr/LICENSE b/vendor/github.com/go-logr/logr/LICENSE deleted file mode 100644 index 8dada3ed..00000000 --- a/vendor/github.com/go-logr/logr/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md deleted file mode 100644 index ab593118..00000000 --- a/vendor/github.com/go-logr/logr/README.md +++ /dev/null @@ -1,282 +0,0 @@ -# A minimal logging API for Go - -[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/logr.svg)](https://pkg.go.dev/github.com/go-logr/logr) - -logr offers an(other) opinion on how Go programs and libraries can do logging -without becoming coupled to a particular logging implementation. This is not -an implementation of logging - it is an API. In fact it is two APIs with two -different sets of users. - -The `Logger` type is intended for application and library authors. It provides -a relatively small API which can be used everywhere you want to emit logs. 
It -defers the actual act of writing logs (to files, to stdout, or whatever) to the -`LogSink` interface. - -The `LogSink` interface is intended for logging library implementers. It is a -pure interface which can be implemented by logging frameworks to provide the actual logging -functionality. - -This decoupling allows application and library developers to write code in -terms of `logr.Logger` (which has very low dependency fan-out) while the -implementation of logging is managed "up stack" (e.g. in or near `main()`.) -Application developers can then switch out implementations as necessary. - -Many people assert that libraries should not be logging, and as such efforts -like this are pointless. Those people are welcome to convince the authors of -the tens-of-thousands of libraries that *DO* write logs that they are all -wrong. In the meantime, logr takes a more practical approach. - -## Typical usage - -Somewhere, early in an application's life, it will make a decision about which -logging library (implementation) it actually wants to use. Something like: - -``` - func main() { - // ... other setup code ... - - // Create the "root" logger. We have chosen the "logimpl" implementation, - // which takes some initial parameters and returns a logr.Logger. - logger := logimpl.New(param1, param2) - - // ... other setup code ... -``` - -Most apps will call into other libraries, create structures to govern the flow, -etc. The `logr.Logger` object can be passed to these other libraries, stored -in structs, or even used as a package-global variable, if needed. For example: - -``` - app := createTheAppObject(logger) - app.Run() -``` - -Outside of this early setup, no other packages need to know about the choice of -implementation. They write logs in terms of the `logr.Logger` that they -received: - -``` - type appObject struct { - // ... other fields ... - logger logr.Logger - // ... other fields ... 
- } - - func (app *appObject) Run() { - app.logger.Info("starting up", "timestamp", time.Now()) - - // ... app code ... -``` - -## Background - -If the Go standard library had defined an interface for logging, this project -probably would not be needed. Alas, here we are. - -### Inspiration - -Before you consider this package, please read [this blog post by the -inimitable Dave Cheney][warning-makes-no-sense]. We really appreciate what -he has to say, and it largely aligns with our own experiences. - -### Differences from Dave's ideas - -The main differences are: - -1. Dave basically proposes doing away with the notion of a logging API in favor -of `fmt.Printf()`. We disagree, especially when you consider things like output -locations, timestamps, file and line decorations, and structured logging. This -package restricts the logging API to just 2 types of logs: info and error. - -Info logs are things you want to tell the user which are not errors. Error -logs are, well, errors. If your code receives an `error` from a subordinate -function call and is logging that `error` *and not returning it*, use error -logs. - -2. Verbosity-levels on info logs. This gives developers a chance to indicate -arbitrary grades of importance for info logs, without assigning names with -semantic meaning such as "warning", "trace", and "debug." Superficially this -may feel very similar, but the primary difference is the lack of semantics. -Because verbosity is a numerical value, it's safe to assume that an app running -with higher verbosity means more (and less important) logs will be generated. 
- -## Implementations (non-exhaustive) - -There are implementations for the following logging libraries: - -- **a function** (can bridge to non-structured libraries): [funcr](https://github.com/go-logr/logr/tree/master/funcr) -- **a testing.T** (for use in Go tests, with JSON-like output): [testr](https://github.com/go-logr/logr/tree/master/testr) -- **github.com/google/glog**: [glogr](https://github.com/go-logr/glogr) -- **k8s.io/klog** (for Kubernetes): [klogr](https://git.k8s.io/klog/klogr) -- **a testing.T** (with klog-like text output): [ktesting](https://git.k8s.io/klog/ktesting) -- **go.uber.org/zap**: [zapr](https://github.com/go-logr/zapr) -- **log** (the Go standard library logger): [stdr](https://github.com/go-logr/stdr) -- **github.com/sirupsen/logrus**: [logrusr](https://github.com/bombsimon/logrusr) -- **github.com/wojas/genericr**: [genericr](https://github.com/wojas/genericr) (makes it easy to implement your own backend) -- **logfmt** (Heroku style [logging](https://www.brandur.org/logfmt)): [logfmtr](https://github.com/iand/logfmtr) -- **github.com/rs/zerolog**: [zerologr](https://github.com/go-logr/zerologr) -- **github.com/go-kit/log**: [gokitlogr](https://github.com/tonglil/gokitlogr) (also compatible with github.com/go-kit/kit/log since v0.12.0) -- **bytes.Buffer** (writing to a buffer): [bufrlogr](https://github.com/tonglil/buflogr) (useful for ensuring values were logged, like during testing) - -## FAQ - -### Conceptual - -#### Why structured logging? - -- **Structured logs are more easily queryable**: Since you've got - key-value pairs, it's much easier to query your structured logs for - particular values by filtering on the contents of a particular key -- - think searching request logs for error codes, Kubernetes reconcilers for - the name and namespace of the reconciled object, etc. 
- -- **Structured logging makes it easier to have cross-referenceable logs**: - Similarly to searchability, if you maintain conventions around your - keys, it becomes easy to gather all log lines related to a particular - concept. - -- **Structured logs allow better dimensions of filtering**: if you have - structure to your logs, you've got more precise control over how much - information is logged -- you might choose in a particular configuration - to log certain keys but not others, only log lines where a certain key - matches a certain value, etc., instead of just having v-levels and names - to key off of. - -- **Structured logs better represent structured data**: sometimes, the - data that you want to log is inherently structured (think tuple-link - objects.) Structured logs allow you to preserve that structure when - outputting. - -#### Why V-levels? - -**V-levels give operators an easy way to control the chattiness of log -operations**. V-levels provide a way for a given package to distinguish -the relative importance or verbosity of a given log message. Then, if -a particular logger or package is logging too many messages, the user -of the package can simply change the v-levels for that library. - -#### Why not named levels, like Info/Warning/Error? - -Read [Dave Cheney's post][warning-makes-no-sense]. Then read [Differences -from Dave's ideas](#differences-from-daves-ideas). - -#### Why not allow format strings, too? - -**Format strings negate many of the benefits of structured logs**: - -- They're not easily searchable without resorting to fuzzy searching, - regular expressions, etc. - -- They don't store structured data well, since contents are flattened into - a string. - -- They're not cross-referenceable. - -- They don't compress easily, since the message is not constant. - -(Unless you turn positional parameters into key-value pairs with numerical -keys, at which point you've gotten key-value logging with meaningless -keys.) 
- -### Practical - -#### Why key-value pairs, and not a map? - -Key-value pairs are *much* easier to optimize, especially around -allocations. Zap (a structured logger that inspired logr's interface) has -[performance measurements](https://github.com/uber-go/zap#performance) -that show this quite nicely. - -While the interface ends up being a little less obvious, you get -potentially better performance, plus avoid making users type -`map[string]string{}` every time they want to log. - -#### What if my V-levels differ between libraries? - -That's fine. Control your V-levels on a per-logger basis, and use the -`WithName` method to pass different loggers to different libraries. - -Generally, you should take care to ensure that you have relatively -consistent V-levels within a given logger, however, as this makes deciding -on what verbosity of logs to request easier. - -#### But I really want to use a format string! - -That's not actually a question. Assuming your question is "how do -I convert my mental model of logging with format strings to logging with -constant messages": - -1. Figure out what the error actually is, as you'd write in a TL;DR style, - and use that as a message. - -2. For every place you'd write a format specifier, look to the word before - it, and add that as a key value pair. - -For instance, consider the following examples (all taken from spots in the -Kubernetes codebase): - -- `klog.V(4).Infof("Client is returning errors: code %v, error %v", - responseCode, err)` becomes `logger.Error(err, "client returned an - error", "code", responseCode)` - -- `klog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v", - seconds, retries, url)` becomes `logger.V(4).Info("got a retry-after - response when requesting url", "attempt", retries, "after - seconds", seconds, "url", url)` - -If you *really* must use a format string, use it in a key's value, and -call `fmt.Sprintf` yourself. 
For instance: `log.Printf("unable to -reflect over type %T")` becomes `logger.Info("unable to reflect over -type", "type", fmt.Sprintf("%T"))`. In general though, the cases where -this is necessary should be few and far between. - -#### How do I choose my V-levels? - -This is basically the only hard constraint: increase V-levels to denote -more verbose or more debug-y logs. - -Otherwise, you can start out with `0` as "you always want to see this", -`1` as "common logging that you might *possibly* want to turn off", and -`10` as "I would like to performance-test your log collection stack." - -Then gradually choose levels in between as you need them, working your way -down from 10 (for debug and trace style logs) and up from 1 (for chattier -info-type logs.) - -#### How do I choose my keys? - -Keys are fairly flexible, and can hold more or less any string -value. For best compatibility with implementations and consistency -with existing code in other projects, there are a few conventions you -should consider. - -- Make your keys human-readable. -- Constant keys are generally a good idea. -- Be consistent across your codebase. -- Keys should naturally match parts of the message string. -- Use lower case for simple keys and - [lowerCamelCase](https://en.wiktionary.org/wiki/lowerCamelCase) for - more complex ones. Kubernetes is one example of a project that has - [adopted that - convention](https://github.com/kubernetes/community/blob/HEAD/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments). - -While key names are mostly unrestricted (and spaces are acceptable), -it's generally a good idea to stick to printable ascii characters, or at -least match the general character set of your log lines. - -#### Why should keys be constant values? - -The point of structured logging is to make later log processing easier. Your -keys are, effectively, the schema of each log message. 
If you use different -keys across instances of the same log line, you will make your structured logs -much harder to use. `Sprintf()` is for values, not for keys! - -#### Why is this not a pure interface? - -The Logger type is implemented as a struct in order to allow the Go compiler to -optimize things like high-V `Info` logs that are not triggered. Not all of -these implementations are implemented yet, but this structure was suggested as -a way to ensure they *can* be implemented. All of the real work is behind the -`LogSink` interface. - -[warning-makes-no-sense]: http://dave.cheney.net/2015/11/05/lets-talk-about-logging diff --git a/vendor/github.com/go-logr/logr/discard.go b/vendor/github.com/go-logr/logr/discard.go deleted file mode 100644 index 99fe8be9..00000000 --- a/vendor/github.com/go-logr/logr/discard.go +++ /dev/null @@ -1,24 +0,0 @@ -/* -Copyright 2020 The logr Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package logr - -// Discard returns a Logger that discards all messages logged to it. It can be -// used whenever the caller is not interested in the logs. Logger instances -// produced by this function always compare as equal. -func Discard() Logger { - return New(nil) -} diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go deleted file mode 100644 index e52f0cd0..00000000 --- a/vendor/github.com/go-logr/logr/funcr/funcr.go +++ /dev/null @@ -1,804 +0,0 @@ -/* -Copyright 2021 The logr Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package funcr implements formatting of structured log messages and -// optionally captures the call site and timestamp. -// -// The simplest way to use it is via its implementation of a -// github.com/go-logr/logr.LogSink with output through an arbitrary -// "write" function. See New and NewJSON for details. -// -// # Custom LogSinks -// -// For users who need more control, a funcr.Formatter can be embedded inside -// your own custom LogSink implementation. This is useful when the LogSink -// needs to implement additional methods, for example. -// -// # Formatting -// -// This will respect logr.Marshaler, fmt.Stringer, and error interfaces for -// values which are being logged. When rendering a struct, funcr will use Go's -// standard JSON tags (all except "string"). -package funcr - -import ( - "bytes" - "encoding" - "encoding/json" - "fmt" - "path/filepath" - "reflect" - "runtime" - "strconv" - "strings" - "time" - - "github.com/go-logr/logr" -) - -// New returns a logr.Logger which is implemented by an arbitrary function. -func New(fn func(prefix, args string), opts Options) logr.Logger { - return logr.New(newSink(fn, NewFormatter(opts))) -} - -// NewJSON returns a logr.Logger which is implemented by an arbitrary function -// and produces JSON output. 
-func NewJSON(fn func(obj string), opts Options) logr.Logger { - fnWrapper := func(_, obj string) { - fn(obj) - } - return logr.New(newSink(fnWrapper, NewFormatterJSON(opts))) -} - -// Underlier exposes access to the underlying logging function. Since -// callers only have a logr.Logger, they have to know which -// implementation is in use, so this interface is less of an -// abstraction and more of a way to test type conversion. -type Underlier interface { - GetUnderlying() func(prefix, args string) -} - -func newSink(fn func(prefix, args string), formatter Formatter) logr.LogSink { - l := &fnlogger{ - Formatter: formatter, - write: fn, - } - // For skipping fnlogger.Info and fnlogger.Error. - l.Formatter.AddCallDepth(1) - return l -} - -// Options carries parameters which influence the way logs are generated. -type Options struct { - // LogCaller tells funcr to add a "caller" key to some or all log lines. - // This has some overhead, so some users might not want it. - LogCaller MessageClass - - // LogCallerFunc tells funcr to also log the calling function name. This - // has no effect if caller logging is not enabled (see Options.LogCaller). - LogCallerFunc bool - - // LogTimestamp tells funcr to add a "ts" key to log lines. This has some - // overhead, so some users might not want it. - LogTimestamp bool - - // TimestampFormat tells funcr how to render timestamps when LogTimestamp - // is enabled. If not specified, a default format will be used. For more - // details, see docs for Go's time.Layout. - TimestampFormat string - - // Verbosity tells funcr which V logs to produce. Higher values enable - // more logs. Info logs at or below this level will be written, while logs - // above this level will be discarded. - Verbosity int - - // RenderBuiltinsHook allows users to mutate the list of key-value pairs - // while a log line is being rendered. 
The kvList argument follows logr - // conventions - each pair of slice elements is comprised of a string key - // and an arbitrary value (verified and sanitized before calling this - // hook). The value returned must follow the same conventions. This hook - // can be used to audit or modify logged data. For example, you might want - // to prefix all of funcr's built-in keys with some string. This hook is - // only called for built-in (provided by funcr itself) key-value pairs. - // Equivalent hooks are offered for key-value pairs saved via - // logr.Logger.WithValues or Formatter.AddValues (see RenderValuesHook) and - // for user-provided pairs (see RenderArgsHook). - RenderBuiltinsHook func(kvList []interface{}) []interface{} - - // RenderValuesHook is the same as RenderBuiltinsHook, except that it is - // only called for key-value pairs saved via logr.Logger.WithValues. See - // RenderBuiltinsHook for more details. - RenderValuesHook func(kvList []interface{}) []interface{} - - // RenderArgsHook is the same as RenderBuiltinsHook, except that it is only - // called for key-value pairs passed directly to Info and Error. See - // RenderBuiltinsHook for more details. - RenderArgsHook func(kvList []interface{}) []interface{} - - // MaxLogDepth tells funcr how many levels of nested fields (e.g. a struct - // that contains a struct, etc.) it may log. Every time it finds a struct, - // slice, array, or map the depth is increased by one. When the maximum is - // reached, the value will be converted to a string indicating that the max - // depth has been exceeded. If this field is not specified, a default - // value will be used. - MaxLogDepth int -} - -// MessageClass indicates which category or categories of messages to consider. -type MessageClass int - -const ( - // None ignores all message classes. - None MessageClass = iota - // All considers all message classes. - All - // Info only considers info messages. - Info - // Error only considers error messages. 
- Error -) - -// fnlogger inherits some of its LogSink implementation from Formatter -// and just needs to add some glue code. -type fnlogger struct { - Formatter - write func(prefix, args string) -} - -func (l fnlogger) WithName(name string) logr.LogSink { - l.Formatter.AddName(name) - return &l -} - -func (l fnlogger) WithValues(kvList ...interface{}) logr.LogSink { - l.Formatter.AddValues(kvList) - return &l -} - -func (l fnlogger) WithCallDepth(depth int) logr.LogSink { - l.Formatter.AddCallDepth(depth) - return &l -} - -func (l fnlogger) Info(level int, msg string, kvList ...interface{}) { - prefix, args := l.FormatInfo(level, msg, kvList) - l.write(prefix, args) -} - -func (l fnlogger) Error(err error, msg string, kvList ...interface{}) { - prefix, args := l.FormatError(err, msg, kvList) - l.write(prefix, args) -} - -func (l fnlogger) GetUnderlying() func(prefix, args string) { - return l.write -} - -// Assert conformance to the interfaces. -var _ logr.LogSink = &fnlogger{} -var _ logr.CallDepthLogSink = &fnlogger{} -var _ Underlier = &fnlogger{} - -// NewFormatter constructs a Formatter which emits a JSON-like key=value format. -func NewFormatter(opts Options) Formatter { - return newFormatter(opts, outputKeyValue) -} - -// NewFormatterJSON constructs a Formatter which emits strict JSON. -func NewFormatterJSON(opts Options) Formatter { - return newFormatter(opts, outputJSON) -} - -// Defaults for Options. -const defaultTimestampFormat = "2006-01-02 15:04:05.000000" -const defaultMaxLogDepth = 16 - -func newFormatter(opts Options, outfmt outputFormat) Formatter { - if opts.TimestampFormat == "" { - opts.TimestampFormat = defaultTimestampFormat - } - if opts.MaxLogDepth == 0 { - opts.MaxLogDepth = defaultMaxLogDepth - } - f := Formatter{ - outputFormat: outfmt, - prefix: "", - values: nil, - depth: 0, - opts: &opts, - } - return f -} - -// Formatter is an opaque struct which can be embedded in a LogSink -// implementation. 
It should be constructed with NewFormatter. Some of -// its methods directly implement logr.LogSink. -type Formatter struct { - outputFormat outputFormat - prefix string - values []interface{} - valuesStr string - depth int - opts *Options -} - -// outputFormat indicates which outputFormat to use. -type outputFormat int - -const ( - // outputKeyValue emits a JSON-like key=value format, but not strict JSON. - outputKeyValue outputFormat = iota - // outputJSON emits strict JSON. - outputJSON -) - -// PseudoStruct is a list of key-value pairs that gets logged as a struct. -type PseudoStruct []interface{} - -// render produces a log line, ready to use. -func (f Formatter) render(builtins, args []interface{}) string { - // Empirically bytes.Buffer is faster than strings.Builder for this. - buf := bytes.NewBuffer(make([]byte, 0, 1024)) - if f.outputFormat == outputJSON { - buf.WriteByte('{') - } - vals := builtins - if hook := f.opts.RenderBuiltinsHook; hook != nil { - vals = hook(f.sanitize(vals)) - } - f.flatten(buf, vals, false, false) // keys are ours, no need to escape - continuing := len(builtins) > 0 - if len(f.valuesStr) > 0 { - if continuing { - if f.outputFormat == outputJSON { - buf.WriteByte(',') - } else { - buf.WriteByte(' ') - } - } - continuing = true - buf.WriteString(f.valuesStr) - } - vals = args - if hook := f.opts.RenderArgsHook; hook != nil { - vals = hook(f.sanitize(vals)) - } - f.flatten(buf, vals, continuing, true) // escape user-provided keys - if f.outputFormat == outputJSON { - buf.WriteByte('}') - } - return buf.String() -} - -// flatten renders a list of key-value pairs into a buffer. If continuing is -// true, it assumes that the buffer has previous values and will emit a -// separator (which depends on the output format) before the first pair it -// writes. If escapeKeys is true, the keys are assumed to have -// non-JSON-compatible characters in them and must be evaluated for escapes. 
-// -// This function returns a potentially modified version of kvList, which -// ensures that there is a value for every key (adding a value if needed) and -// that each key is a string (substituting a key if needed). -func (f Formatter) flatten(buf *bytes.Buffer, kvList []interface{}, continuing bool, escapeKeys bool) []interface{} { - // This logic overlaps with sanitize() but saves one type-cast per key, - // which can be measurable. - if len(kvList)%2 != 0 { - kvList = append(kvList, noValue) - } - for i := 0; i < len(kvList); i += 2 { - k, ok := kvList[i].(string) - if !ok { - k = f.nonStringKey(kvList[i]) - kvList[i] = k - } - v := kvList[i+1] - - if i > 0 || continuing { - if f.outputFormat == outputJSON { - buf.WriteByte(',') - } else { - // In theory the format could be something we don't understand. In - // practice, we control it, so it won't be. - buf.WriteByte(' ') - } - } - - if escapeKeys { - buf.WriteString(prettyString(k)) - } else { - // this is faster - buf.WriteByte('"') - buf.WriteString(k) - buf.WriteByte('"') - } - if f.outputFormat == outputJSON { - buf.WriteByte(':') - } else { - buf.WriteByte('=') - } - buf.WriteString(f.pretty(v)) - } - return kvList -} - -func (f Formatter) pretty(value interface{}) string { - return f.prettyWithFlags(value, 0, 0) -} - -const ( - flagRawStruct = 0x1 // do not print braces on structs -) - -// TODO: This is not fast. Most of the overhead goes here. -func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) string { - if depth > f.opts.MaxLogDepth { - return `""` - } - - // Handle types that take full control of logging. - if v, ok := value.(logr.Marshaler); ok { - // Replace the value with what the type wants to get logged. - // That then gets handled below via reflection. - value = invokeMarshaler(v) - } - - // Handle types that want to format themselves. 
- switch v := value.(type) { - case fmt.Stringer: - value = invokeStringer(v) - case error: - value = invokeError(v) - } - - // Handling the most common types without reflect is a small perf win. - switch v := value.(type) { - case bool: - return strconv.FormatBool(v) - case string: - return prettyString(v) - case int: - return strconv.FormatInt(int64(v), 10) - case int8: - return strconv.FormatInt(int64(v), 10) - case int16: - return strconv.FormatInt(int64(v), 10) - case int32: - return strconv.FormatInt(int64(v), 10) - case int64: - return strconv.FormatInt(int64(v), 10) - case uint: - return strconv.FormatUint(uint64(v), 10) - case uint8: - return strconv.FormatUint(uint64(v), 10) - case uint16: - return strconv.FormatUint(uint64(v), 10) - case uint32: - return strconv.FormatUint(uint64(v), 10) - case uint64: - return strconv.FormatUint(v, 10) - case uintptr: - return strconv.FormatUint(uint64(v), 10) - case float32: - return strconv.FormatFloat(float64(v), 'f', -1, 32) - case float64: - return strconv.FormatFloat(v, 'f', -1, 64) - case complex64: - return `"` + strconv.FormatComplex(complex128(v), 'f', -1, 64) + `"` - case complex128: - return `"` + strconv.FormatComplex(v, 'f', -1, 128) + `"` - case PseudoStruct: - buf := bytes.NewBuffer(make([]byte, 0, 1024)) - v = f.sanitize(v) - if flags&flagRawStruct == 0 { - buf.WriteByte('{') - } - for i := 0; i < len(v); i += 2 { - if i > 0 { - buf.WriteByte(',') - } - k, _ := v[i].(string) // sanitize() above means no need to check success - // arbitrary keys might need escaping - buf.WriteString(prettyString(k)) - buf.WriteByte(':') - buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1)) - } - if flags&flagRawStruct == 0 { - buf.WriteByte('}') - } - return buf.String() - } - - buf := bytes.NewBuffer(make([]byte, 0, 256)) - t := reflect.TypeOf(value) - if t == nil { - return "null" - } - v := reflect.ValueOf(value) - switch t.Kind() { - case reflect.Bool: - return strconv.FormatBool(v.Bool()) - case reflect.String: - 
return prettyString(v.String()) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return strconv.FormatInt(int64(v.Int()), 10) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return strconv.FormatUint(uint64(v.Uint()), 10) - case reflect.Float32: - return strconv.FormatFloat(float64(v.Float()), 'f', -1, 32) - case reflect.Float64: - return strconv.FormatFloat(v.Float(), 'f', -1, 64) - case reflect.Complex64: - return `"` + strconv.FormatComplex(complex128(v.Complex()), 'f', -1, 64) + `"` - case reflect.Complex128: - return `"` + strconv.FormatComplex(v.Complex(), 'f', -1, 128) + `"` - case reflect.Struct: - if flags&flagRawStruct == 0 { - buf.WriteByte('{') - } - printComma := false // testing i>0 is not enough because of JSON omitted fields - for i := 0; i < t.NumField(); i++ { - fld := t.Field(i) - if fld.PkgPath != "" { - // reflect says this field is only defined for non-exported fields. - continue - } - if !v.Field(i).CanInterface() { - // reflect isn't clear exactly what this means, but we can't use it. 
- continue - } - name := "" - omitempty := false - if tag, found := fld.Tag.Lookup("json"); found { - if tag == "-" { - continue - } - if comma := strings.Index(tag, ","); comma != -1 { - if n := tag[:comma]; n != "" { - name = n - } - rest := tag[comma:] - if strings.Contains(rest, ",omitempty,") || strings.HasSuffix(rest, ",omitempty") { - omitempty = true - } - } else { - name = tag - } - } - if omitempty && isEmpty(v.Field(i)) { - continue - } - if printComma { - buf.WriteByte(',') - } - printComma = true // if we got here, we are rendering a field - if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" { - buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), flags|flagRawStruct, depth+1)) - continue - } - if name == "" { - name = fld.Name - } - // field names can't contain characters which need escaping - buf.WriteByte('"') - buf.WriteString(name) - buf.WriteByte('"') - buf.WriteByte(':') - buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1)) - } - if flags&flagRawStruct == 0 { - buf.WriteByte('}') - } - return buf.String() - case reflect.Slice, reflect.Array: - // If this is outputing as JSON make sure this isn't really a json.RawMessage. - // If so just emit "as-is" and don't pretty it as that will just print - // it as [X,Y,Z,...] which isn't terribly useful vs the string form you really want. - if f.outputFormat == outputJSON { - if rm, ok := value.(json.RawMessage); ok { - // If it's empty make sure we emit an empty value as the array style would below. - if len(rm) > 0 { - buf.Write(rm) - } else { - buf.WriteString("null") - } - return buf.String() - } - } - buf.WriteByte('[') - for i := 0; i < v.Len(); i++ { - if i > 0 { - buf.WriteByte(',') - } - e := v.Index(i) - buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1)) - } - buf.WriteByte(']') - return buf.String() - case reflect.Map: - buf.WriteByte('{') - // This does not sort the map keys, for best perf. 
- it := v.MapRange() - i := 0 - for it.Next() { - if i > 0 { - buf.WriteByte(',') - } - // If a map key supports TextMarshaler, use it. - keystr := "" - if m, ok := it.Key().Interface().(encoding.TextMarshaler); ok { - txt, err := m.MarshalText() - if err != nil { - keystr = fmt.Sprintf("", err.Error()) - } else { - keystr = string(txt) - } - keystr = prettyString(keystr) - } else { - // prettyWithFlags will produce already-escaped values - keystr = f.prettyWithFlags(it.Key().Interface(), 0, depth+1) - if t.Key().Kind() != reflect.String { - // JSON only does string keys. Unlike Go's standard JSON, we'll - // convert just about anything to a string. - keystr = prettyString(keystr) - } - } - buf.WriteString(keystr) - buf.WriteByte(':') - buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1)) - i++ - } - buf.WriteByte('}') - return buf.String() - case reflect.Ptr, reflect.Interface: - if v.IsNil() { - return "null" - } - return f.prettyWithFlags(v.Elem().Interface(), 0, depth) - } - return fmt.Sprintf(`""`, t.Kind().String()) -} - -func prettyString(s string) string { - // Avoid escaping (which does allocations) if we can. - if needsEscape(s) { - return strconv.Quote(s) - } - b := bytes.NewBuffer(make([]byte, 0, 1024)) - b.WriteByte('"') - b.WriteString(s) - b.WriteByte('"') - return b.String() -} - -// needsEscape determines whether the input string needs to be escaped or not, -// without doing any allocations. 
-func needsEscape(s string) bool { - for _, r := range s { - if !strconv.IsPrint(r) || r == '\\' || r == '"' { - return true - } - } - return false -} - -func isEmpty(v reflect.Value) bool { - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Complex64, reflect.Complex128: - return v.Complex() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - } - return false -} - -func invokeMarshaler(m logr.Marshaler) (ret interface{}) { - defer func() { - if r := recover(); r != nil { - ret = fmt.Sprintf("", r) - } - }() - return m.MarshalLog() -} - -func invokeStringer(s fmt.Stringer) (ret string) { - defer func() { - if r := recover(); r != nil { - ret = fmt.Sprintf("", r) - } - }() - return s.String() -} - -func invokeError(e error) (ret string) { - defer func() { - if r := recover(); r != nil { - ret = fmt.Sprintf("", r) - } - }() - return e.Error() -} - -// Caller represents the original call site for a log line, after considering -// logr.Logger.WithCallDepth and logr.Logger.WithCallStackHelper. The File and -// Line fields will always be provided, while the Func field is optional. -// Users can set the render hook fields in Options to examine logged key-value -// pairs, one of which will be {"caller", Caller} if the Options.LogCaller -// field is enabled for the given MessageClass. -type Caller struct { - // File is the basename of the file for this call site. - File string `json:"file"` - // Line is the line number in the file for this call site. 
- Line int `json:"line"` - // Func is the function name for this call site, or empty if - // Options.LogCallerFunc is not enabled. - Func string `json:"function,omitempty"` -} - -func (f Formatter) caller() Caller { - // +1 for this frame, +1 for Info/Error. - pc, file, line, ok := runtime.Caller(f.depth + 2) - if !ok { - return Caller{"", 0, ""} - } - fn := "" - if f.opts.LogCallerFunc { - if fp := runtime.FuncForPC(pc); fp != nil { - fn = fp.Name() - } - } - - return Caller{filepath.Base(file), line, fn} -} - -const noValue = "" - -func (f Formatter) nonStringKey(v interface{}) string { - return fmt.Sprintf("", f.snippet(v)) -} - -// snippet produces a short snippet string of an arbitrary value. -func (f Formatter) snippet(v interface{}) string { - const snipLen = 16 - - snip := f.pretty(v) - if len(snip) > snipLen { - snip = snip[:snipLen] - } - return snip -} - -// sanitize ensures that a list of key-value pairs has a value for every key -// (adding a value if needed) and that each key is a string (substituting a key -// if needed). -func (f Formatter) sanitize(kvList []interface{}) []interface{} { - if len(kvList)%2 != 0 { - kvList = append(kvList, noValue) - } - for i := 0; i < len(kvList); i += 2 { - _, ok := kvList[i].(string) - if !ok { - kvList[i] = f.nonStringKey(kvList[i]) - } - } - return kvList -} - -// Init configures this Formatter from runtime info, such as the call depth -// imposed by logr itself. -// Note that this receiver is a pointer, so depth can be saved. -func (f *Formatter) Init(info logr.RuntimeInfo) { - f.depth += info.CallDepth -} - -// Enabled checks whether an info message at the given level should be logged. -func (f Formatter) Enabled(level int) bool { - return level <= f.opts.Verbosity -} - -// GetDepth returns the current depth of this Formatter. This is useful for -// implementations which do their own caller attribution. 
-func (f Formatter) GetDepth() int { - return f.depth -} - -// FormatInfo renders an Info log message into strings. The prefix will be -// empty when no names were set (via AddNames), or when the output is -// configured for JSON. -func (f Formatter) FormatInfo(level int, msg string, kvList []interface{}) (prefix, argsStr string) { - args := make([]interface{}, 0, 64) // using a constant here impacts perf - prefix = f.prefix - if f.outputFormat == outputJSON { - args = append(args, "logger", prefix) - prefix = "" - } - if f.opts.LogTimestamp { - args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat)) - } - if policy := f.opts.LogCaller; policy == All || policy == Info { - args = append(args, "caller", f.caller()) - } - args = append(args, "level", level, "msg", msg) - return prefix, f.render(args, kvList) -} - -// FormatError renders an Error log message into strings. The prefix will be -// empty when no names were set (via AddNames), or when the output is -// configured for JSON. -func (f Formatter) FormatError(err error, msg string, kvList []interface{}) (prefix, argsStr string) { - args := make([]interface{}, 0, 64) // using a constant here impacts perf - prefix = f.prefix - if f.outputFormat == outputJSON { - args = append(args, "logger", prefix) - prefix = "" - } - if f.opts.LogTimestamp { - args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat)) - } - if policy := f.opts.LogCaller; policy == All || policy == Error { - args = append(args, "caller", f.caller()) - } - args = append(args, "msg", msg) - var loggableErr interface{} - if err != nil { - loggableErr = err.Error() - } - args = append(args, "error", loggableErr) - return f.prefix, f.render(args, kvList) -} - -// AddName appends the specified name. funcr uses '/' characters to separate -// name elements. Callers should not pass '/' in the provided name string, but -// this library does not actually enforce that. 
-func (f *Formatter) AddName(name string) { - if len(f.prefix) > 0 { - f.prefix += "/" - } - f.prefix += name -} - -// AddValues adds key-value pairs to the set of saved values to be logged with -// each log line. -func (f *Formatter) AddValues(kvList []interface{}) { - // Three slice args forces a copy. - n := len(f.values) - f.values = append(f.values[:n:n], kvList...) - - vals := f.values - if hook := f.opts.RenderValuesHook; hook != nil { - vals = hook(f.sanitize(vals)) - } - - // Pre-render values, so we don't have to do it on each Info/Error call. - buf := bytes.NewBuffer(make([]byte, 0, 1024)) - f.flatten(buf, vals, false, true) // escape user-provided keys - f.valuesStr = buf.String() -} - -// AddCallDepth increases the number of stack-frames to skip when attributing -// the log line to a file and line. -func (f *Formatter) AddCallDepth(depth int) { - f.depth += depth -} diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go deleted file mode 100644 index e027aea3..00000000 --- a/vendor/github.com/go-logr/logr/logr.go +++ /dev/null @@ -1,550 +0,0 @@ -/* -Copyright 2019 The logr Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This design derives from Dave Cheney's blog: -// http://dave.cheney.net/2015/11/05/lets-talk-about-logging - -// Package logr defines a general-purpose logging API and abstract interfaces -// to back that API. 
Packages in the Go ecosystem can depend on this package, -// while callers can implement logging with whatever backend is appropriate. -// -// # Usage -// -// Logging is done using a Logger instance. Logger is a concrete type with -// methods, which defers the actual logging to a LogSink interface. The main -// methods of Logger are Info() and Error(). Arguments to Info() and Error() -// are key/value pairs rather than printf-style formatted strings, emphasizing -// "structured logging". -// -// With Go's standard log package, we might write: -// -// log.Printf("setting target value %s", targetValue) -// -// With logr's structured logging, we'd write: -// -// logger.Info("setting target", "value", targetValue) -// -// Errors are much the same. Instead of: -// -// log.Printf("failed to open the pod bay door for user %s: %v", user, err) -// -// We'd write: -// -// logger.Error(err, "failed to open the pod bay door", "user", user) -// -// Info() and Error() are very similar, but they are separate methods so that -// LogSink implementations can choose to do things like attach additional -// information (such as stack traces) on calls to Error(). Error() messages are -// always logged, regardless of the current verbosity. If there is no error -// instance available, passing nil is valid. -// -// # Verbosity -// -// Often we want to log information only when the application in "verbose -// mode". To write log lines that are more verbose, Logger has a V() method. -// The higher the V-level of a log line, the less critical it is considered. -// Log-lines with V-levels that are not enabled (as per the LogSink) will not -// be written. Level V(0) is the default, and logger.V(0).Info() has the same -// meaning as logger.Info(). Negative V-levels have the same meaning as V(0). -// Error messages do not have a verbosity level and are always logged. 
-// -// Where we might have written: -// -// if flVerbose >= 2 { -// log.Printf("an unusual thing happened") -// } -// -// We can write: -// -// logger.V(2).Info("an unusual thing happened") -// -// # Logger Names -// -// Logger instances can have name strings so that all messages logged through -// that instance have additional context. For example, you might want to add -// a subsystem name: -// -// logger.WithName("compactor").Info("started", "time", time.Now()) -// -// The WithName() method returns a new Logger, which can be passed to -// constructors or other functions for further use. Repeated use of WithName() -// will accumulate name "segments". These name segments will be joined in some -// way by the LogSink implementation. It is strongly recommended that name -// segments contain simple identifiers (letters, digits, and hyphen), and do -// not contain characters that could muddle the log output or confuse the -// joining operation (e.g. whitespace, commas, periods, slashes, brackets, -// quotes, etc). -// -// # Saved Values -// -// Logger instances can store any number of key/value pairs, which will be -// logged alongside all messages logged through that instance. For example, -// you might want to create a Logger instance per managed object: -// -// With the standard log package, we might write: -// -// log.Printf("decided to set field foo to value %q for object %s/%s", -// targetValue, object.Namespace, object.Name) -// -// With logr we'd write: -// -// // Elsewhere: set up the logger to log the object name. -// obj.logger = mainLogger.WithValues( -// "name", obj.name, "namespace", obj.namespace) -// -// // later on... -// obj.logger.Info("setting foo", "value", targetValue) -// -// # Best Practices -// -// Logger has very few hard rules, with the goal that LogSink implementations -// might have a lot of freedom to differentiate. There are, however, some -// things to consider. 
-// -// The log message consists of a constant message attached to the log line. -// This should generally be a simple description of what's occurring, and should -// never be a format string. Variable information can then be attached using -// named values. -// -// Keys are arbitrary strings, but should generally be constant values. Values -// may be any Go value, but how the value is formatted is determined by the -// LogSink implementation. -// -// Logger instances are meant to be passed around by value. Code that receives -// such a value can call its methods without having to check whether the -// instance is ready for use. -// -// Calling methods with the null logger (Logger{}) as instance will crash -// because it has no LogSink. Therefore this null logger should never be passed -// around. For cases where passing a logger is optional, a pointer to Logger -// should be used. -// -// # Key Naming Conventions -// -// Keys are not strictly required to conform to any specification or regex, but -// it is recommended that they: -// - be human-readable and meaningful (not auto-generated or simple ordinals) -// - be constant (not dependent on input data) -// - contain only printable characters -// - not contain whitespace or punctuation -// - use lower case for simple keys and lowerCamelCase for more complex ones -// -// These guidelines help ensure that log data is processed properly regardless -// of the log implementation. For example, log implementations will try to -// output JSON data or will store data for later database (e.g. SQL) queries. 
-// -// While users are generally free to use key names of their choice, it's -// generally best to avoid using the following keys, as they're frequently used -// by implementations: -// - "caller": the calling information (file/line) of a particular log line -// - "error": the underlying error value in the `Error` method -// - "level": the log level -// - "logger": the name of the associated logger -// - "msg": the log message -// - "stacktrace": the stack trace associated with a particular log line or -// error (often from the `Error` message) -// - "ts": the timestamp for a log line -// -// Implementations are encouraged to make use of these keys to represent the -// above concepts, when necessary (for example, in a pure-JSON output form, it -// would be necessary to represent at least message and timestamp as ordinary -// named values). -// -// # Break Glass -// -// Implementations may choose to give callers access to the underlying -// logging implementation. The recommended pattern for this is: -// -// // Underlier exposes access to the underlying logging implementation. -// // Since callers only have a logr.Logger, they have to know which -// // implementation is in use, so this interface is less of an abstraction -// // and more of way to test type conversion. -// type Underlier interface { -// GetUnderlying() -// } -// -// Logger grants access to the sink to enable type assertions like this: -// -// func DoSomethingWithImpl(log logr.Logger) { -// if underlier, ok := log.GetSink().(impl.Underlier); ok { -// implLogger := underlier.GetUnderlying() -// ... -// } -// } -// -// Custom `With*` functions can be implemented by copying the complete -// Logger struct and replacing the sink in the copy: -// -// // WithFooBar changes the foobar parameter in the log sink and returns a -// // new logger with that modified sink. It does nothing for loggers where -// // the sink doesn't support that parameter. 
-// func WithFoobar(log logr.Logger, foobar int) logr.Logger { -// if foobarLogSink, ok := log.GetSink().(FoobarSink); ok { -// log = log.WithSink(foobarLogSink.WithFooBar(foobar)) -// } -// return log -// } -// -// Don't use New to construct a new Logger with a LogSink retrieved from an -// existing Logger. Source code attribution might not work correctly and -// unexported fields in Logger get lost. -// -// Beware that the same LogSink instance may be shared by different logger -// instances. Calling functions that modify the LogSink will affect all of -// those. -package logr - -import ( - "context" -) - -// New returns a new Logger instance. This is primarily used by libraries -// implementing LogSink, rather than end users. Passing a nil sink will create -// a Logger which discards all log lines. -func New(sink LogSink) Logger { - logger := Logger{} - logger.setSink(sink) - if sink != nil { - sink.Init(runtimeInfo) - } - return logger -} - -// setSink stores the sink and updates any related fields. It mutates the -// logger and thus is only safe to use for loggers that are not currently being -// used concurrently. -func (l *Logger) setSink(sink LogSink) { - l.sink = sink -} - -// GetSink returns the stored sink. -func (l Logger) GetSink() LogSink { - return l.sink -} - -// WithSink returns a copy of the logger with the new sink. -func (l Logger) WithSink(sink LogSink) Logger { - l.setSink(sink) - return l -} - -// Logger is an interface to an abstract logging implementation. This is a -// concrete type for performance reasons, but all the real work is passed on to -// a LogSink. Implementations of LogSink should provide their own constructors -// that return Logger, not LogSink. -// -// The underlying sink can be accessed through GetSink and be modified through -// WithSink. This enables the implementation of custom extensions (see "Break -// Glass" in the package documentation). Normally the sink should be used only -// indirectly. 
-type Logger struct { - sink LogSink - level int -} - -// Enabled tests whether this Logger is enabled. For example, commandline -// flags might be used to set the logging verbosity and disable some info logs. -func (l Logger) Enabled() bool { - return l.sink != nil && l.sink.Enabled(l.level) -} - -// Info logs a non-error message with the given key/value pairs as context. -// -// The msg argument should be used to add some constant description to the log -// line. The key/value pairs can then be used to add additional variable -// information. The key/value pairs must alternate string keys and arbitrary -// values. -func (l Logger) Info(msg string, keysAndValues ...interface{}) { - if l.sink == nil { - return - } - if l.Enabled() { - if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { - withHelper.GetCallStackHelper()() - } - l.sink.Info(l.level, msg, keysAndValues...) - } -} - -// Error logs an error, with the given message and key/value pairs as context. -// It functions similarly to Info, but may have unique behavior, and should be -// preferred for logging errors (see the package documentations for more -// information). The log message will always be emitted, regardless of -// verbosity level. -// -// The msg argument should be used to add context to any underlying error, -// while the err argument should be used to attach the actual error that -// triggered this log line, if present. The err parameter is optional -// and nil may be passed instead of an error instance. -func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) { - if l.sink == nil { - return - } - if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { - withHelper.GetCallStackHelper()() - } - l.sink.Error(err, msg, keysAndValues...) -} - -// V returns a new Logger instance for a specific verbosity level, relative to -// this Logger. In other words, V-levels are additive. A higher verbosity -// level means a log message is less important. 
Negative V-levels are treated -// as 0. -func (l Logger) V(level int) Logger { - if l.sink == nil { - return l - } - if level < 0 { - level = 0 - } - l.level += level - return l -} - -// WithValues returns a new Logger instance with additional key/value pairs. -// See Info for documentation on how key/value pairs work. -func (l Logger) WithValues(keysAndValues ...interface{}) Logger { - if l.sink == nil { - return l - } - l.setSink(l.sink.WithValues(keysAndValues...)) - return l -} - -// WithName returns a new Logger instance with the specified name element added -// to the Logger's name. Successive calls with WithName append additional -// suffixes to the Logger's name. It's strongly recommended that name segments -// contain only letters, digits, and hyphens (see the package documentation for -// more information). -func (l Logger) WithName(name string) Logger { - if l.sink == nil { - return l - } - l.setSink(l.sink.WithName(name)) - return l -} - -// WithCallDepth returns a Logger instance that offsets the call stack by the -// specified number of frames when logging call site information, if possible. -// This is useful for users who have helper functions between the "real" call -// site and the actual calls to Logger methods. If depth is 0 the attribution -// should be to the direct caller of this function. If depth is 1 the -// attribution should skip 1 call frame, and so on. Successive calls to this -// are additive. -// -// If the underlying log implementation supports a WithCallDepth(int) method, -// it will be called and the result returned. If the implementation does not -// support CallDepthLogSink, the original Logger will be returned. -// -// To skip one level, WithCallStackHelper() should be used instead of -// WithCallDepth(1) because it works with implementions that support the -// CallDepthLogSink and/or CallStackHelperLogSink interfaces. 
-func (l Logger) WithCallDepth(depth int) Logger { - if l.sink == nil { - return l - } - if withCallDepth, ok := l.sink.(CallDepthLogSink); ok { - l.setSink(withCallDepth.WithCallDepth(depth)) - } - return l -} - -// WithCallStackHelper returns a new Logger instance that skips the direct -// caller when logging call site information, if possible. This is useful for -// users who have helper functions between the "real" call site and the actual -// calls to Logger methods and want to support loggers which depend on marking -// each individual helper function, like loggers based on testing.T. -// -// In addition to using that new logger instance, callers also must call the -// returned function. -// -// If the underlying log implementation supports a WithCallDepth(int) method, -// WithCallDepth(1) will be called to produce a new logger. If it supports a -// WithCallStackHelper() method, that will be also called. If the -// implementation does not support either of these, the original Logger will be -// returned. -func (l Logger) WithCallStackHelper() (func(), Logger) { - if l.sink == nil { - return func() {}, l - } - var helper func() - if withCallDepth, ok := l.sink.(CallDepthLogSink); ok { - l.setSink(withCallDepth.WithCallDepth(1)) - } - if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { - helper = withHelper.GetCallStackHelper() - } else { - helper = func() {} - } - return helper, l -} - -// IsZero returns true if this logger is an uninitialized zero value -func (l Logger) IsZero() bool { - return l.sink == nil -} - -// contextKey is how we find Loggers in a context.Context. -type contextKey struct{} - -// FromContext returns a Logger from ctx or an error if no Logger is found. -func FromContext(ctx context.Context) (Logger, error) { - if v, ok := ctx.Value(contextKey{}).(Logger); ok { - return v, nil - } - - return Logger{}, notFoundError{} -} - -// notFoundError exists to carry an IsNotFound method. 
-type notFoundError struct{} - -func (notFoundError) Error() string { - return "no logr.Logger was present" -} - -func (notFoundError) IsNotFound() bool { - return true -} - -// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this -// returns a Logger that discards all log messages. -func FromContextOrDiscard(ctx context.Context) Logger { - if v, ok := ctx.Value(contextKey{}).(Logger); ok { - return v - } - - return Discard() -} - -// NewContext returns a new Context, derived from ctx, which carries the -// provided Logger. -func NewContext(ctx context.Context, logger Logger) context.Context { - return context.WithValue(ctx, contextKey{}, logger) -} - -// RuntimeInfo holds information that the logr "core" library knows which -// LogSinks might want to know. -type RuntimeInfo struct { - // CallDepth is the number of call frames the logr library adds between the - // end-user and the LogSink. LogSink implementations which choose to print - // the original logging site (e.g. file & line) should climb this many - // additional frames to find it. - CallDepth int -} - -// runtimeInfo is a static global. It must not be changed at run time. -var runtimeInfo = RuntimeInfo{ - CallDepth: 1, -} - -// LogSink represents a logging implementation. End-users will generally not -// interact with this type. -type LogSink interface { - // Init receives optional information about the logr library for LogSink - // implementations that need it. - Init(info RuntimeInfo) - - // Enabled tests whether this LogSink is enabled at the specified V-level. - // For example, commandline flags might be used to set the logging - // verbosity and disable some info logs. - Enabled(level int) bool - - // Info logs a non-error message with the given key/value pairs as context. - // The level argument is provided for optional logging. This method will - // only be called when Enabled(level) is true. See Logger.Info for more - // details. 
- Info(level int, msg string, keysAndValues ...interface{}) - - // Error logs an error, with the given message and key/value pairs as - // context. See Logger.Error for more details. - Error(err error, msg string, keysAndValues ...interface{}) - - // WithValues returns a new LogSink with additional key/value pairs. See - // Logger.WithValues for more details. - WithValues(keysAndValues ...interface{}) LogSink - - // WithName returns a new LogSink with the specified name appended. See - // Logger.WithName for more details. - WithName(name string) LogSink -} - -// CallDepthLogSink represents a LogSink that knows how to climb the call stack -// to identify the original call site and can offset the depth by a specified -// number of frames. This is useful for users who have helper functions -// between the "real" call site and the actual calls to Logger methods. -// Implementations that log information about the call site (such as file, -// function, or line) would otherwise log information about the intermediate -// helper functions. -// -// This is an optional interface and implementations are not required to -// support it. -type CallDepthLogSink interface { - // WithCallDepth returns a LogSink that will offset the call - // stack by the specified number of frames when logging call - // site information. - // - // If depth is 0, the LogSink should skip exactly the number - // of call frames defined in RuntimeInfo.CallDepth when Info - // or Error are called, i.e. the attribution should be to the - // direct caller of Logger.Info or Logger.Error. - // - // If depth is 1 the attribution should skip 1 call frame, and so on. - // Successive calls to this are additive. - WithCallDepth(depth int) LogSink -} - -// CallStackHelperLogSink represents a LogSink that knows how to climb -// the call stack to identify the original call site and can skip -// intermediate helper functions if they mark themselves as -// helper. Go's testing package uses that approach. 
-// -// This is useful for users who have helper functions between the -// "real" call site and the actual calls to Logger methods. -// Implementations that log information about the call site (such as -// file, function, or line) would otherwise log information about the -// intermediate helper functions. -// -// This is an optional interface and implementations are not required -// to support it. Implementations that choose to support this must not -// simply implement it as WithCallDepth(1), because -// Logger.WithCallStackHelper will call both methods if they are -// present. This should only be implemented for LogSinks that actually -// need it, as with testing.T. -type CallStackHelperLogSink interface { - // GetCallStackHelper returns a function that must be called - // to mark the direct caller as helper function when logging - // call site information. - GetCallStackHelper() func() -} - -// Marshaler is an optional interface that logged values may choose to -// implement. Loggers with structured output, such as JSON, should -// log the object return by the MarshalLog method instead of the -// original value. -type Marshaler interface { - // MarshalLog can be used to: - // - ensure that structs are not logged as strings when the original - // value has a String method: return a different type without a - // String method - // - select which fields of a complex type should get logged: - // return a simpler struct with fewer fields - // - log unexported fields: return a different struct - // with exported fields - // - // It may return any value of any type. - MarshalLog() interface{} -} diff --git a/vendor/github.com/go-logr/stdr/LICENSE b/vendor/github.com/go-logr/stdr/LICENSE deleted file mode 100644 index 261eeb9e..00000000 --- a/vendor/github.com/go-logr/stdr/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. 
Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/go-logr/stdr/README.md b/vendor/github.com/go-logr/stdr/README.md deleted file mode 100644 index 51586678..00000000 --- a/vendor/github.com/go-logr/stdr/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# Minimal Go logging using logr and Go's standard library - -[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/stdr.svg)](https://pkg.go.dev/github.com/go-logr/stdr) - -This package implements the [logr interface](https://github.com/go-logr/logr) -in terms of Go's standard log package(https://pkg.go.dev/log). diff --git a/vendor/github.com/go-logr/stdr/stdr.go b/vendor/github.com/go-logr/stdr/stdr.go deleted file mode 100644 index 93a8aab5..00000000 --- a/vendor/github.com/go-logr/stdr/stdr.go +++ /dev/null @@ -1,170 +0,0 @@ -/* -Copyright 2019 The logr Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package stdr implements github.com/go-logr/logr.Logger in terms of -// Go's standard log package. -package stdr - -import ( - "log" - "os" - - "github.com/go-logr/logr" - "github.com/go-logr/logr/funcr" -) - -// The global verbosity level. See SetVerbosity(). -var globalVerbosity int - -// SetVerbosity sets the global level against which all info logs will be -// compared. If this is greater than or equal to the "V" of the logger, the -// message will be logged. A higher value here means more logs will be written. -// The previous verbosity value is returned. This is not concurrent-safe - -// callers must be sure to call it from only one goroutine. -func SetVerbosity(v int) int { - old := globalVerbosity - globalVerbosity = v - return old -} - -// New returns a logr.Logger which is implemented by Go's standard log package, -// or something like it. If std is nil, this will use a default logger -// instead. -// -// Example: stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))) -func New(std StdLogger) logr.Logger { - return NewWithOptions(std, Options{}) -} - -// NewWithOptions returns a logr.Logger which is implemented by Go's standard -// log package, or something like it. See New for details. -func NewWithOptions(std StdLogger, opts Options) logr.Logger { - if std == nil { - // Go's log.Default() is only available in 1.16 and higher. 
- std = log.New(os.Stderr, "", log.LstdFlags) - } - - if opts.Depth < 0 { - opts.Depth = 0 - } - - fopts := funcr.Options{ - LogCaller: funcr.MessageClass(opts.LogCaller), - } - - sl := &logger{ - Formatter: funcr.NewFormatter(fopts), - std: std, - } - - // For skipping our own logger.Info/Error. - sl.Formatter.AddCallDepth(1 + opts.Depth) - - return logr.New(sl) -} - -// Options carries parameters which influence the way logs are generated. -type Options struct { - // Depth biases the assumed number of call frames to the "true" caller. - // This is useful when the calling code calls a function which then calls - // stdr (e.g. a logging shim to another API). Values less than zero will - // be treated as zero. - Depth int - - // LogCaller tells stdr to add a "caller" key to some or all log lines. - // Go's log package has options to log this natively, too. - LogCaller MessageClass - - // TODO: add an option to log the date/time -} - -// MessageClass indicates which category or categories of messages to consider. -type MessageClass int - -const ( - // None ignores all message classes. - None MessageClass = iota - // All considers all message classes. - All - // Info only considers info messages. - Info - // Error only considers error messages. - Error -) - -// StdLogger is the subset of the Go stdlib log.Logger API that is needed for -// this adapter. -type StdLogger interface { - // Output is the same as log.Output and log.Logger.Output. 
- Output(calldepth int, logline string) error -} - -type logger struct { - funcr.Formatter - std StdLogger -} - -var _ logr.LogSink = &logger{} -var _ logr.CallDepthLogSink = &logger{} - -func (l logger) Enabled(level int) bool { - return globalVerbosity >= level -} - -func (l logger) Info(level int, msg string, kvList ...interface{}) { - prefix, args := l.FormatInfo(level, msg, kvList) - if prefix != "" { - args = prefix + ": " + args - } - _ = l.std.Output(l.Formatter.GetDepth()+1, args) -} - -func (l logger) Error(err error, msg string, kvList ...interface{}) { - prefix, args := l.FormatError(err, msg, kvList) - if prefix != "" { - args = prefix + ": " + args - } - _ = l.std.Output(l.Formatter.GetDepth()+1, args) -} - -func (l logger) WithName(name string) logr.LogSink { - l.Formatter.AddName(name) - return &l -} - -func (l logger) WithValues(kvList ...interface{}) logr.LogSink { - l.Formatter.AddValues(kvList) - return &l -} - -func (l logger) WithCallDepth(depth int) logr.LogSink { - l.Formatter.AddCallDepth(depth) - return &l -} - -// Underlier exposes access to the underlying logging implementation. Since -// callers only have a logr.Logger, they have to know which implementation is -// in use, so this interface is less of an abstraction and more of way to test -// type conversion. -type Underlier interface { - GetUnderlying() StdLogger -} - -// GetUnderlying returns the StdLogger underneath this logger. Since StdLogger -// is itself an interface, the result may or may not be a Go log.Logger. -func (l logger) GetUnderlying() StdLogger { - return l.std -} diff --git a/vendor/github.com/go-playground/validator/v10/Makefile b/vendor/github.com/go-playground/validator/v10/Makefile index ec3455bd..09f171ba 100644 --- a/vendor/github.com/go-playground/validator/v10/Makefile +++ b/vendor/github.com/go-playground/validator/v10/Makefile @@ -13,6 +13,6 @@ test: $(GOCMD) test -cover -race ./... bench: - $(GOCMD) test -bench=. -benchmem ./... 
+ $(GOCMD) test -run=NONE -bench=. -benchmem ./... .PHONY: test lint linters-install \ No newline at end of file diff --git a/vendor/github.com/go-playground/validator/v10/README.md b/vendor/github.com/go-playground/validator/v10/README.md index b2e0e2d9..30b60f80 100644 --- a/vendor/github.com/go-playground/validator/v10/README.md +++ b/vendor/github.com/go-playground/validator/v10/README.md @@ -1,7 +1,7 @@ Package validator ================= [![Join the chat at https://gitter.im/go-playground/validator](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/go-playground/validator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) -![Project status](https://img.shields.io/badge/version-10.15.0-green.svg) +![Project status](https://img.shields.io/badge/version-10.15.3-green.svg) [![Build Status](https://travis-ci.org/go-playground/validator.svg?branch=master)](https://travis-ci.org/go-playground/validator) [![Coverage Status](https://coveralls.io/repos/go-playground/validator/badge.svg?branch=master&service=github)](https://coveralls.io/github/go-playground/validator?branch=master) [![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/validator)](https://goreportcard.com/report/github.com/go-playground/validator) @@ -67,6 +67,12 @@ Please see https://pkg.go.dev/github.com/go-playground/validator/v10 for detaile Baked-in Validations ------ +### Special Notes: +- If new to using validator it is highly recommended to initialize it using the `WithRequiredStructEnabled` option which is opt-in to new behaviour that will become the default behaviour in v11+. See documentation for more details. 
+```go +validate := validator.New(validator.WithRequiredStructEnabled()) +``` + ### Fields: | Tag | Description | @@ -260,71 +266,72 @@ Benchmarks ------ ###### Run on MacBook Pro (15-inch, 2017) go version go1.10.2 darwin/amd64 ```go +go version go1.21.0 darwin/arm64 goos: darwin -goarch: amd64 -pkg: github.com/go-playground/validator -BenchmarkFieldSuccess-8 20000000 83.6 ns/op 0 B/op 0 allocs/op -BenchmarkFieldSuccessParallel-8 50000000 26.8 ns/op 0 B/op 0 allocs/op -BenchmarkFieldFailure-8 5000000 291 ns/op 208 B/op 4 allocs/op -BenchmarkFieldFailureParallel-8 20000000 107 ns/op 208 B/op 4 allocs/op -BenchmarkFieldArrayDiveSuccess-8 2000000 623 ns/op 201 B/op 11 allocs/op -BenchmarkFieldArrayDiveSuccessParallel-8 10000000 237 ns/op 201 B/op 11 allocs/op -BenchmarkFieldArrayDiveFailure-8 2000000 859 ns/op 412 B/op 16 allocs/op -BenchmarkFieldArrayDiveFailureParallel-8 5000000 335 ns/op 413 B/op 16 allocs/op -BenchmarkFieldMapDiveSuccess-8 1000000 1292 ns/op 432 B/op 18 allocs/op -BenchmarkFieldMapDiveSuccessParallel-8 3000000 467 ns/op 432 B/op 18 allocs/op -BenchmarkFieldMapDiveFailure-8 1000000 1082 ns/op 512 B/op 16 allocs/op -BenchmarkFieldMapDiveFailureParallel-8 5000000 425 ns/op 512 B/op 16 allocs/op -BenchmarkFieldMapDiveWithKeysSuccess-8 1000000 1539 ns/op 480 B/op 21 allocs/op -BenchmarkFieldMapDiveWithKeysSuccessParallel-8 3000000 613 ns/op 480 B/op 21 allocs/op -BenchmarkFieldMapDiveWithKeysFailure-8 1000000 1413 ns/op 721 B/op 21 allocs/op -BenchmarkFieldMapDiveWithKeysFailureParallel-8 3000000 575 ns/op 721 B/op 21 allocs/op -BenchmarkFieldCustomTypeSuccess-8 10000000 216 ns/op 32 B/op 2 allocs/op -BenchmarkFieldCustomTypeSuccessParallel-8 20000000 82.2 ns/op 32 B/op 2 allocs/op -BenchmarkFieldCustomTypeFailure-8 5000000 274 ns/op 208 B/op 4 allocs/op -BenchmarkFieldCustomTypeFailureParallel-8 20000000 116 ns/op 208 B/op 4 allocs/op -BenchmarkFieldOrTagSuccess-8 2000000 740 ns/op 16 B/op 1 allocs/op -BenchmarkFieldOrTagSuccessParallel-8 3000000 474 
ns/op 16 B/op 1 allocs/op -BenchmarkFieldOrTagFailure-8 3000000 471 ns/op 224 B/op 5 allocs/op -BenchmarkFieldOrTagFailureParallel-8 3000000 414 ns/op 224 B/op 5 allocs/op -BenchmarkStructLevelValidationSuccess-8 10000000 213 ns/op 32 B/op 2 allocs/op -BenchmarkStructLevelValidationSuccessParallel-8 20000000 91.8 ns/op 32 B/op 2 allocs/op -BenchmarkStructLevelValidationFailure-8 3000000 473 ns/op 304 B/op 8 allocs/op -BenchmarkStructLevelValidationFailureParallel-8 10000000 234 ns/op 304 B/op 8 allocs/op -BenchmarkStructSimpleCustomTypeSuccess-8 5000000 385 ns/op 32 B/op 2 allocs/op -BenchmarkStructSimpleCustomTypeSuccessParallel-8 10000000 161 ns/op 32 B/op 2 allocs/op -BenchmarkStructSimpleCustomTypeFailure-8 2000000 640 ns/op 424 B/op 9 allocs/op -BenchmarkStructSimpleCustomTypeFailureParallel-8 5000000 318 ns/op 440 B/op 10 allocs/op -BenchmarkStructFilteredSuccess-8 2000000 597 ns/op 288 B/op 9 allocs/op -BenchmarkStructFilteredSuccessParallel-8 10000000 266 ns/op 288 B/op 9 allocs/op -BenchmarkStructFilteredFailure-8 3000000 454 ns/op 256 B/op 7 allocs/op -BenchmarkStructFilteredFailureParallel-8 10000000 214 ns/op 256 B/op 7 allocs/op -BenchmarkStructPartialSuccess-8 3000000 502 ns/op 256 B/op 6 allocs/op -BenchmarkStructPartialSuccessParallel-8 10000000 225 ns/op 256 B/op 6 allocs/op -BenchmarkStructPartialFailure-8 2000000 702 ns/op 480 B/op 11 allocs/op -BenchmarkStructPartialFailureParallel-8 5000000 329 ns/op 480 B/op 11 allocs/op -BenchmarkStructExceptSuccess-8 2000000 793 ns/op 496 B/op 12 allocs/op -BenchmarkStructExceptSuccessParallel-8 10000000 193 ns/op 240 B/op 5 allocs/op -BenchmarkStructExceptFailure-8 2000000 639 ns/op 464 B/op 10 allocs/op -BenchmarkStructExceptFailureParallel-8 5000000 300 ns/op 464 B/op 10 allocs/op -BenchmarkStructSimpleCrossFieldSuccess-8 3000000 417 ns/op 72 B/op 3 allocs/op -BenchmarkStructSimpleCrossFieldSuccessParallel-8 10000000 163 ns/op 72 B/op 3 allocs/op -BenchmarkStructSimpleCrossFieldFailure-8 2000000 645 ns/op 
304 B/op 8 allocs/op -BenchmarkStructSimpleCrossFieldFailureParallel-8 5000000 285 ns/op 304 B/op 8 allocs/op -BenchmarkStructSimpleCrossStructCrossFieldSuccess-8 3000000 588 ns/op 80 B/op 4 allocs/op -BenchmarkStructSimpleCrossStructCrossFieldSuccessParallel-8 10000000 221 ns/op 80 B/op 4 allocs/op -BenchmarkStructSimpleCrossStructCrossFieldFailure-8 2000000 868 ns/op 320 B/op 9 allocs/op -BenchmarkStructSimpleCrossStructCrossFieldFailureParallel-8 5000000 337 ns/op 320 B/op 9 allocs/op -BenchmarkStructSimpleSuccess-8 5000000 260 ns/op 0 B/op 0 allocs/op -BenchmarkStructSimpleSuccessParallel-8 20000000 90.6 ns/op 0 B/op 0 allocs/op -BenchmarkStructSimpleFailure-8 2000000 619 ns/op 424 B/op 9 allocs/op -BenchmarkStructSimpleFailureParallel-8 5000000 296 ns/op 424 B/op 9 allocs/op -BenchmarkStructComplexSuccess-8 1000000 1454 ns/op 128 B/op 8 allocs/op -BenchmarkStructComplexSuccessParallel-8 3000000 579 ns/op 128 B/op 8 allocs/op -BenchmarkStructComplexFailure-8 300000 4140 ns/op 3041 B/op 53 allocs/op -BenchmarkStructComplexFailureParallel-8 1000000 2127 ns/op 3041 B/op 53 allocs/op -BenchmarkOneof-8 10000000 140 ns/op 0 B/op 0 allocs/op -BenchmarkOneofParallel-8 20000000 70.1 ns/op 0 B/op 0 allocs/op +goarch: arm64 +pkg: github.com/go-playground/validator/v10 +BenchmarkFieldSuccess-8 33142266 35.94 ns/op 0 B/op 0 allocs/op +BenchmarkFieldSuccessParallel-8 200816191 6.568 ns/op 0 B/op 0 allocs/op +BenchmarkFieldFailure-8 6779707 175.1 ns/op 200 B/op 4 allocs/op +BenchmarkFieldFailureParallel-8 11044147 108.4 ns/op 200 B/op 4 allocs/op +BenchmarkFieldArrayDiveSuccess-8 6054232 194.4 ns/op 97 B/op 5 allocs/op +BenchmarkFieldArrayDiveSuccessParallel-8 12523388 94.07 ns/op 97 B/op 5 allocs/op +BenchmarkFieldArrayDiveFailure-8 3587043 334.3 ns/op 300 B/op 10 allocs/op +BenchmarkFieldArrayDiveFailureParallel-8 5816665 200.8 ns/op 300 B/op 10 allocs/op +BenchmarkFieldMapDiveSuccess-8 2217910 540.1 ns/op 288 B/op 14 allocs/op +BenchmarkFieldMapDiveSuccessParallel-8 
4446698 258.7 ns/op 288 B/op 14 allocs/op +BenchmarkFieldMapDiveFailure-8 2392759 504.6 ns/op 376 B/op 13 allocs/op +BenchmarkFieldMapDiveFailureParallel-8 4244199 286.9 ns/op 376 B/op 13 allocs/op +BenchmarkFieldMapDiveWithKeysSuccess-8 2005857 592.1 ns/op 288 B/op 14 allocs/op +BenchmarkFieldMapDiveWithKeysSuccessParallel-8 4400850 296.9 ns/op 288 B/op 14 allocs/op +BenchmarkFieldMapDiveWithKeysFailure-8 1850227 643.8 ns/op 553 B/op 16 allocs/op +BenchmarkFieldMapDiveWithKeysFailureParallel-8 3293233 375.1 ns/op 553 B/op 16 allocs/op +BenchmarkFieldCustomTypeSuccess-8 12174412 98.25 ns/op 32 B/op 2 allocs/op +BenchmarkFieldCustomTypeSuccessParallel-8 34389907 35.49 ns/op 32 B/op 2 allocs/op +BenchmarkFieldCustomTypeFailure-8 7582524 156.6 ns/op 184 B/op 3 allocs/op +BenchmarkFieldCustomTypeFailureParallel-8 13019902 92.79 ns/op 184 B/op 3 allocs/op +BenchmarkFieldOrTagSuccess-8 3427260 349.4 ns/op 16 B/op 1 allocs/op +BenchmarkFieldOrTagSuccessParallel-8 15144128 81.25 ns/op 16 B/op 1 allocs/op +BenchmarkFieldOrTagFailure-8 5913546 201.9 ns/op 216 B/op 5 allocs/op +BenchmarkFieldOrTagFailureParallel-8 9810212 113.7 ns/op 216 B/op 5 allocs/op +BenchmarkStructLevelValidationSuccess-8 13456327 87.66 ns/op 16 B/op 1 allocs/op +BenchmarkStructLevelValidationSuccessParallel-8 41818888 27.77 ns/op 16 B/op 1 allocs/op +BenchmarkStructLevelValidationFailure-8 4166284 272.6 ns/op 264 B/op 7 allocs/op +BenchmarkStructLevelValidationFailureParallel-8 7594581 152.1 ns/op 264 B/op 7 allocs/op +BenchmarkStructSimpleCustomTypeSuccess-8 6508082 182.6 ns/op 32 B/op 2 allocs/op +BenchmarkStructSimpleCustomTypeSuccessParallel-8 23078605 54.78 ns/op 32 B/op 2 allocs/op +BenchmarkStructSimpleCustomTypeFailure-8 3118352 381.0 ns/op 416 B/op 9 allocs/op +BenchmarkStructSimpleCustomTypeFailureParallel-8 5300738 224.1 ns/op 432 B/op 10 allocs/op +BenchmarkStructFilteredSuccess-8 4761807 251.1 ns/op 216 B/op 5 allocs/op +BenchmarkStructFilteredSuccessParallel-8 8792598 128.6 ns/op 216 B/op 
5 allocs/op +BenchmarkStructFilteredFailure-8 5202573 232.1 ns/op 216 B/op 5 allocs/op +BenchmarkStructFilteredFailureParallel-8 9591267 121.4 ns/op 216 B/op 5 allocs/op +BenchmarkStructPartialSuccess-8 5188512 231.6 ns/op 224 B/op 4 allocs/op +BenchmarkStructPartialSuccessParallel-8 9179776 123.1 ns/op 224 B/op 4 allocs/op +BenchmarkStructPartialFailure-8 3071212 392.5 ns/op 440 B/op 9 allocs/op +BenchmarkStructPartialFailureParallel-8 5344261 223.7 ns/op 440 B/op 9 allocs/op +BenchmarkStructExceptSuccess-8 3184230 375.0 ns/op 424 B/op 8 allocs/op +BenchmarkStructExceptSuccessParallel-8 10090130 108.9 ns/op 208 B/op 3 allocs/op +BenchmarkStructExceptFailure-8 3347226 357.7 ns/op 424 B/op 8 allocs/op +BenchmarkStructExceptFailureParallel-8 5654923 209.5 ns/op 424 B/op 8 allocs/op +BenchmarkStructSimpleCrossFieldSuccess-8 5232265 229.1 ns/op 56 B/op 3 allocs/op +BenchmarkStructSimpleCrossFieldSuccessParallel-8 17436674 64.75 ns/op 56 B/op 3 allocs/op +BenchmarkStructSimpleCrossFieldFailure-8 3128613 383.6 ns/op 272 B/op 8 allocs/op +BenchmarkStructSimpleCrossFieldFailureParallel-8 6994113 168.8 ns/op 272 B/op 8 allocs/op +BenchmarkStructSimpleCrossStructCrossFieldSuccess-8 3506487 340.9 ns/op 64 B/op 4 allocs/op +BenchmarkStructSimpleCrossStructCrossFieldSuccessParallel-8 13431300 91.77 ns/op 64 B/op 4 allocs/op +BenchmarkStructSimpleCrossStructCrossFieldFailure-8 2410566 500.9 ns/op 288 B/op 9 allocs/op +BenchmarkStructSimpleCrossStructCrossFieldFailureParallel-8 6344510 188.2 ns/op 288 B/op 9 allocs/op +BenchmarkStructSimpleSuccess-8 8922726 133.8 ns/op 0 B/op 0 allocs/op +BenchmarkStructSimpleSuccessParallel-8 55291153 23.63 ns/op 0 B/op 0 allocs/op +BenchmarkStructSimpleFailure-8 3171553 378.4 ns/op 416 B/op 9 allocs/op +BenchmarkStructSimpleFailureParallel-8 5571692 212.0 ns/op 416 B/op 9 allocs/op +BenchmarkStructComplexSuccess-8 1683750 714.5 ns/op 224 B/op 5 allocs/op +BenchmarkStructComplexSuccessParallel-8 4578046 257.0 ns/op 224 B/op 5 allocs/op 
+BenchmarkStructComplexFailure-8 481585 2547 ns/op 3041 B/op 48 allocs/op +BenchmarkStructComplexFailureParallel-8 965764 1577 ns/op 3040 B/op 48 allocs/op +BenchmarkOneof-8 17380881 68.50 ns/op 0 B/op 0 allocs/op +BenchmarkOneofParallel-8 8084733 153.5 ns/op 0 B/op 0 allocs/op ``` Complementary Software diff --git a/vendor/github.com/go-playground/validator/v10/baked_in.go b/vendor/github.com/go-playground/validator/v10/baked_in.go index ca9eeb1d..cc92b784 100644 --- a/vendor/github.com/go-playground/validator/v10/baked_in.go +++ b/vendor/github.com/go-playground/validator/v10/baked_in.go @@ -23,7 +23,7 @@ import ( "golang.org/x/text/language" "github.com/gabriel-vasile/mimetype" - "github.com/leodido/go-urn" + urn "github.com/leodido/go-urn" ) // Func accepts a FieldLevel interface for all validation needs. The return diff --git a/vendor/github.com/go-playground/validator/v10/cache.go b/vendor/github.com/go-playground/validator/v10/cache.go index ddd37b83..bbfd2a4a 100644 --- a/vendor/github.com/go-playground/validator/v10/cache.go +++ b/vendor/github.com/go-playground/validator/v10/cache.go @@ -20,7 +20,6 @@ const ( typeOr typeKeys typeEndKeys - typeNestedStructLevel ) const ( @@ -153,7 +152,7 @@ func (v *Validate) extractStructCache(current reflect.Value, sName string) *cStr // and so only struct level caching can be used instead of combined with Field tag caching if len(tag) > 0 { - ctag, _ = v.parseFieldTagsRecursive(tag, fld, "", false) + ctag, _ = v.parseFieldTagsRecursive(tag, fld.Name, "", false) } else { // even if field doesn't have validations need cTag for traversing to potential inner/nested // elements of the field. 
@@ -172,7 +171,7 @@ func (v *Validate) extractStructCache(current reflect.Value, sName string) *cStr return cs } -func (v *Validate) parseFieldTagsRecursive(tag string, field reflect.StructField, alias string, hasAlias bool) (firstCtag *cTag, current *cTag) { +func (v *Validate) parseFieldTagsRecursive(tag string, fieldName string, alias string, hasAlias bool) (firstCtag *cTag, current *cTag) { var t string noAlias := len(alias) == 0 tags := strings.Split(tag, tagSeparator) @@ -186,9 +185,9 @@ func (v *Validate) parseFieldTagsRecursive(tag string, field reflect.StructField // check map for alias and process new tags, otherwise process as usual if tagsVal, found := v.aliases[t]; found { if i == 0 { - firstCtag, current = v.parseFieldTagsRecursive(tagsVal, field, t, true) + firstCtag, current = v.parseFieldTagsRecursive(tagsVal, fieldName, t, true) } else { - next, curr := v.parseFieldTagsRecursive(tagsVal, field, t, true) + next, curr := v.parseFieldTagsRecursive(tagsVal, fieldName, t, true) current.next, current = next, curr } @@ -236,7 +235,7 @@ func (v *Validate) parseFieldTagsRecursive(tag string, field reflect.StructField } } - current.keys, _ = v.parseFieldTagsRecursive(string(b[:len(b)-1]), field, "", false) + current.keys, _ = v.parseFieldTagsRecursive(string(b[:len(b)-1]), fieldName, "", false) continue case endKeysTag: @@ -285,18 +284,14 @@ func (v *Validate) parseFieldTagsRecursive(tag string, field reflect.StructField current.tag = vals[0] if len(current.tag) == 0 { - panic(strings.TrimSpace(fmt.Sprintf(invalidValidation, field.Name))) + panic(strings.TrimSpace(fmt.Sprintf(invalidValidation, fieldName))) } if wrapper, ok := v.validations[current.tag]; ok { current.fn = wrapper.fn current.runValidationWhenNil = wrapper.runValidatinOnNil } else { - panic(strings.TrimSpace(fmt.Sprintf(undefinedValidation, current.tag, field.Name))) - } - - if current.typeof == typeDefault && isNestedStructOrStructPtr(field) { - current.typeof = typeNestedStructLevel + 
panic(strings.TrimSpace(fmt.Sprintf(undefinedValidation, current.tag, fieldName))) } if len(orVals) > 1 { @@ -324,7 +319,7 @@ func (v *Validate) fetchCacheTag(tag string) *cTag { // isn't parsed again. ctag, found = v.tagCache.Get(tag) if !found { - ctag, _ = v.parseFieldTagsRecursive(tag, reflect.StructField{}, "", false) + ctag, _ = v.parseFieldTagsRecursive(tag, "", "", false) v.tagCache.Set(tag, ctag) } } diff --git a/vendor/github.com/go-playground/validator/v10/doc.go b/vendor/github.com/go-playground/validator/v10/doc.go index d1eff50f..c4dbb595 100644 --- a/vendor/github.com/go-playground/validator/v10/doc.go +++ b/vendor/github.com/go-playground/validator/v10/doc.go @@ -247,7 +247,7 @@ Example #2 This validates that the value is not the data types default zero value. For numbers ensures value is not zero. For strings ensures value is not "". For slices, maps, pointers, interfaces, channels and functions -ensures the value is not nil. For structs ensures value is not the zero value. +ensures the value is not nil. For structs ensures value is not the zero value when using WithRequiredStructEnabled. Usage: required diff --git a/vendor/github.com/go-playground/validator/v10/options.go b/vendor/github.com/go-playground/validator/v10/options.go new file mode 100644 index 00000000..1dea56fd --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/options.go @@ -0,0 +1,16 @@ +package validator + +// Option represents a configurations option to be applied to validator during initialization. +type Option func(*Validate) + +// WithRequiredStructEnabled enables required tag on non-pointer structs to be applied instead of ignored. +// +// This was made opt-in behaviour in order to maintain backward compatibility with the behaviour previous +// to being able to apply struct level validations on struct fields directly. 
+// +// It is recommended you enabled this as it will be the default behaviour in v11+ +func WithRequiredStructEnabled() Option { + return func(v *Validate) { + v.requiredStructEnabled = true + } +} diff --git a/vendor/github.com/go-playground/validator/v10/util.go b/vendor/github.com/go-playground/validator/v10/util.go index 084d4617..4bd947bd 100644 --- a/vendor/github.com/go-playground/validator/v10/util.go +++ b/vendor/github.com/go-playground/validator/v10/util.go @@ -292,11 +292,3 @@ func panicIf(err error) { panic(err.Error()) } } - -func isNestedStructOrStructPtr(v reflect.StructField) bool { - if v.Type == nil { - return false - } - kind := v.Type.Kind() - return kind == reflect.Struct || kind == reflect.Ptr && v.Type.Elem().Kind() == reflect.Struct -} diff --git a/vendor/github.com/go-playground/validator/v10/validator.go b/vendor/github.com/go-playground/validator/v10/validator.go index a6fa1f5d..2cae8f7e 100644 --- a/vendor/github.com/go-playground/validator/v10/validator.go +++ b/vendor/github.com/go-playground/validator/v10/validator.go @@ -99,6 +99,8 @@ func (v *validate) traverseField(ctx context.Context, parent reflect.Value, curr current, kind, v.fldIsPointer = v.extractTypeInternal(current, false) + var isNestedStruct bool + switch kind { case reflect.Ptr, reflect.Interface, reflect.Invalid: @@ -160,86 +162,61 @@ func (v *validate) traverseField(ctx context.Context, parent reflect.Value, curr } } - case reflect.Struct: - - typ = current.Type() - - if !typ.ConvertibleTo(timeType) { - - if ct != nil { - - if ct.typeof == typeStructOnly { - goto CONTINUE - } else if ct.typeof == typeIsDefault || ct.typeof == typeNestedStructLevel { - // set Field Level fields - v.slflParent = parent - v.flField = current - v.cf = cf - v.ct = ct - - if !ct.fn(ctx, v) { - v.str1 = string(append(ns, cf.altName...)) - - if v.v.hasTagNameFunc { - v.str2 = string(append(structNs, cf.name...)) - } else { - v.str2 = v.str1 - } - - v.errs = append(v.errs, - &fieldError{ - v: 
v.v, - tag: ct.aliasTag, - actualTag: ct.tag, - ns: v.str1, - structNs: v.str2, - fieldLen: uint8(len(cf.altName)), - structfieldLen: uint8(len(cf.name)), - value: current.Interface(), - param: ct.param, - kind: kind, - typ: typ, - }, - ) - return - } - } - - ct = ct.next - } - - if ct != nil && ct.typeof == typeNoStructLevel { - return - } - - CONTINUE: - // if len == 0 then validating using 'Var' or 'VarWithValue' - // Var - doesn't make much sense to do it that way, should call 'Struct', but no harm... - // VarWithField - this allows for validating against each field within the struct against a specific value - // pretty handy in certain situations - if len(cf.name) > 0 { - ns = append(append(ns, cf.altName...), '.') - structNs = append(append(structNs, cf.name...), '.') - } - - v.validateStruct(ctx, parent, current, typ, ns, structNs, ct) + if kind == reflect.Invalid { return } - } - if ct == nil || !ct.hasTag { - return + case reflect.Struct: + isNestedStruct = !current.Type().ConvertibleTo(timeType) + // For backward compatibility before struct level validation tags were supported + // as there were a number of projects relying on `required` not failing on non-pointer + // structs. Since it's basically nonsensical to use `required` with a non-pointer struct + // are explicitly skipping the required validation for it. This WILL be removed in the + // next major version. + if !v.v.requiredStructEnabled && ct != nil && ct.tag == requiredTag { + ct = ct.next + } } typ = current.Type() OUTER: for { - if ct == nil { + if ct == nil || !ct.hasTag || (isNestedStruct && len(cf.name) == 0) { + // isNestedStruct check here + if isNestedStruct { + // if len == 0 then validating using 'Var' or 'VarWithValue' + // Var - doesn't make much sense to do it that way, should call 'Struct', but no harm... 
+ // VarWithField - this allows for validating against each field within the struct against a specific value + // pretty handy in certain situations + if len(cf.name) > 0 { + ns = append(append(ns, cf.altName...), '.') + structNs = append(append(structNs, cf.name...), '.') + } + + v.validateStruct(ctx, parent, current, typ, ns, structNs, ct) + } return } switch ct.typeof { + case typeNoStructLevel: + return + + case typeStructOnly: + if isNestedStruct { + // if len == 0 then validating using 'Var' or 'VarWithValue' + // Var - doesn't make much sense to do it that way, should call 'Struct', but no harm... + // VarWithField - this allows for validating against each field within the struct against a specific value + // pretty handy in certain situations + if len(cf.name) > 0 { + ns = append(append(ns, cf.altName...), '.') + structNs = append(append(structNs, cf.name...), '.') + } + + v.validateStruct(ctx, parent, current, typ, ns, structNs, ct) + } + return case typeOmitEmpty: @@ -366,7 +343,7 @@ OUTER: ct = ct.next if ct == nil { - return + continue OUTER } if ct.typeof != typeOr { diff --git a/vendor/github.com/go-playground/validator/v10/validator_instance.go b/vendor/github.com/go-playground/validator/v10/validator_instance.go index d9dbf0ce..a4dbdd09 100644 --- a/vendor/github.com/go-playground/validator/v10/validator_instance.go +++ b/vendor/github.com/go-playground/validator/v10/validator_instance.go @@ -79,19 +79,20 @@ type internalValidationFuncWrapper struct { // Validate contains the validator settings and cache type Validate struct { - tagName string - pool *sync.Pool - hasCustomFuncs bool - hasTagNameFunc bool - tagNameFunc TagNameFunc - structLevelFuncs map[reflect.Type]StructLevelFuncCtx - customFuncs map[reflect.Type]CustomTypeFunc - aliases map[string]string - validations map[string]internalValidationFuncWrapper - transTagFunc map[ut.Translator]map[string]TranslationFunc // map[]map[]TranslationFunc - rules map[reflect.Type]map[string]string - 
tagCache *tagCache - structCache *structCache + tagName string + pool *sync.Pool + tagNameFunc TagNameFunc + structLevelFuncs map[reflect.Type]StructLevelFuncCtx + customFuncs map[reflect.Type]CustomTypeFunc + aliases map[string]string + validations map[string]internalValidationFuncWrapper + transTagFunc map[ut.Translator]map[string]TranslationFunc // map[]map[]TranslationFunc + rules map[reflect.Type]map[string]string + tagCache *tagCache + structCache *structCache + hasCustomFuncs bool + hasTagNameFunc bool + requiredStructEnabled bool } // New returns a new instance of 'validate' with sane defaults. @@ -99,7 +100,7 @@ type Validate struct { // It caches information about your struct and validations, // in essence only parsing your validation tags once per struct type. // Using multiple instances neglects the benefit of caching. -func New() *Validate { +func New(options ...Option) *Validate { tc := new(tagCache) tc.m.Store(make(map[string]*cTag)) @@ -146,6 +147,9 @@ func New() *Validate { }, } + for _, o := range options { + o(v) + } return v } diff --git a/vendor/github.com/gogf/gf/v2/LICENSE b/vendor/github.com/gogf/gf/v2/LICENSE deleted file mode 100644 index 0c20e2aa..00000000 --- a/vendor/github.com/gogf/gf/v2/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2017 john@goframe.org https://goframe.org - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/gogf/gf/v2/container/garray/garray.go b/vendor/github.com/gogf/gf/v2/container/garray/garray.go deleted file mode 100644 index 08e9ece8..00000000 --- a/vendor/github.com/gogf/gf/v2/container/garray/garray.go +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -// Package garray provides most commonly used array containers which also support concurrent-safe/unsafe switch feature. -package garray diff --git a/vendor/github.com/gogf/gf/v2/container/garray/garray_func.go b/vendor/github.com/gogf/gf/v2/container/garray/garray_func.go deleted file mode 100644 index 155cca0d..00000000 --- a/vendor/github.com/gogf/gf/v2/container/garray/garray_func.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package garray - -import "strings" - -// defaultComparatorInt for int comparison. -func defaultComparatorInt(a, b int) int { - if a < b { - return -1 - } - if a > b { - return 1 - } - return 0 -} - -// defaultComparatorStr for string comparison. 
-func defaultComparatorStr(a, b string) int { - return strings.Compare(a, b) -} - -// quickSortInt is the quick-sorting algorithm implements for int. -func quickSortInt(values []int, comparator func(a, b int) int) { - if len(values) <= 1 { - return - } - mid, i := values[0], 1 - head, tail := 0, len(values)-1 - for head < tail { - if comparator(values[i], mid) > 0 { - values[i], values[tail] = values[tail], values[i] - tail-- - } else { - values[i], values[head] = values[head], values[i] - head++ - i++ - } - } - values[head] = mid - quickSortInt(values[:head], comparator) - quickSortInt(values[head+1:], comparator) -} - -// quickSortStr is the quick-sorting algorithm implements for string. -func quickSortStr(values []string, comparator func(a, b string) int) { - if len(values) <= 1 { - return - } - mid, i := values[0], 1 - head, tail := 0, len(values)-1 - for head < tail { - if comparator(values[i], mid) > 0 { - values[i], values[tail] = values[tail], values[i] - tail-- - } else { - values[i], values[head] = values[head], values[i] - head++ - i++ - } - } - values[head] = mid - quickSortStr(values[:head], comparator) - quickSortStr(values[head+1:], comparator) -} diff --git a/vendor/github.com/gogf/gf/v2/container/garray/garray_normal_any.go b/vendor/github.com/gogf/gf/v2/container/garray/garray_normal_any.go deleted file mode 100644 index 0ce08bda..00000000 --- a/vendor/github.com/gogf/gf/v2/container/garray/garray_normal_any.go +++ /dev/null @@ -1,870 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. 
- -package garray - -import ( - "bytes" - "fmt" - "math" - "sort" - - "github.com/gogf/gf/v2/errors/gcode" - "github.com/gogf/gf/v2/errors/gerror" - "github.com/gogf/gf/v2/internal/deepcopy" - "github.com/gogf/gf/v2/internal/empty" - "github.com/gogf/gf/v2/internal/json" - "github.com/gogf/gf/v2/internal/rwmutex" - "github.com/gogf/gf/v2/text/gstr" - "github.com/gogf/gf/v2/util/gconv" - "github.com/gogf/gf/v2/util/grand" -) - -// Array is a golang array with rich features. -// It contains a concurrent-safe/unsafe switch, which should be set -// when its initialization and cannot be changed then. -type Array struct { - mu rwmutex.RWMutex - array []interface{} -} - -// New creates and returns an empty array. -// The parameter `safe` is used to specify whether using array in concurrent-safety, -// which is false in default. -func New(safe ...bool) *Array { - return NewArraySize(0, 0, safe...) -} - -// NewArray is alias of New, please see New. -func NewArray(safe ...bool) *Array { - return NewArraySize(0, 0, safe...) -} - -// NewArraySize create and returns an array with given size and cap. -// The parameter `safe` is used to specify whether using array in concurrent-safety, -// which is false in default. -func NewArraySize(size int, cap int, safe ...bool) *Array { - return &Array{ - mu: rwmutex.Create(safe...), - array: make([]interface{}, size, cap), - } -} - -// NewArrayRange creates and returns an array by a range from `start` to `end` -// with step value `step`. -func NewArrayRange(start, end, step int, safe ...bool) *Array { - if step == 0 { - panic(fmt.Sprintf(`invalid step value: %d`, step)) - } - slice := make([]interface{}, 0) - index := 0 - for i := start; i <= end; i += step { - slice = append(slice, i) - index++ - } - return NewArrayFrom(slice, safe...) -} - -// NewFrom is alias of NewArrayFrom. -// See NewArrayFrom. -func NewFrom(array []interface{}, safe ...bool) *Array { - return NewArrayFrom(array, safe...) 
-} - -// NewFromCopy is alias of NewArrayFromCopy. -// See NewArrayFromCopy. -func NewFromCopy(array []interface{}, safe ...bool) *Array { - return NewArrayFromCopy(array, safe...) -} - -// NewArrayFrom creates and returns an array with given slice `array`. -// The parameter `safe` is used to specify whether using array in concurrent-safety, -// which is false in default. -func NewArrayFrom(array []interface{}, safe ...bool) *Array { - return &Array{ - mu: rwmutex.Create(safe...), - array: array, - } -} - -// NewArrayFromCopy creates and returns an array from a copy of given slice `array`. -// The parameter `safe` is used to specify whether using array in concurrent-safety, -// which is false in default. -func NewArrayFromCopy(array []interface{}, safe ...bool) *Array { - newArray := make([]interface{}, len(array)) - copy(newArray, array) - return &Array{ - mu: rwmutex.Create(safe...), - array: newArray, - } -} - -// At returns the value by the specified index. -// If the given `index` is out of range of the array, it returns `nil`. -func (a *Array) At(index int) (value interface{}) { - value, _ = a.Get(index) - return -} - -// Get returns the value by the specified index. -// If the given `index` is out of range of the array, the `found` is false. -func (a *Array) Get(index int) (value interface{}, found bool) { - a.mu.RLock() - defer a.mu.RUnlock() - if index < 0 || index >= len(a.array) { - return nil, false - } - return a.array[index], true -} - -// Set sets value to specified index. -func (a *Array) Set(index int, value interface{}) error { - a.mu.Lock() - defer a.mu.Unlock() - if index < 0 || index >= len(a.array) { - return gerror.NewCodef(gcode.CodeInvalidParameter, "index %d out of array range %d", index, len(a.array)) - } - a.array[index] = value - return nil -} - -// SetArray sets the underlying slice array with the given `array`. 
-func (a *Array) SetArray(array []interface{}) *Array { - a.mu.Lock() - defer a.mu.Unlock() - a.array = array - return a -} - -// Replace replaces the array items by given `array` from the beginning of array. -func (a *Array) Replace(array []interface{}) *Array { - a.mu.Lock() - defer a.mu.Unlock() - max := len(array) - if max > len(a.array) { - max = len(a.array) - } - for i := 0; i < max; i++ { - a.array[i] = array[i] - } - return a -} - -// Sum returns the sum of values in an array. -func (a *Array) Sum() (sum int) { - a.mu.RLock() - defer a.mu.RUnlock() - for _, v := range a.array { - sum += gconv.Int(v) - } - return -} - -// SortFunc sorts the array by custom function `less`. -func (a *Array) SortFunc(less func(v1, v2 interface{}) bool) *Array { - a.mu.Lock() - defer a.mu.Unlock() - sort.Slice(a.array, func(i, j int) bool { - return less(a.array[i], a.array[j]) - }) - return a -} - -// InsertBefore inserts the `values` to the front of `index`. -func (a *Array) InsertBefore(index int, values ...interface{}) error { - a.mu.Lock() - defer a.mu.Unlock() - if index < 0 || index >= len(a.array) { - return gerror.NewCodef(gcode.CodeInvalidParameter, "index %d out of array range %d", index, len(a.array)) - } - rear := append([]interface{}{}, a.array[index:]...) - a.array = append(a.array[0:index], values...) - a.array = append(a.array, rear...) - return nil -} - -// InsertAfter inserts the `values` to the back of `index`. -func (a *Array) InsertAfter(index int, values ...interface{}) error { - a.mu.Lock() - defer a.mu.Unlock() - if index < 0 || index >= len(a.array) { - return gerror.NewCodef(gcode.CodeInvalidParameter, "index %d out of array range %d", index, len(a.array)) - } - rear := append([]interface{}{}, a.array[index+1:]...) - a.array = append(a.array[0:index+1], values...) - a.array = append(a.array, rear...) - return nil -} - -// Remove removes an item by index. -// If the given `index` is out of range of the array, the `found` is false. 
-func (a *Array) Remove(index int) (value interface{}, found bool) { - a.mu.Lock() - defer a.mu.Unlock() - return a.doRemoveWithoutLock(index) -} - -// doRemoveWithoutLock removes an item by index without lock. -func (a *Array) doRemoveWithoutLock(index int) (value interface{}, found bool) { - if index < 0 || index >= len(a.array) { - return nil, false - } - // Determine array boundaries when deleting to improve deletion efficiency. - if index == 0 { - value := a.array[0] - a.array = a.array[1:] - return value, true - } else if index == len(a.array)-1 { - value := a.array[index] - a.array = a.array[:index] - return value, true - } - // If it is a non-boundary delete, - // it will involve the creation of an array, - // then the deletion is less efficient. - value = a.array[index] - a.array = append(a.array[:index], a.array[index+1:]...) - return value, true -} - -// RemoveValue removes an item by value. -// It returns true if value is found in the array, or else false if not found. -func (a *Array) RemoveValue(value interface{}) bool { - a.mu.Lock() - defer a.mu.Unlock() - if i := a.doSearchWithoutLock(value); i != -1 { - a.doRemoveWithoutLock(i) - return true - } - return false -} - -// RemoveValues removes multiple items by `values`. -func (a *Array) RemoveValues(values ...interface{}) { - a.mu.Lock() - defer a.mu.Unlock() - for _, value := range values { - if i := a.doSearchWithoutLock(value); i != -1 { - a.doRemoveWithoutLock(i) - } - } -} - -// PushLeft pushes one or multiple items to the beginning of array. -func (a *Array) PushLeft(value ...interface{}) *Array { - a.mu.Lock() - a.array = append(value, a.array...) - a.mu.Unlock() - return a -} - -// PushRight pushes one or multiple items to the end of array. -// It equals to Append. -func (a *Array) PushRight(value ...interface{}) *Array { - a.mu.Lock() - a.array = append(a.array, value...) - a.mu.Unlock() - return a -} - -// PopRand randomly pops and return an item out of array. 
-// Note that if the array is empty, the `found` is false. -func (a *Array) PopRand() (value interface{}, found bool) { - a.mu.Lock() - defer a.mu.Unlock() - return a.doRemoveWithoutLock(grand.Intn(len(a.array))) -} - -// PopRands randomly pops and returns `size` items out of array. -func (a *Array) PopRands(size int) []interface{} { - a.mu.Lock() - defer a.mu.Unlock() - if size <= 0 || len(a.array) == 0 { - return nil - } - if size >= len(a.array) { - size = len(a.array) - } - array := make([]interface{}, size) - for i := 0; i < size; i++ { - array[i], _ = a.doRemoveWithoutLock(grand.Intn(len(a.array))) - } - return array -} - -// PopLeft pops and returns an item from the beginning of array. -// Note that if the array is empty, the `found` is false. -func (a *Array) PopLeft() (value interface{}, found bool) { - a.mu.Lock() - defer a.mu.Unlock() - if len(a.array) == 0 { - return nil, false - } - value = a.array[0] - a.array = a.array[1:] - return value, true -} - -// PopRight pops and returns an item from the end of array. -// Note that if the array is empty, the `found` is false. -func (a *Array) PopRight() (value interface{}, found bool) { - a.mu.Lock() - defer a.mu.Unlock() - index := len(a.array) - 1 - if index < 0 { - return nil, false - } - value = a.array[index] - a.array = a.array[:index] - return value, true -} - -// PopLefts pops and returns `size` items from the beginning of array. -func (a *Array) PopLefts(size int) []interface{} { - a.mu.Lock() - defer a.mu.Unlock() - if size <= 0 || len(a.array) == 0 { - return nil - } - if size >= len(a.array) { - array := a.array - a.array = a.array[:0] - return array - } - value := a.array[0:size] - a.array = a.array[size:] - return value -} - -// PopRights pops and returns `size` items from the end of array. 
-func (a *Array) PopRights(size int) []interface{} { - a.mu.Lock() - defer a.mu.Unlock() - if size <= 0 || len(a.array) == 0 { - return nil - } - index := len(a.array) - size - if index <= 0 { - array := a.array - a.array = a.array[:0] - return array - } - value := a.array[index:] - a.array = a.array[:index] - return value -} - -// Range picks and returns items by range, like array[start:end]. -// Notice, if in concurrent-safe usage, it returns a copy of slice; -// else a pointer to the underlying data. -// -// If `end` is negative, then the offset will start from the end of array. -// If `end` is omitted, then the sequence will have everything from start up -// until the end of the array. -func (a *Array) Range(start int, end ...int) []interface{} { - a.mu.RLock() - defer a.mu.RUnlock() - offsetEnd := len(a.array) - if len(end) > 0 && end[0] < offsetEnd { - offsetEnd = end[0] - } - if start > offsetEnd { - return nil - } - if start < 0 { - start = 0 - } - array := ([]interface{})(nil) - if a.mu.IsSafe() { - array = make([]interface{}, offsetEnd-start) - copy(array, a.array[start:offsetEnd]) - } else { - array = a.array[start:offsetEnd] - } - return array -} - -// SubSlice returns a slice of elements from the array as specified -// by the `offset` and `size` parameters. -// If in concurrent safe usage, it returns a copy of the slice; else a pointer. -// -// If offset is non-negative, the sequence will start at that offset in the array. -// If offset is negative, the sequence will start that far from the end of the array. -// -// If length is given and is positive, then the sequence will have up to that many elements in it. -// If the array is shorter than the length, then only the available array elements will be present. -// If length is given and is negative then the sequence will stop that many elements from the end of the array. -// If it is omitted, then the sequence will have everything from offset up until the end of the array. 
-// -// Any possibility crossing the left border of array, it will fail. -func (a *Array) SubSlice(offset int, length ...int) []interface{} { - a.mu.RLock() - defer a.mu.RUnlock() - size := len(a.array) - if len(length) > 0 { - size = length[0] - } - if offset > len(a.array) { - return nil - } - if offset < 0 { - offset = len(a.array) + offset - if offset < 0 { - return nil - } - } - if size < 0 { - offset += size - size = -size - if offset < 0 { - return nil - } - } - end := offset + size - if end > len(a.array) { - end = len(a.array) - size = len(a.array) - offset - } - if a.mu.IsSafe() { - s := make([]interface{}, size) - copy(s, a.array[offset:]) - return s - } else { - return a.array[offset:end] - } -} - -// Append is alias of PushRight, please See PushRight. -func (a *Array) Append(value ...interface{}) *Array { - a.PushRight(value...) - return a -} - -// Len returns the length of array. -func (a *Array) Len() int { - a.mu.RLock() - length := len(a.array) - a.mu.RUnlock() - return length -} - -// Slice returns the underlying data of array. -// Note that, if it's in concurrent-safe usage, it returns a copy of underlying data, -// or else a pointer to the underlying data. -func (a *Array) Slice() []interface{} { - if a.mu.IsSafe() { - a.mu.RLock() - defer a.mu.RUnlock() - array := make([]interface{}, len(a.array)) - copy(array, a.array) - return array - } else { - return a.array - } -} - -// Interfaces returns current array as []interface{}. -func (a *Array) Interfaces() []interface{} { - return a.Slice() -} - -// Clone returns a new array, which is a copy of current array. -func (a *Array) Clone() (newArray *Array) { - a.mu.RLock() - array := make([]interface{}, len(a.array)) - copy(array, a.array) - a.mu.RUnlock() - return NewArrayFrom(array, a.mu.IsSafe()) -} - -// Clear deletes all items of current array. 
-func (a *Array) Clear() *Array { - a.mu.Lock() - if len(a.array) > 0 { - a.array = make([]interface{}, 0) - } - a.mu.Unlock() - return a -} - -// Contains checks whether a value exists in the array. -func (a *Array) Contains(value interface{}) bool { - return a.Search(value) != -1 -} - -// Search searches array by `value`, returns the index of `value`, -// or returns -1 if not exists. -func (a *Array) Search(value interface{}) int { - a.mu.RLock() - defer a.mu.RUnlock() - return a.doSearchWithoutLock(value) -} - -func (a *Array) doSearchWithoutLock(value interface{}) int { - if len(a.array) == 0 { - return -1 - } - result := -1 - for index, v := range a.array { - if v == value { - result = index - break - } - } - return result -} - -// Unique uniques the array, clear repeated items. -// Example: [1,1,2,3,2] -> [1,2,3] -func (a *Array) Unique() *Array { - a.mu.Lock() - defer a.mu.Unlock() - if len(a.array) == 0 { - return a - } - var ( - ok bool - temp interface{} - uniqueSet = make(map[interface{}]struct{}) - uniqueArray = make([]interface{}, 0, len(a.array)) - ) - for i := 0; i < len(a.array); i++ { - temp = a.array[i] - if _, ok = uniqueSet[temp]; ok { - continue - } - uniqueSet[temp] = struct{}{} - uniqueArray = append(uniqueArray, temp) - } - a.array = uniqueArray - return a -} - -// LockFunc locks writing by callback function `f`. -func (a *Array) LockFunc(f func(array []interface{})) *Array { - a.mu.Lock() - defer a.mu.Unlock() - f(a.array) - return a -} - -// RLockFunc locks reading by callback function `f`. -func (a *Array) RLockFunc(f func(array []interface{})) *Array { - a.mu.RLock() - defer a.mu.RUnlock() - f(a.array) - return a -} - -// Merge merges `array` into current array. -// The parameter `array` can be any garray or slice type. -// The difference between Merge and Append is Append supports only specified slice type, -// but Merge supports more parameter types. 
-func (a *Array) Merge(array interface{}) *Array { - return a.Append(gconv.Interfaces(array)...) -} - -// Fill fills an array with num entries of the value `value`, -// keys starting at the `startIndex` parameter. -func (a *Array) Fill(startIndex int, num int, value interface{}) error { - a.mu.Lock() - defer a.mu.Unlock() - if startIndex < 0 || startIndex > len(a.array) { - return gerror.NewCodef(gcode.CodeInvalidParameter, "index %d out of array range %d", startIndex, len(a.array)) - } - for i := startIndex; i < startIndex+num; i++ { - if i > len(a.array)-1 { - a.array = append(a.array, value) - } else { - a.array[i] = value - } - } - return nil -} - -// Chunk splits an array into multiple arrays, -// the size of each array is determined by `size`. -// The last chunk may contain less than size elements. -func (a *Array) Chunk(size int) [][]interface{} { - if size < 1 { - return nil - } - a.mu.RLock() - defer a.mu.RUnlock() - length := len(a.array) - chunks := int(math.Ceil(float64(length) / float64(size))) - var n [][]interface{} - for i, end := 0, 0; chunks > 0; chunks-- { - end = (i + 1) * size - if end > length { - end = length - } - n = append(n, a.array[i*size:end]) - i++ - } - return n -} - -// Pad pads array to the specified length with `value`. -// If size is positive then the array is padded on the right, or negative on the left. -// If the absolute value of `size` is less than or equal to the length of the array -// then no padding takes place. -func (a *Array) Pad(size int, val interface{}) *Array { - a.mu.Lock() - defer a.mu.Unlock() - if size == 0 || (size > 0 && size < len(a.array)) || (size < 0 && size > -len(a.array)) { - return a - } - n := size - if size < 0 { - n = -size - } - n -= len(a.array) - tmp := make([]interface{}, n) - for i := 0; i < n; i++ { - tmp[i] = val - } - if size > 0 { - a.array = append(a.array, tmp...) - } else { - a.array = append(tmp, a.array...) 
- } - return a -} - -// Rand randomly returns one item from array(no deleting). -func (a *Array) Rand() (value interface{}, found bool) { - a.mu.RLock() - defer a.mu.RUnlock() - if len(a.array) == 0 { - return nil, false - } - return a.array[grand.Intn(len(a.array))], true -} - -// Rands randomly returns `size` items from array(no deleting). -func (a *Array) Rands(size int) []interface{} { - a.mu.RLock() - defer a.mu.RUnlock() - if size <= 0 || len(a.array) == 0 { - return nil - } - array := make([]interface{}, size) - for i := 0; i < size; i++ { - array[i] = a.array[grand.Intn(len(a.array))] - } - return array -} - -// Shuffle randomly shuffles the array. -func (a *Array) Shuffle() *Array { - a.mu.Lock() - defer a.mu.Unlock() - for i, v := range grand.Perm(len(a.array)) { - a.array[i], a.array[v] = a.array[v], a.array[i] - } - return a -} - -// Reverse makes array with elements in reverse order. -func (a *Array) Reverse() *Array { - a.mu.Lock() - defer a.mu.Unlock() - for i, j := 0, len(a.array)-1; i < j; i, j = i+1, j-1 { - a.array[i], a.array[j] = a.array[j], a.array[i] - } - return a -} - -// Join joins array elements with a string `glue`. -func (a *Array) Join(glue string) string { - a.mu.RLock() - defer a.mu.RUnlock() - if len(a.array) == 0 { - return "" - } - buffer := bytes.NewBuffer(nil) - for k, v := range a.array { - buffer.WriteString(gconv.String(v)) - if k != len(a.array)-1 { - buffer.WriteString(glue) - } - } - return buffer.String() -} - -// CountValues counts the number of occurrences of all values in the array. -func (a *Array) CountValues() map[interface{}]int { - m := make(map[interface{}]int) - a.mu.RLock() - defer a.mu.RUnlock() - for _, v := range a.array { - m[v]++ - } - return m -} - -// Iterator is alias of IteratorAsc. -func (a *Array) Iterator(f func(k int, v interface{}) bool) { - a.IteratorAsc(f) -} - -// IteratorAsc iterates the array readonly in ascending order with given callback function `f`. 
-// If `f` returns true, then it continues iterating; or false to stop. -func (a *Array) IteratorAsc(f func(k int, v interface{}) bool) { - a.mu.RLock() - defer a.mu.RUnlock() - for k, v := range a.array { - if !f(k, v) { - break - } - } -} - -// IteratorDesc iterates the array readonly in descending order with given callback function `f`. -// If `f` returns true, then it continues iterating; or false to stop. -func (a *Array) IteratorDesc(f func(k int, v interface{}) bool) { - a.mu.RLock() - defer a.mu.RUnlock() - for i := len(a.array) - 1; i >= 0; i-- { - if !f(i, a.array[i]) { - break - } - } -} - -// String returns current array as a string, which implements like json.Marshal does. -func (a *Array) String() string { - if a == nil { - return "" - } - a.mu.RLock() - defer a.mu.RUnlock() - buffer := bytes.NewBuffer(nil) - buffer.WriteByte('[') - s := "" - for k, v := range a.array { - s = gconv.String(v) - if gstr.IsNumeric(s) { - buffer.WriteString(s) - } else { - buffer.WriteString(`"` + gstr.QuoteMeta(s, `"\`) + `"`) - } - if k != len(a.array)-1 { - buffer.WriteByte(',') - } - } - buffer.WriteByte(']') - return buffer.String() -} - -// MarshalJSON implements the interface MarshalJSON for json.Marshal. -// Note that do not use pointer as its receiver here. -func (a Array) MarshalJSON() ([]byte, error) { - a.mu.RLock() - defer a.mu.RUnlock() - return json.Marshal(a.array) -} - -// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. -func (a *Array) UnmarshalJSON(b []byte) error { - if a.array == nil { - a.array = make([]interface{}, 0) - } - a.mu.Lock() - defer a.mu.Unlock() - if err := json.UnmarshalUseNumber(b, &a.array); err != nil { - return err - } - return nil -} - -// UnmarshalValue is an interface implement which sets any type of value for array. 
-func (a *Array) UnmarshalValue(value interface{}) error { - a.mu.Lock() - defer a.mu.Unlock() - switch value.(type) { - case string, []byte: - return json.UnmarshalUseNumber(gconv.Bytes(value), &a.array) - default: - a.array = gconv.SliceAny(value) - } - return nil -} - -// Filter iterates array and filters elements using custom callback function. -// It removes the element from array if callback function `filter` returns true, -// it or else does nothing and continues iterating. -func (a *Array) Filter(filter func(index int, value interface{}) bool) *Array { - a.mu.Lock() - defer a.mu.Unlock() - for i := 0; i < len(a.array); { - if filter(i, a.array[i]) { - a.array = append(a.array[:i], a.array[i+1:]...) - } else { - i++ - } - } - return a -} - -// FilterNil removes all nil value of the array. -func (a *Array) FilterNil() *Array { - a.mu.Lock() - defer a.mu.Unlock() - for i := 0; i < len(a.array); { - if empty.IsNil(a.array[i]) { - a.array = append(a.array[:i], a.array[i+1:]...) - } else { - i++ - } - } - return a -} - -// FilterEmpty removes all empty value of the array. -// Values like: 0, nil, false, "", len(slice/map/chan) == 0 are considered empty. -func (a *Array) FilterEmpty() *Array { - a.mu.Lock() - defer a.mu.Unlock() - for i := 0; i < len(a.array); { - if empty.IsEmpty(a.array[i]) { - a.array = append(a.array[:i], a.array[i+1:]...) - } else { - i++ - } - } - return a -} - -// Walk applies a user supplied function `f` to every item of array. -func (a *Array) Walk(f func(value interface{}) interface{}) *Array { - a.mu.Lock() - defer a.mu.Unlock() - for i, v := range a.array { - a.array[i] = f(v) - } - return a -} - -// IsEmpty checks whether the array is empty. -func (a *Array) IsEmpty() bool { - return a.Len() == 0 -} - -// DeepCopy implements interface for deep copy of current type. 
-func (a *Array) DeepCopy() interface{} { - if a == nil { - return nil - } - a.mu.RLock() - defer a.mu.RUnlock() - newSlice := make([]interface{}, len(a.array)) - for i, v := range a.array { - newSlice[i] = deepcopy.Copy(v) - } - return NewArrayFrom(newSlice, a.mu.IsSafe()) -} diff --git a/vendor/github.com/gogf/gf/v2/container/garray/garray_normal_int.go b/vendor/github.com/gogf/gf/v2/container/garray/garray_normal_int.go deleted file mode 100644 index a1473317..00000000 --- a/vendor/github.com/gogf/gf/v2/container/garray/garray_normal_int.go +++ /dev/null @@ -1,846 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package garray - -import ( - "bytes" - "fmt" - "math" - "sort" - - "github.com/gogf/gf/v2/errors/gcode" - "github.com/gogf/gf/v2/errors/gerror" - "github.com/gogf/gf/v2/internal/json" - "github.com/gogf/gf/v2/internal/rwmutex" - "github.com/gogf/gf/v2/util/gconv" - "github.com/gogf/gf/v2/util/grand" -) - -// IntArray is a golang int array with rich features. -// It contains a concurrent-safe/unsafe switch, which should be set -// when its initialization and cannot be changed then. -type IntArray struct { - mu rwmutex.RWMutex - array []int -} - -// NewIntArray creates and returns an empty array. -// The parameter `safe` is used to specify whether using array in concurrent-safety, -// which is false in default. -func NewIntArray(safe ...bool) *IntArray { - return NewIntArraySize(0, 0, safe...) -} - -// NewIntArraySize create and returns an array with given size and cap. -// The parameter `safe` is used to specify whether using array in concurrent-safety, -// which is false in default. 
-func NewIntArraySize(size int, cap int, safe ...bool) *IntArray { - return &IntArray{ - mu: rwmutex.Create(safe...), - array: make([]int, size, cap), - } -} - -// NewIntArrayRange creates and returns an array by a range from `start` to `end` -// with step value `step`. -func NewIntArrayRange(start, end, step int, safe ...bool) *IntArray { - if step == 0 { - panic(fmt.Sprintf(`invalid step value: %d`, step)) - } - slice := make([]int, 0) - index := 0 - for i := start; i <= end; i += step { - slice = append(slice, i) - index++ - } - return NewIntArrayFrom(slice, safe...) -} - -// NewIntArrayFrom creates and returns an array with given slice `array`. -// The parameter `safe` is used to specify whether using array in concurrent-safety, -// which is false in default. -func NewIntArrayFrom(array []int, safe ...bool) *IntArray { - return &IntArray{ - mu: rwmutex.Create(safe...), - array: array, - } -} - -// NewIntArrayFromCopy creates and returns an array from a copy of given slice `array`. -// The parameter `safe` is used to specify whether using array in concurrent-safety, -// which is false in default. -func NewIntArrayFromCopy(array []int, safe ...bool) *IntArray { - newArray := make([]int, len(array)) - copy(newArray, array) - return &IntArray{ - mu: rwmutex.Create(safe...), - array: newArray, - } -} - -// At returns the value by the specified index. -// If the given `index` is out of range of the array, it returns `0`. -func (a *IntArray) At(index int) (value int) { - value, _ = a.Get(index) - return -} - -// Get returns the value by the specified index. -// If the given `index` is out of range of the array, the `found` is false. -func (a *IntArray) Get(index int) (value int, found bool) { - a.mu.RLock() - defer a.mu.RUnlock() - if index < 0 || index >= len(a.array) { - return 0, false - } - return a.array[index], true -} - -// Set sets value to specified index. 
-func (a *IntArray) Set(index int, value int) error { - a.mu.Lock() - defer a.mu.Unlock() - if index < 0 || index >= len(a.array) { - return gerror.NewCodef(gcode.CodeInvalidParameter, "index %d out of array range %d", index, len(a.array)) - } - a.array[index] = value - return nil -} - -// SetArray sets the underlying slice array with the given `array`. -func (a *IntArray) SetArray(array []int) *IntArray { - a.mu.Lock() - defer a.mu.Unlock() - a.array = array - return a -} - -// Replace replaces the array items by given `array` from the beginning of array. -func (a *IntArray) Replace(array []int) *IntArray { - a.mu.Lock() - defer a.mu.Unlock() - max := len(array) - if max > len(a.array) { - max = len(a.array) - } - for i := 0; i < max; i++ { - a.array[i] = array[i] - } - return a -} - -// Sum returns the sum of values in an array. -func (a *IntArray) Sum() (sum int) { - a.mu.RLock() - defer a.mu.RUnlock() - for _, v := range a.array { - sum += v - } - return -} - -// Sort sorts the array in increasing order. -// The parameter `reverse` controls whether sort in increasing order(default) or decreasing order. -func (a *IntArray) Sort(reverse ...bool) *IntArray { - a.mu.Lock() - defer a.mu.Unlock() - if len(reverse) > 0 && reverse[0] { - sort.Slice(a.array, func(i, j int) bool { - return a.array[i] >= a.array[j] - }) - } else { - sort.Ints(a.array) - } - return a -} - -// SortFunc sorts the array by custom function `less`. -func (a *IntArray) SortFunc(less func(v1, v2 int) bool) *IntArray { - a.mu.Lock() - defer a.mu.Unlock() - sort.Slice(a.array, func(i, j int) bool { - return less(a.array[i], a.array[j]) - }) - return a -} - -// InsertBefore inserts the `values` to the front of `index`. 
-func (a *IntArray) InsertBefore(index int, values ...int) error { - a.mu.Lock() - defer a.mu.Unlock() - if index < 0 || index >= len(a.array) { - return gerror.NewCodef(gcode.CodeInvalidParameter, "index %d out of array range %d", index, len(a.array)) - } - rear := append([]int{}, a.array[index:]...) - a.array = append(a.array[0:index], values...) - a.array = append(a.array, rear...) - return nil -} - -// InsertAfter inserts the `value` to the back of `index`. -func (a *IntArray) InsertAfter(index int, values ...int) error { - a.mu.Lock() - defer a.mu.Unlock() - if index < 0 || index >= len(a.array) { - return gerror.NewCodef(gcode.CodeInvalidParameter, "index %d out of array range %d", index, len(a.array)) - } - rear := append([]int{}, a.array[index+1:]...) - a.array = append(a.array[0:index+1], values...) - a.array = append(a.array, rear...) - return nil -} - -// Remove removes an item by index. -// If the given `index` is out of range of the array, the `found` is false. -func (a *IntArray) Remove(index int) (value int, found bool) { - a.mu.Lock() - defer a.mu.Unlock() - return a.doRemoveWithoutLock(index) -} - -// doRemoveWithoutLock removes an item by index without lock. -func (a *IntArray) doRemoveWithoutLock(index int) (value int, found bool) { - if index < 0 || index >= len(a.array) { - return 0, false - } - // Determine array boundaries when deleting to improve deletion efficiency. - if index == 0 { - value := a.array[0] - a.array = a.array[1:] - return value, true - } else if index == len(a.array)-1 { - value := a.array[index] - a.array = a.array[:index] - return value, true - } - // If it is a non-boundary delete, - // it will involve the creation of an array, - // then the deletion is less efficient. - value = a.array[index] - a.array = append(a.array[:index], a.array[index+1:]...) - return value, true -} - -// RemoveValue removes an item by value. -// It returns true if value is found in the array, or else false if not found. 
-func (a *IntArray) RemoveValue(value int) bool { - a.mu.Lock() - defer a.mu.Unlock() - if i := a.doSearchWithoutLock(value); i != -1 { - a.doRemoveWithoutLock(i) - return true - } - return false -} - -// RemoveValues removes multiple items by `values`. -func (a *IntArray) RemoveValues(values ...int) { - a.mu.Lock() - defer a.mu.Unlock() - for _, value := range values { - if i := a.doSearchWithoutLock(value); i != -1 { - a.doRemoveWithoutLock(i) - } - } -} - -// PushLeft pushes one or multiple items to the beginning of array. -func (a *IntArray) PushLeft(value ...int) *IntArray { - a.mu.Lock() - a.array = append(value, a.array...) - a.mu.Unlock() - return a -} - -// PushRight pushes one or multiple items to the end of array. -// It equals to Append. -func (a *IntArray) PushRight(value ...int) *IntArray { - a.mu.Lock() - a.array = append(a.array, value...) - a.mu.Unlock() - return a -} - -// PopLeft pops and returns an item from the beginning of array. -// Note that if the array is empty, the `found` is false. -func (a *IntArray) PopLeft() (value int, found bool) { - a.mu.Lock() - defer a.mu.Unlock() - if len(a.array) == 0 { - return 0, false - } - value = a.array[0] - a.array = a.array[1:] - return value, true -} - -// PopRight pops and returns an item from the end of array. -// Note that if the array is empty, the `found` is false. -func (a *IntArray) PopRight() (value int, found bool) { - a.mu.Lock() - defer a.mu.Unlock() - index := len(a.array) - 1 - if index < 0 { - return 0, false - } - value = a.array[index] - a.array = a.array[:index] - return value, true -} - -// PopRand randomly pops and return an item out of array. -// Note that if the array is empty, the `found` is false. -func (a *IntArray) PopRand() (value int, found bool) { - a.mu.Lock() - defer a.mu.Unlock() - return a.doRemoveWithoutLock(grand.Intn(len(a.array))) -} - -// PopRands randomly pops and returns `size` items out of array. 
-// If the given `size` is greater than size of the array, it returns all elements of the array. -// Note that if given `size` <= 0 or the array is empty, it returns nil. -func (a *IntArray) PopRands(size int) []int { - a.mu.Lock() - defer a.mu.Unlock() - if size <= 0 || len(a.array) == 0 { - return nil - } - if size >= len(a.array) { - size = len(a.array) - } - array := make([]int, size) - for i := 0; i < size; i++ { - array[i], _ = a.doRemoveWithoutLock(grand.Intn(len(a.array))) - } - return array -} - -// PopLefts pops and returns `size` items from the beginning of array. -// If the given `size` is greater than size of the array, it returns all elements of the array. -// Note that if given `size` <= 0 or the array is empty, it returns nil. -func (a *IntArray) PopLefts(size int) []int { - a.mu.Lock() - defer a.mu.Unlock() - if size <= 0 || len(a.array) == 0 { - return nil - } - if size >= len(a.array) { - array := a.array - a.array = a.array[:0] - return array - } - value := a.array[0:size] - a.array = a.array[size:] - return value -} - -// PopRights pops and returns `size` items from the end of array. -// If the given `size` is greater than size of the array, it returns all elements of the array. -// Note that if given `size` <= 0 or the array is empty, it returns nil. -func (a *IntArray) PopRights(size int) []int { - a.mu.Lock() - defer a.mu.Unlock() - if size <= 0 || len(a.array) == 0 { - return nil - } - index := len(a.array) - size - if index <= 0 { - array := a.array - a.array = a.array[:0] - return array - } - value := a.array[index:] - a.array = a.array[:index] - return value -} - -// Range picks and returns items by range, like array[start:end]. -// Notice, if in concurrent-safe usage, it returns a copy of slice; -// else a pointer to the underlying data. -// -// If `end` is negative, then the offset will start from the end of array. -// If `end` is omitted, then the sequence will have everything from start up -// until the end of the array. 
-func (a *IntArray) Range(start int, end ...int) []int { - a.mu.RLock() - defer a.mu.RUnlock() - offsetEnd := len(a.array) - if len(end) > 0 && end[0] < offsetEnd { - offsetEnd = end[0] - } - if start > offsetEnd { - return nil - } - if start < 0 { - start = 0 - } - array := ([]int)(nil) - if a.mu.IsSafe() { - array = make([]int, offsetEnd-start) - copy(array, a.array[start:offsetEnd]) - } else { - array = a.array[start:offsetEnd] - } - return array -} - -// SubSlice returns a slice of elements from the array as specified -// by the `offset` and `size` parameters. -// If in concurrent safe usage, it returns a copy of the slice; else a pointer. -// -// If offset is non-negative, the sequence will start at that offset in the array. -// If offset is negative, the sequence will start that far from the end of the array. -// -// If length is given and is positive, then the sequence will have up to that many elements in it. -// If the array is shorter than the length, then only the available array elements will be present. -// If length is given and is negative then the sequence will stop that many elements from the end of the array. -// If it is omitted, then the sequence will have everything from offset up until the end of the array. -// -// Any possibility crossing the left border of array, it will fail. 
-func (a *IntArray) SubSlice(offset int, length ...int) []int { - a.mu.RLock() - defer a.mu.RUnlock() - size := len(a.array) - if len(length) > 0 { - size = length[0] - } - if offset > len(a.array) { - return nil - } - if offset < 0 { - offset = len(a.array) + offset - if offset < 0 { - return nil - } - } - if size < 0 { - offset += size - size = -size - if offset < 0 { - return nil - } - } - end := offset + size - if end > len(a.array) { - end = len(a.array) - size = len(a.array) - offset - } - if a.mu.IsSafe() { - s := make([]int, size) - copy(s, a.array[offset:]) - return s - } else { - return a.array[offset:end] - } -} - -// Append is alias of PushRight,please See PushRight. -func (a *IntArray) Append(value ...int) *IntArray { - a.mu.Lock() - a.array = append(a.array, value...) - a.mu.Unlock() - return a -} - -// Len returns the length of array. -func (a *IntArray) Len() int { - a.mu.RLock() - length := len(a.array) - a.mu.RUnlock() - return length -} - -// Slice returns the underlying data of array. -// Note that, if it's in concurrent-safe usage, it returns a copy of underlying data, -// or else a pointer to the underlying data. -func (a *IntArray) Slice() []int { - array := ([]int)(nil) - if a.mu.IsSafe() { - a.mu.RLock() - defer a.mu.RUnlock() - array = make([]int, len(a.array)) - copy(array, a.array) - } else { - array = a.array - } - return array -} - -// Interfaces returns current array as []interface{}. -func (a *IntArray) Interfaces() []interface{} { - a.mu.RLock() - defer a.mu.RUnlock() - array := make([]interface{}, len(a.array)) - for k, v := range a.array { - array[k] = v - } - return array -} - -// Clone returns a new array, which is a copy of current array. -func (a *IntArray) Clone() (newArray *IntArray) { - a.mu.RLock() - array := make([]int, len(a.array)) - copy(array, a.array) - a.mu.RUnlock() - return NewIntArrayFrom(array, a.mu.IsSafe()) -} - -// Clear deletes all items of current array. 
-func (a *IntArray) Clear() *IntArray { - a.mu.Lock() - if len(a.array) > 0 { - a.array = make([]int, 0) - } - a.mu.Unlock() - return a -} - -// Contains checks whether a value exists in the array. -func (a *IntArray) Contains(value int) bool { - return a.Search(value) != -1 -} - -// Search searches array by `value`, returns the index of `value`, -// or returns -1 if not exists. -func (a *IntArray) Search(value int) int { - a.mu.RLock() - defer a.mu.RUnlock() - return a.doSearchWithoutLock(value) -} - -func (a *IntArray) doSearchWithoutLock(value int) int { - if len(a.array) == 0 { - return -1 - } - result := -1 - for index, v := range a.array { - if v == value { - result = index - break - } - } - return result -} - -// Unique uniques the array, clear repeated items. -// Example: [1,1,2,3,2] -> [1,2,3] -func (a *IntArray) Unique() *IntArray { - a.mu.Lock() - defer a.mu.Unlock() - if len(a.array) == 0 { - return a - } - var ( - ok bool - temp int - uniqueSet = make(map[int]struct{}) - uniqueArray = make([]int, 0, len(a.array)) - ) - for i := 0; i < len(a.array); i++ { - temp = a.array[i] - if _, ok = uniqueSet[temp]; ok { - continue - } - uniqueSet[temp] = struct{}{} - uniqueArray = append(uniqueArray, temp) - } - a.array = uniqueArray - return a -} - -// LockFunc locks writing by callback function `f`. -func (a *IntArray) LockFunc(f func(array []int)) *IntArray { - a.mu.Lock() - defer a.mu.Unlock() - f(a.array) - return a -} - -// RLockFunc locks reading by callback function `f`. -func (a *IntArray) RLockFunc(f func(array []int)) *IntArray { - a.mu.RLock() - defer a.mu.RUnlock() - f(a.array) - return a -} - -// Merge merges `array` into current array. -// The parameter `array` can be any garray or slice type. -// The difference between Merge and Append is Append supports only specified slice type, -// but Merge supports more parameter types. -func (a *IntArray) Merge(array interface{}) *IntArray { - return a.Append(gconv.Ints(array)...) 
-} - -// Fill fills an array with num entries of the value `value`, -// keys starting at the `startIndex` parameter. -func (a *IntArray) Fill(startIndex int, num int, value int) error { - a.mu.Lock() - defer a.mu.Unlock() - if startIndex < 0 || startIndex > len(a.array) { - return gerror.NewCodef(gcode.CodeInvalidParameter, "index %d out of array range %d", startIndex, len(a.array)) - } - for i := startIndex; i < startIndex+num; i++ { - if i > len(a.array)-1 { - a.array = append(a.array, value) - } else { - a.array[i] = value - } - } - return nil -} - -// Chunk splits an array into multiple arrays, -// the size of each array is determined by `size`. -// The last chunk may contain less than size elements. -func (a *IntArray) Chunk(size int) [][]int { - if size < 1 { - return nil - } - a.mu.RLock() - defer a.mu.RUnlock() - length := len(a.array) - chunks := int(math.Ceil(float64(length) / float64(size))) - var n [][]int - for i, end := 0, 0; chunks > 0; chunks-- { - end = (i + 1) * size - if end > length { - end = length - } - n = append(n, a.array[i*size:end]) - i++ - } - return n -} - -// Pad pads array to the specified length with `value`. -// If size is positive then the array is padded on the right, or negative on the left. -// If the absolute value of `size` is less than or equal to the length of the array -// then no padding takes place. -func (a *IntArray) Pad(size int, value int) *IntArray { - a.mu.Lock() - defer a.mu.Unlock() - if size == 0 || (size > 0 && size < len(a.array)) || (size < 0 && size > -len(a.array)) { - return a - } - n := size - if size < 0 { - n = -size - } - n -= len(a.array) - tmp := make([]int, n) - for i := 0; i < n; i++ { - tmp[i] = value - } - if size > 0 { - a.array = append(a.array, tmp...) - } else { - a.array = append(tmp, a.array...) - } - return a -} - -// Rand randomly returns one item from array(no deleting). 
-func (a *IntArray) Rand() (value int, found bool) { - a.mu.RLock() - defer a.mu.RUnlock() - if len(a.array) == 0 { - return 0, false - } - return a.array[grand.Intn(len(a.array))], true -} - -// Rands randomly returns `size` items from array(no deleting). -func (a *IntArray) Rands(size int) []int { - a.mu.RLock() - defer a.mu.RUnlock() - if size <= 0 || len(a.array) == 0 { - return nil - } - array := make([]int, size) - for i := 0; i < size; i++ { - array[i] = a.array[grand.Intn(len(a.array))] - } - return array -} - -// Shuffle randomly shuffles the array. -func (a *IntArray) Shuffle() *IntArray { - a.mu.Lock() - defer a.mu.Unlock() - for i, v := range grand.Perm(len(a.array)) { - a.array[i], a.array[v] = a.array[v], a.array[i] - } - return a -} - -// Reverse makes array with elements in reverse order. -func (a *IntArray) Reverse() *IntArray { - a.mu.Lock() - defer a.mu.Unlock() - for i, j := 0, len(a.array)-1; i < j; i, j = i+1, j-1 { - a.array[i], a.array[j] = a.array[j], a.array[i] - } - return a -} - -// Join joins array elements with a string `glue`. -func (a *IntArray) Join(glue string) string { - a.mu.RLock() - defer a.mu.RUnlock() - if len(a.array) == 0 { - return "" - } - buffer := bytes.NewBuffer(nil) - for k, v := range a.array { - buffer.WriteString(gconv.String(v)) - if k != len(a.array)-1 { - buffer.WriteString(glue) - } - } - return buffer.String() -} - -// CountValues counts the number of occurrences of all values in the array. -func (a *IntArray) CountValues() map[int]int { - m := make(map[int]int) - a.mu.RLock() - defer a.mu.RUnlock() - for _, v := range a.array { - m[v]++ - } - return m -} - -// Iterator is alias of IteratorAsc. -func (a *IntArray) Iterator(f func(k int, v int) bool) { - a.IteratorAsc(f) -} - -// IteratorAsc iterates the array readonly in ascending order with given callback function `f`. -// If `f` returns true, then it continues iterating; or false to stop. 
-func (a *IntArray) IteratorAsc(f func(k int, v int) bool) { - a.mu.RLock() - defer a.mu.RUnlock() - for k, v := range a.array { - if !f(k, v) { - break - } - } -} - -// IteratorDesc iterates the array readonly in descending order with given callback function `f`. -// If `f` returns true, then it continues iterating; or false to stop. -func (a *IntArray) IteratorDesc(f func(k int, v int) bool) { - a.mu.RLock() - defer a.mu.RUnlock() - for i := len(a.array) - 1; i >= 0; i-- { - if !f(i, a.array[i]) { - break - } - } -} - -// String returns current array as a string, which implements like json.Marshal does. -func (a *IntArray) String() string { - if a == nil { - return "" - } - return "[" + a.Join(",") + "]" -} - -// MarshalJSON implements the interface MarshalJSON for json.Marshal. -// Note that do not use pointer as its receiver here. -func (a IntArray) MarshalJSON() ([]byte, error) { - a.mu.RLock() - defer a.mu.RUnlock() - return json.Marshal(a.array) -} - -// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. -func (a *IntArray) UnmarshalJSON(b []byte) error { - if a.array == nil { - a.array = make([]int, 0) - } - a.mu.Lock() - defer a.mu.Unlock() - if err := json.UnmarshalUseNumber(b, &a.array); err != nil { - return err - } - return nil -} - -// UnmarshalValue is an interface implement which sets any type of value for array. -func (a *IntArray) UnmarshalValue(value interface{}) error { - a.mu.Lock() - defer a.mu.Unlock() - switch value.(type) { - case string, []byte: - return json.UnmarshalUseNumber(gconv.Bytes(value), &a.array) - default: - a.array = gconv.SliceInt(value) - } - return nil -} - -// Filter iterates array and filters elements using custom callback function. -// It removes the element from array if callback function `filter` returns true, -// it or else does nothing and continues iterating. 
-func (a *IntArray) Filter(filter func(index int, value int) bool) *IntArray { - a.mu.Lock() - defer a.mu.Unlock() - for i := 0; i < len(a.array); { - if filter(i, a.array[i]) { - a.array = append(a.array[:i], a.array[i+1:]...) - } else { - i++ - } - } - return a -} - -// FilterEmpty removes all zero value of the array. -func (a *IntArray) FilterEmpty() *IntArray { - a.mu.Lock() - defer a.mu.Unlock() - for i := 0; i < len(a.array); { - if a.array[i] == 0 { - a.array = append(a.array[:i], a.array[i+1:]...) - } else { - i++ - } - } - return a -} - -// Walk applies a user supplied function `f` to every item of array. -func (a *IntArray) Walk(f func(value int) int) *IntArray { - a.mu.Lock() - defer a.mu.Unlock() - for i, v := range a.array { - a.array[i] = f(v) - } - return a -} - -// IsEmpty checks whether the array is empty. -func (a *IntArray) IsEmpty() bool { - return a.Len() == 0 -} - -// DeepCopy implements interface for deep copy of current type. -func (a *IntArray) DeepCopy() interface{} { - if a == nil { - return nil - } - a.mu.RLock() - defer a.mu.RUnlock() - newSlice := make([]int, len(a.array)) - copy(newSlice, a.array) - return NewIntArrayFrom(newSlice, a.mu.IsSafe()) -} diff --git a/vendor/github.com/gogf/gf/v2/container/garray/garray_normal_str.go b/vendor/github.com/gogf/gf/v2/container/garray/garray_normal_str.go deleted file mode 100644 index 55921596..00000000 --- a/vendor/github.com/gogf/gf/v2/container/garray/garray_normal_str.go +++ /dev/null @@ -1,857 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. 
- -package garray - -import ( - "bytes" - "math" - "sort" - "strings" - - "github.com/gogf/gf/v2/errors/gcode" - "github.com/gogf/gf/v2/errors/gerror" - "github.com/gogf/gf/v2/internal/json" - "github.com/gogf/gf/v2/internal/rwmutex" - "github.com/gogf/gf/v2/text/gstr" - "github.com/gogf/gf/v2/util/gconv" - "github.com/gogf/gf/v2/util/grand" -) - -// StrArray is a golang string array with rich features. -// It contains a concurrent-safe/unsafe switch, which should be set -// when its initialization and cannot be changed then. -type StrArray struct { - mu rwmutex.RWMutex - array []string -} - -// NewStrArray creates and returns an empty array. -// The parameter `safe` is used to specify whether using array in concurrent-safety, -// which is false in default. -func NewStrArray(safe ...bool) *StrArray { - return NewStrArraySize(0, 0, safe...) -} - -// NewStrArraySize create and returns an array with given size and cap. -// The parameter `safe` is used to specify whether using array in concurrent-safety, -// which is false in default. -func NewStrArraySize(size int, cap int, safe ...bool) *StrArray { - return &StrArray{ - mu: rwmutex.Create(safe...), - array: make([]string, size, cap), - } -} - -// NewStrArrayFrom creates and returns an array with given slice `array`. -// The parameter `safe` is used to specify whether using array in concurrent-safety, -// which is false in default. -func NewStrArrayFrom(array []string, safe ...bool) *StrArray { - return &StrArray{ - mu: rwmutex.Create(safe...), - array: array, - } -} - -// NewStrArrayFromCopy creates and returns an array from a copy of given slice `array`. -// The parameter `safe` is used to specify whether using array in concurrent-safety, -// which is false in default. 
-func NewStrArrayFromCopy(array []string, safe ...bool) *StrArray { - newArray := make([]string, len(array)) - copy(newArray, array) - return &StrArray{ - mu: rwmutex.Create(safe...), - array: newArray, - } -} - -// At returns the value by the specified index. -// If the given `index` is out of range of the array, it returns an empty string. -func (a *StrArray) At(index int) (value string) { - value, _ = a.Get(index) - return -} - -// Get returns the value by the specified index. -// If the given `index` is out of range of the array, the `found` is false. -func (a *StrArray) Get(index int) (value string, found bool) { - a.mu.RLock() - defer a.mu.RUnlock() - if index < 0 || index >= len(a.array) { - return "", false - } - return a.array[index], true -} - -// Set sets value to specified index. -func (a *StrArray) Set(index int, value string) error { - a.mu.Lock() - defer a.mu.Unlock() - if index < 0 || index >= len(a.array) { - return gerror.NewCodef(gcode.CodeInvalidParameter, "index %d out of array range %d", index, len(a.array)) - } - a.array[index] = value - return nil -} - -// SetArray sets the underlying slice array with the given `array`. -func (a *StrArray) SetArray(array []string) *StrArray { - a.mu.Lock() - defer a.mu.Unlock() - a.array = array - return a -} - -// Replace replaces the array items by given `array` from the beginning of array. -func (a *StrArray) Replace(array []string) *StrArray { - a.mu.Lock() - defer a.mu.Unlock() - max := len(array) - if max > len(a.array) { - max = len(a.array) - } - for i := 0; i < max; i++ { - a.array[i] = array[i] - } - return a -} - -// Sum returns the sum of values in an array. -func (a *StrArray) Sum() (sum int) { - a.mu.RLock() - defer a.mu.RUnlock() - for _, v := range a.array { - sum += gconv.Int(v) - } - return -} - -// Sort sorts the array in increasing order. 
-// The parameter `reverse` controls whether sort -// in increasing order(default) or decreasing order -func (a *StrArray) Sort(reverse ...bool) *StrArray { - a.mu.Lock() - defer a.mu.Unlock() - if len(reverse) > 0 && reverse[0] { - sort.Slice(a.array, func(i, j int) bool { - return strings.Compare(a.array[i], a.array[j]) >= 0 - }) - } else { - sort.Strings(a.array) - } - return a -} - -// SortFunc sorts the array by custom function `less`. -func (a *StrArray) SortFunc(less func(v1, v2 string) bool) *StrArray { - a.mu.Lock() - defer a.mu.Unlock() - sort.Slice(a.array, func(i, j int) bool { - return less(a.array[i], a.array[j]) - }) - return a -} - -// InsertBefore inserts the `values` to the front of `index`. -func (a *StrArray) InsertBefore(index int, values ...string) error { - a.mu.Lock() - defer a.mu.Unlock() - if index < 0 || index >= len(a.array) { - return gerror.NewCodef(gcode.CodeInvalidParameter, "index %d out of array range %d", index, len(a.array)) - } - rear := append([]string{}, a.array[index:]...) - a.array = append(a.array[0:index], values...) - a.array = append(a.array, rear...) - return nil -} - -// InsertAfter inserts the `values` to the back of `index`. -func (a *StrArray) InsertAfter(index int, values ...string) error { - a.mu.Lock() - defer a.mu.Unlock() - if index < 0 || index >= len(a.array) { - return gerror.NewCodef(gcode.CodeInvalidParameter, "index %d out of array range %d", index, len(a.array)) - } - rear := append([]string{}, a.array[index+1:]...) - a.array = append(a.array[0:index+1], values...) - a.array = append(a.array, rear...) - return nil -} - -// Remove removes an item by index. -// If the given `index` is out of range of the array, the `found` is false. -func (a *StrArray) Remove(index int) (value string, found bool) { - a.mu.Lock() - defer a.mu.Unlock() - return a.doRemoveWithoutLock(index) -} - -// doRemoveWithoutLock removes an item by index without lock. 
-func (a *StrArray) doRemoveWithoutLock(index int) (value string, found bool) { - if index < 0 || index >= len(a.array) { - return "", false - } - // Determine array boundaries when deleting to improve deletion efficiency. - if index == 0 { - value := a.array[0] - a.array = a.array[1:] - return value, true - } else if index == len(a.array)-1 { - value := a.array[index] - a.array = a.array[:index] - return value, true - } - // If it is a non-boundary delete, - // it will involve the creation of an array, - // then the deletion is less efficient. - value = a.array[index] - a.array = append(a.array[:index], a.array[index+1:]...) - return value, true -} - -// RemoveValue removes an item by value. -// It returns true if value is found in the array, or else false if not found. -func (a *StrArray) RemoveValue(value string) bool { - if i := a.Search(value); i != -1 { - _, found := a.Remove(i) - return found - } - return false -} - -// RemoveValues removes multiple items by `values`. -func (a *StrArray) RemoveValues(values ...string) { - a.mu.Lock() - defer a.mu.Unlock() - for _, value := range values { - if i := a.doSearchWithoutLock(value); i != -1 { - a.doRemoveWithoutLock(i) - } - } -} - -// PushLeft pushes one or multiple items to the beginning of array. -func (a *StrArray) PushLeft(value ...string) *StrArray { - a.mu.Lock() - a.array = append(value, a.array...) - a.mu.Unlock() - return a -} - -// PushRight pushes one or multiple items to the end of array. -// It equals to Append. -func (a *StrArray) PushRight(value ...string) *StrArray { - a.mu.Lock() - a.array = append(a.array, value...) - a.mu.Unlock() - return a -} - -// PopLeft pops and returns an item from the beginning of array. -// Note that if the array is empty, the `found` is false. 
-func (a *StrArray) PopLeft() (value string, found bool) { - a.mu.Lock() - defer a.mu.Unlock() - if len(a.array) == 0 { - return "", false - } - value = a.array[0] - a.array = a.array[1:] - return value, true -} - -// PopRight pops and returns an item from the end of array. -// Note that if the array is empty, the `found` is false. -func (a *StrArray) PopRight() (value string, found bool) { - a.mu.Lock() - defer a.mu.Unlock() - index := len(a.array) - 1 - if index < 0 { - return "", false - } - value = a.array[index] - a.array = a.array[:index] - return value, true -} - -// PopRand randomly pops and return an item out of array. -// Note that if the array is empty, the `found` is false. -func (a *StrArray) PopRand() (value string, found bool) { - a.mu.Lock() - defer a.mu.Unlock() - return a.doRemoveWithoutLock(grand.Intn(len(a.array))) -} - -// PopRands randomly pops and returns `size` items out of array. -// If the given `size` is greater than size of the array, it returns all elements of the array. -// Note that if given `size` <= 0 or the array is empty, it returns nil. -func (a *StrArray) PopRands(size int) []string { - a.mu.Lock() - defer a.mu.Unlock() - if size <= 0 || len(a.array) == 0 { - return nil - } - if size >= len(a.array) { - size = len(a.array) - } - array := make([]string, size) - for i := 0; i < size; i++ { - array[i], _ = a.doRemoveWithoutLock(grand.Intn(len(a.array))) - } - return array -} - -// PopLefts pops and returns `size` items from the beginning of array. -// If the given `size` is greater than size of the array, it returns all elements of the array. -// Note that if given `size` <= 0 or the array is empty, it returns nil. 
-func (a *StrArray) PopLefts(size int) []string { - a.mu.Lock() - defer a.mu.Unlock() - if size <= 0 || len(a.array) == 0 { - return nil - } - if size >= len(a.array) { - array := a.array - a.array = a.array[:0] - return array - } - value := a.array[0:size] - a.array = a.array[size:] - return value -} - -// PopRights pops and returns `size` items from the end of array. -// If the given `size` is greater than size of the array, it returns all elements of the array. -// Note that if given `size` <= 0 or the array is empty, it returns nil. -func (a *StrArray) PopRights(size int) []string { - a.mu.Lock() - defer a.mu.Unlock() - if size <= 0 || len(a.array) == 0 { - return nil - } - index := len(a.array) - size - if index <= 0 { - array := a.array - a.array = a.array[:0] - return array - } - value := a.array[index:] - a.array = a.array[:index] - return value -} - -// Range picks and returns items by range, like array[start:end]. -// Notice, if in concurrent-safe usage, it returns a copy of slice; -// else a pointer to the underlying data. -// -// If `end` is negative, then the offset will start from the end of array. -// If `end` is omitted, then the sequence will have everything from start up -// until the end of the array. -func (a *StrArray) Range(start int, end ...int) []string { - a.mu.RLock() - defer a.mu.RUnlock() - offsetEnd := len(a.array) - if len(end) > 0 && end[0] < offsetEnd { - offsetEnd = end[0] - } - if start > offsetEnd { - return nil - } - if start < 0 { - start = 0 - } - array := ([]string)(nil) - if a.mu.IsSafe() { - array = make([]string, offsetEnd-start) - copy(array, a.array[start:offsetEnd]) - } else { - array = a.array[start:offsetEnd] - } - return array -} - -// SubSlice returns a slice of elements from the array as specified -// by the `offset` and `size` parameters. -// If in concurrent safe usage, it returns a copy of the slice; else a pointer. -// -// If offset is non-negative, the sequence will start at that offset in the array. 
-// If offset is negative, the sequence will start that far from the end of the array. -// -// If length is given and is positive, then the sequence will have up to that many elements in it. -// If the array is shorter than the length, then only the available array elements will be present. -// If length is given and is negative then the sequence will stop that many elements from the end of the array. -// If it is omitted, then the sequence will have everything from offset up until the end of the array. -// -// Any possibility crossing the left border of array, it will fail. -func (a *StrArray) SubSlice(offset int, length ...int) []string { - a.mu.RLock() - defer a.mu.RUnlock() - size := len(a.array) - if len(length) > 0 { - size = length[0] - } - if offset > len(a.array) { - return nil - } - if offset < 0 { - offset = len(a.array) + offset - if offset < 0 { - return nil - } - } - if size < 0 { - offset += size - size = -size - if offset < 0 { - return nil - } - } - end := offset + size - if end > len(a.array) { - end = len(a.array) - size = len(a.array) - offset - } - if a.mu.IsSafe() { - s := make([]string, size) - copy(s, a.array[offset:]) - return s - } - return a.array[offset:end] -} - -// Append is alias of PushRight,please See PushRight. -func (a *StrArray) Append(value ...string) *StrArray { - a.mu.Lock() - a.array = append(a.array, value...) - a.mu.Unlock() - return a -} - -// Len returns the length of array. -func (a *StrArray) Len() int { - a.mu.RLock() - length := len(a.array) - a.mu.RUnlock() - return length -} - -// Slice returns the underlying data of array. -// Note that, if it's in concurrent-safe usage, it returns a copy of underlying data, -// or else a pointer to the underlying data. 
-func (a *StrArray) Slice() []string { - array := ([]string)(nil) - if a.mu.IsSafe() { - a.mu.RLock() - defer a.mu.RUnlock() - array = make([]string, len(a.array)) - copy(array, a.array) - } else { - array = a.array - } - return array -} - -// Interfaces returns current array as []interface{}. -func (a *StrArray) Interfaces() []interface{} { - a.mu.RLock() - defer a.mu.RUnlock() - array := make([]interface{}, len(a.array)) - for k, v := range a.array { - array[k] = v - } - return array -} - -// Clone returns a new array, which is a copy of current array. -func (a *StrArray) Clone() (newArray *StrArray) { - a.mu.RLock() - array := make([]string, len(a.array)) - copy(array, a.array) - a.mu.RUnlock() - return NewStrArrayFrom(array, a.mu.IsSafe()) -} - -// Clear deletes all items of current array. -func (a *StrArray) Clear() *StrArray { - a.mu.Lock() - if len(a.array) > 0 { - a.array = make([]string, 0) - } - a.mu.Unlock() - return a -} - -// Contains checks whether a value exists in the array. -func (a *StrArray) Contains(value string) bool { - return a.Search(value) != -1 -} - -// ContainsI checks whether a value exists in the array with case-insensitively. -// Note that it internally iterates the whole array to do the comparison with case-insensitively. -func (a *StrArray) ContainsI(value string) bool { - a.mu.RLock() - defer a.mu.RUnlock() - if len(a.array) == 0 { - return false - } - for _, v := range a.array { - if strings.EqualFold(v, value) { - return true - } - } - return false -} - -// Search searches array by `value`, returns the index of `value`, -// or returns -1 if not exists. 
-func (a *StrArray) Search(value string) int { - a.mu.RLock() - defer a.mu.RUnlock() - return a.doSearchWithoutLock(value) -} - -func (a *StrArray) doSearchWithoutLock(value string) int { - if len(a.array) == 0 { - return -1 - } - result := -1 - for index, v := range a.array { - if strings.Compare(v, value) == 0 { - result = index - break - } - } - return result -} - -// Unique uniques the array, clear repeated items. -// Example: [1,1,2,3,2] -> [1,2,3] -func (a *StrArray) Unique() *StrArray { - a.mu.Lock() - defer a.mu.Unlock() - if len(a.array) == 0 { - return a - } - var ( - ok bool - temp string - uniqueSet = make(map[string]struct{}) - uniqueArray = make([]string, 0, len(a.array)) - ) - for i := 0; i < len(a.array); i++ { - temp = a.array[i] - if _, ok = uniqueSet[temp]; ok { - continue - } - uniqueSet[temp] = struct{}{} - uniqueArray = append(uniqueArray, temp) - } - a.array = uniqueArray - return a -} - -// LockFunc locks writing by callback function `f`. -func (a *StrArray) LockFunc(f func(array []string)) *StrArray { - a.mu.Lock() - defer a.mu.Unlock() - f(a.array) - return a -} - -// RLockFunc locks reading by callback function `f`. -func (a *StrArray) RLockFunc(f func(array []string)) *StrArray { - a.mu.RLock() - defer a.mu.RUnlock() - f(a.array) - return a -} - -// Merge merges `array` into current array. -// The parameter `array` can be any garray or slice type. -// The difference between Merge and Append is Append supports only specified slice type, -// but Merge supports more parameter types. -func (a *StrArray) Merge(array interface{}) *StrArray { - return a.Append(gconv.Strings(array)...) -} - -// Fill fills an array with num entries of the value `value`, -// keys starting at the `startIndex` parameter. 
-func (a *StrArray) Fill(startIndex int, num int, value string) error { - a.mu.Lock() - defer a.mu.Unlock() - if startIndex < 0 || startIndex > len(a.array) { - return gerror.NewCodef(gcode.CodeInvalidParameter, "index %d out of array range %d", startIndex, len(a.array)) - } - for i := startIndex; i < startIndex+num; i++ { - if i > len(a.array)-1 { - a.array = append(a.array, value) - } else { - a.array[i] = value - } - } - return nil -} - -// Chunk splits an array into multiple arrays, -// the size of each array is determined by `size`. -// The last chunk may contain less than size elements. -func (a *StrArray) Chunk(size int) [][]string { - if size < 1 { - return nil - } - a.mu.RLock() - defer a.mu.RUnlock() - length := len(a.array) - chunks := int(math.Ceil(float64(length) / float64(size))) - var n [][]string - for i, end := 0, 0; chunks > 0; chunks-- { - end = (i + 1) * size - if end > length { - end = length - } - n = append(n, a.array[i*size:end]) - i++ - } - return n -} - -// Pad pads array to the specified length with `value`. -// If size is positive then the array is padded on the right, or negative on the left. -// If the absolute value of `size` is less than or equal to the length of the array -// then no padding takes place. -func (a *StrArray) Pad(size int, value string) *StrArray { - a.mu.Lock() - defer a.mu.Unlock() - if size == 0 || (size > 0 && size < len(a.array)) || (size < 0 && size > -len(a.array)) { - return a - } - n := size - if size < 0 { - n = -size - } - n -= len(a.array) - tmp := make([]string, n) - for i := 0; i < n; i++ { - tmp[i] = value - } - if size > 0 { - a.array = append(a.array, tmp...) - } else { - a.array = append(tmp, a.array...) - } - return a -} - -// Rand randomly returns one item from array(no deleting). 
-func (a *StrArray) Rand() (value string, found bool) { - a.mu.RLock() - defer a.mu.RUnlock() - if len(a.array) == 0 { - return "", false - } - return a.array[grand.Intn(len(a.array))], true -} - -// Rands randomly returns `size` items from array(no deleting). -func (a *StrArray) Rands(size int) []string { - a.mu.RLock() - defer a.mu.RUnlock() - if size <= 0 || len(a.array) == 0 { - return nil - } - array := make([]string, size) - for i := 0; i < size; i++ { - array[i] = a.array[grand.Intn(len(a.array))] - } - return array -} - -// Shuffle randomly shuffles the array. -func (a *StrArray) Shuffle() *StrArray { - a.mu.Lock() - defer a.mu.Unlock() - for i, v := range grand.Perm(len(a.array)) { - a.array[i], a.array[v] = a.array[v], a.array[i] - } - return a -} - -// Reverse makes array with elements in reverse order. -func (a *StrArray) Reverse() *StrArray { - a.mu.Lock() - defer a.mu.Unlock() - for i, j := 0, len(a.array)-1; i < j; i, j = i+1, j-1 { - a.array[i], a.array[j] = a.array[j], a.array[i] - } - return a -} - -// Join joins array elements with a string `glue`. -func (a *StrArray) Join(glue string) string { - a.mu.RLock() - defer a.mu.RUnlock() - if len(a.array) == 0 { - return "" - } - buffer := bytes.NewBuffer(nil) - for k, v := range a.array { - buffer.WriteString(v) - if k != len(a.array)-1 { - buffer.WriteString(glue) - } - } - return buffer.String() -} - -// CountValues counts the number of occurrences of all values in the array. -func (a *StrArray) CountValues() map[string]int { - m := make(map[string]int) - a.mu.RLock() - defer a.mu.RUnlock() - for _, v := range a.array { - m[v]++ - } - return m -} - -// Iterator is alias of IteratorAsc. -func (a *StrArray) Iterator(f func(k int, v string) bool) { - a.IteratorAsc(f) -} - -// IteratorAsc iterates the array readonly in ascending order with given callback function `f`. -// If `f` returns true, then it continues iterating; or false to stop. 
-func (a *StrArray) IteratorAsc(f func(k int, v string) bool) { - a.mu.RLock() - defer a.mu.RUnlock() - for k, v := range a.array { - if !f(k, v) { - break - } - } -} - -// IteratorDesc iterates the array readonly in descending order with given callback function `f`. -// If `f` returns true, then it continues iterating; or false to stop. -func (a *StrArray) IteratorDesc(f func(k int, v string) bool) { - a.mu.RLock() - defer a.mu.RUnlock() - for i := len(a.array) - 1; i >= 0; i-- { - if !f(i, a.array[i]) { - break - } - } -} - -// String returns current array as a string, which implements like json.Marshal does. -func (a *StrArray) String() string { - if a == nil { - return "" - } - a.mu.RLock() - defer a.mu.RUnlock() - buffer := bytes.NewBuffer(nil) - buffer.WriteByte('[') - for k, v := range a.array { - buffer.WriteString(`"` + gstr.QuoteMeta(v, `"\`) + `"`) - if k != len(a.array)-1 { - buffer.WriteByte(',') - } - } - buffer.WriteByte(']') - return buffer.String() -} - -// MarshalJSON implements the interface MarshalJSON for json.Marshal. -// Note that do not use pointer as its receiver here. -func (a StrArray) MarshalJSON() ([]byte, error) { - a.mu.RLock() - defer a.mu.RUnlock() - return json.Marshal(a.array) -} - -// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. -func (a *StrArray) UnmarshalJSON(b []byte) error { - if a.array == nil { - a.array = make([]string, 0) - } - a.mu.Lock() - defer a.mu.Unlock() - if err := json.UnmarshalUseNumber(b, &a.array); err != nil { - return err - } - return nil -} - -// UnmarshalValue is an interface implement which sets any type of value for array. -func (a *StrArray) UnmarshalValue(value interface{}) error { - a.mu.Lock() - defer a.mu.Unlock() - switch value.(type) { - case string, []byte: - return json.UnmarshalUseNumber(gconv.Bytes(value), &a.array) - default: - a.array = gconv.SliceStr(value) - } - return nil -} - -// Filter iterates array and filters elements using custom callback function. 
-// It removes the element from array if callback function `filter` returns true, -// it or else does nothing and continues iterating. -func (a *StrArray) Filter(filter func(index int, value string) bool) *StrArray { - a.mu.Lock() - defer a.mu.Unlock() - for i := 0; i < len(a.array); { - if filter(i, a.array[i]) { - a.array = append(a.array[:i], a.array[i+1:]...) - } else { - i++ - } - } - return a -} - -// FilterEmpty removes all empty string value of the array. -func (a *StrArray) FilterEmpty() *StrArray { - a.mu.Lock() - defer a.mu.Unlock() - for i := 0; i < len(a.array); { - if a.array[i] == "" { - a.array = append(a.array[:i], a.array[i+1:]...) - } else { - i++ - } - } - return a -} - -// Walk applies a user supplied function `f` to every item of array. -func (a *StrArray) Walk(f func(value string) string) *StrArray { - a.mu.Lock() - defer a.mu.Unlock() - for i, v := range a.array { - a.array[i] = f(v) - } - return a -} - -// IsEmpty checks whether the array is empty. -func (a *StrArray) IsEmpty() bool { - return a.Len() == 0 -} - -// DeepCopy implements interface for deep copy of current type. -func (a *StrArray) DeepCopy() interface{} { - if a == nil { - return nil - } - a.mu.RLock() - defer a.mu.RUnlock() - newSlice := make([]string, len(a.array)) - copy(newSlice, a.array) - return NewStrArrayFrom(newSlice, a.mu.IsSafe()) -} diff --git a/vendor/github.com/gogf/gf/v2/container/garray/garray_sorted_any.go b/vendor/github.com/gogf/gf/v2/container/garray/garray_sorted_any.go deleted file mode 100644 index 8ab6e3ee..00000000 --- a/vendor/github.com/gogf/gf/v2/container/garray/garray_sorted_any.go +++ /dev/null @@ -1,842 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. 
- -package garray - -import ( - "bytes" - "fmt" - "math" - "sort" - - "github.com/gogf/gf/v2/internal/deepcopy" - "github.com/gogf/gf/v2/internal/empty" - "github.com/gogf/gf/v2/internal/json" - "github.com/gogf/gf/v2/internal/rwmutex" - "github.com/gogf/gf/v2/text/gstr" - "github.com/gogf/gf/v2/util/gconv" - "github.com/gogf/gf/v2/util/grand" - "github.com/gogf/gf/v2/util/gutil" -) - -// SortedArray is a golang sorted array with rich features. -// It is using increasing order in default, which can be changed by -// setting it a custom comparator. -// It contains a concurrent-safe/unsafe switch, which should be set -// when its initialization and cannot be changed then. -type SortedArray struct { - mu rwmutex.RWMutex - array []interface{} - unique bool // Whether enable unique feature(false) - comparator func(a, b interface{}) int // Comparison function(it returns -1: a < b; 0: a == b; 1: a > b) -} - -// NewSortedArray creates and returns an empty sorted array. -// The parameter `safe` is used to specify whether using array in concurrent-safety, which is false in default. -// The parameter `comparator` used to compare values to sort in array, -// if it returns value < 0, means `a` < `b`; the `a` will be inserted before `b`; -// if it returns value = 0, means `a` = `b`; the `a` will be replaced by `b`; -// if it returns value > 0, means `a` > `b`; the `a` will be inserted after `b`; -func NewSortedArray(comparator func(a, b interface{}) int, safe ...bool) *SortedArray { - return NewSortedArraySize(0, comparator, safe...) -} - -// NewSortedArraySize create and returns an sorted array with given size and cap. -// The parameter `safe` is used to specify whether using array in concurrent-safety, -// which is false in default. 
-func NewSortedArraySize(cap int, comparator func(a, b interface{}) int, safe ...bool) *SortedArray { - return &SortedArray{ - mu: rwmutex.Create(safe...), - array: make([]interface{}, 0, cap), - comparator: comparator, - } -} - -// NewSortedArrayRange creates and returns an array by a range from `start` to `end` -// with step value `step`. -func NewSortedArrayRange(start, end, step int, comparator func(a, b interface{}) int, safe ...bool) *SortedArray { - if step == 0 { - panic(fmt.Sprintf(`invalid step value: %d`, step)) - } - slice := make([]interface{}, 0) - index := 0 - for i := start; i <= end; i += step { - slice = append(slice, i) - index++ - } - return NewSortedArrayFrom(slice, comparator, safe...) -} - -// NewSortedArrayFrom creates and returns an sorted array with given slice `array`. -// The parameter `safe` is used to specify whether using array in concurrent-safety, -// which is false in default. -func NewSortedArrayFrom(array []interface{}, comparator func(a, b interface{}) int, safe ...bool) *SortedArray { - a := NewSortedArraySize(0, comparator, safe...) - a.array = array - sort.Slice(a.array, func(i, j int) bool { - return a.getComparator()(a.array[i], a.array[j]) < 0 - }) - return a -} - -// NewSortedArrayFromCopy creates and returns an sorted array from a copy of given slice `array`. -// The parameter `safe` is used to specify whether using array in concurrent-safety, -// which is false in default. -func NewSortedArrayFromCopy(array []interface{}, comparator func(a, b interface{}) int, safe ...bool) *SortedArray { - newArray := make([]interface{}, len(array)) - copy(newArray, array) - return NewSortedArrayFrom(newArray, comparator, safe...) -} - -// At returns the value by the specified index. -// If the given `index` is out of range of the array, it returns `nil`. -func (a *SortedArray) At(index int) (value interface{}) { - value, _ = a.Get(index) - return -} - -// SetArray sets the underlying slice array with the given `array`. 
-func (a *SortedArray) SetArray(array []interface{}) *SortedArray { - a.mu.Lock() - defer a.mu.Unlock() - a.array = array - sort.Slice(a.array, func(i, j int) bool { - return a.getComparator()(a.array[i], a.array[j]) < 0 - }) - return a -} - -// SetComparator sets/changes the comparator for sorting. -// It resorts the array as the comparator is changed. -func (a *SortedArray) SetComparator(comparator func(a, b interface{}) int) { - a.mu.Lock() - defer a.mu.Unlock() - a.comparator = comparator - sort.Slice(a.array, func(i, j int) bool { - return a.getComparator()(a.array[i], a.array[j]) < 0 - }) -} - -// Sort sorts the array in increasing order. -// The parameter `reverse` controls whether sort -// in increasing order(default) or decreasing order -func (a *SortedArray) Sort() *SortedArray { - a.mu.Lock() - defer a.mu.Unlock() - sort.Slice(a.array, func(i, j int) bool { - return a.getComparator()(a.array[i], a.array[j]) < 0 - }) - return a -} - -// Add adds one or multiple values to sorted array, the array always keeps sorted. -// It's alias of function Append, see Append. -func (a *SortedArray) Add(values ...interface{}) *SortedArray { - return a.Append(values...) -} - -// Append adds one or multiple values to sorted array, the array always keeps sorted. -func (a *SortedArray) Append(values ...interface{}) *SortedArray { - if len(values) == 0 { - return a - } - a.mu.Lock() - defer a.mu.Unlock() - for _, value := range values { - index, cmp := a.binSearch(value, false) - if a.unique && cmp == 0 { - continue - } - if index < 0 { - a.array = append(a.array, value) - continue - } - if cmp > 0 { - index++ - } - a.array = append(a.array[:index], append([]interface{}{value}, a.array[index:]...)...) - } - return a -} - -// Get returns the value by the specified index. -// If the given `index` is out of range of the array, the `found` is false. 
-func (a *SortedArray) Get(index int) (value interface{}, found bool) { - a.mu.RLock() - defer a.mu.RUnlock() - if index < 0 || index >= len(a.array) { - return nil, false - } - return a.array[index], true -} - -// Remove removes an item by index. -// If the given `index` is out of range of the array, the `found` is false. -func (a *SortedArray) Remove(index int) (value interface{}, found bool) { - a.mu.Lock() - defer a.mu.Unlock() - return a.doRemoveWithoutLock(index) -} - -// doRemoveWithoutLock removes an item by index without lock. -func (a *SortedArray) doRemoveWithoutLock(index int) (value interface{}, found bool) { - if index < 0 || index >= len(a.array) { - return nil, false - } - // Determine array boundaries when deleting to improve deletion efficiency. - if index == 0 { - value := a.array[0] - a.array = a.array[1:] - return value, true - } else if index == len(a.array)-1 { - value := a.array[index] - a.array = a.array[:index] - return value, true - } - // If it is a non-boundary delete, - // it will involve the creation of an array, - // then the deletion is less efficient. - value = a.array[index] - a.array = append(a.array[:index], a.array[index+1:]...) - return value, true -} - -// RemoveValue removes an item by value. -// It returns true if value is found in the array, or else false if not found. -func (a *SortedArray) RemoveValue(value interface{}) bool { - a.mu.Lock() - defer a.mu.Unlock() - if i, r := a.binSearch(value, false); r == 0 { - _, res := a.doRemoveWithoutLock(i) - return res - } - return false -} - -// RemoveValues removes an item by `values`. -func (a *SortedArray) RemoveValues(values ...interface{}) { - a.mu.Lock() - defer a.mu.Unlock() - for _, value := range values { - if i, r := a.binSearch(value, false); r == 0 { - a.doRemoveWithoutLock(i) - } - } -} - -// PopLeft pops and returns an item from the beginning of array. -// Note that if the array is empty, the `found` is false. 
-func (a *SortedArray) PopLeft() (value interface{}, found bool) { - a.mu.Lock() - defer a.mu.Unlock() - if len(a.array) == 0 { - return nil, false - } - value = a.array[0] - a.array = a.array[1:] - return value, true -} - -// PopRight pops and returns an item from the end of array. -// Note that if the array is empty, the `found` is false. -func (a *SortedArray) PopRight() (value interface{}, found bool) { - a.mu.Lock() - defer a.mu.Unlock() - index := len(a.array) - 1 - if index < 0 { - return nil, false - } - value = a.array[index] - a.array = a.array[:index] - return value, true -} - -// PopRand randomly pops and return an item out of array. -// Note that if the array is empty, the `found` is false. -func (a *SortedArray) PopRand() (value interface{}, found bool) { - a.mu.Lock() - defer a.mu.Unlock() - return a.doRemoveWithoutLock(grand.Intn(len(a.array))) -} - -// PopRands randomly pops and returns `size` items out of array. -func (a *SortedArray) PopRands(size int) []interface{} { - a.mu.Lock() - defer a.mu.Unlock() - if size <= 0 || len(a.array) == 0 { - return nil - } - if size >= len(a.array) { - size = len(a.array) - } - array := make([]interface{}, size) - for i := 0; i < size; i++ { - array[i], _ = a.doRemoveWithoutLock(grand.Intn(len(a.array))) - } - return array -} - -// PopLefts pops and returns `size` items from the beginning of array. -func (a *SortedArray) PopLefts(size int) []interface{} { - a.mu.Lock() - defer a.mu.Unlock() - if size <= 0 || len(a.array) == 0 { - return nil - } - if size >= len(a.array) { - array := a.array - a.array = a.array[:0] - return array - } - value := a.array[0:size] - a.array = a.array[size:] - return value -} - -// PopRights pops and returns `size` items from the end of array. 
-func (a *SortedArray) PopRights(size int) []interface{} { - a.mu.Lock() - defer a.mu.Unlock() - if size <= 0 || len(a.array) == 0 { - return nil - } - index := len(a.array) - size - if index <= 0 { - array := a.array - a.array = a.array[:0] - return array - } - value := a.array[index:] - a.array = a.array[:index] - return value -} - -// Range picks and returns items by range, like array[start:end]. -// Notice, if in concurrent-safe usage, it returns a copy of slice; -// else a pointer to the underlying data. -// -// If `end` is negative, then the offset will start from the end of array. -// If `end` is omitted, then the sequence will have everything from start up -// until the end of the array. -func (a *SortedArray) Range(start int, end ...int) []interface{} { - a.mu.RLock() - defer a.mu.RUnlock() - offsetEnd := len(a.array) - if len(end) > 0 && end[0] < offsetEnd { - offsetEnd = end[0] - } - if start > offsetEnd { - return nil - } - if start < 0 { - start = 0 - } - array := ([]interface{})(nil) - if a.mu.IsSafe() { - array = make([]interface{}, offsetEnd-start) - copy(array, a.array[start:offsetEnd]) - } else { - array = a.array[start:offsetEnd] - } - return array -} - -// SubSlice returns a slice of elements from the array as specified -// by the `offset` and `size` parameters. -// If in concurrent safe usage, it returns a copy of the slice; else a pointer. -// -// If offset is non-negative, the sequence will start at that offset in the array. -// If offset is negative, the sequence will start that far from the end of the array. -// -// If length is given and is positive, then the sequence will have up to that many elements in it. -// If the array is shorter than the length, then only the available array elements will be present. -// If length is given and is negative then the sequence will stop that many elements from the end of the array. -// If it is omitted, then the sequence will have everything from offset up until the end of the array. 
-// -// Any possibility crossing the left border of array, it will fail. -func (a *SortedArray) SubSlice(offset int, length ...int) []interface{} { - a.mu.RLock() - defer a.mu.RUnlock() - size := len(a.array) - if len(length) > 0 { - size = length[0] - } - if offset > len(a.array) { - return nil - } - if offset < 0 { - offset = len(a.array) + offset - if offset < 0 { - return nil - } - } - if size < 0 { - offset += size - size = -size - if offset < 0 { - return nil - } - } - end := offset + size - if end > len(a.array) { - end = len(a.array) - size = len(a.array) - offset - } - if a.mu.IsSafe() { - s := make([]interface{}, size) - copy(s, a.array[offset:]) - return s - } else { - return a.array[offset:end] - } -} - -// Sum returns the sum of values in an array. -func (a *SortedArray) Sum() (sum int) { - a.mu.RLock() - defer a.mu.RUnlock() - for _, v := range a.array { - sum += gconv.Int(v) - } - return -} - -// Len returns the length of array. -func (a *SortedArray) Len() int { - a.mu.RLock() - length := len(a.array) - a.mu.RUnlock() - return length -} - -// Slice returns the underlying data of array. -// Note that, if it's in concurrent-safe usage, it returns a copy of underlying data, -// or else a pointer to the underlying data. -func (a *SortedArray) Slice() []interface{} { - var array []interface{} - if a.mu.IsSafe() { - a.mu.RLock() - defer a.mu.RUnlock() - array = make([]interface{}, len(a.array)) - copy(array, a.array) - } else { - array = a.array - } - return array -} - -// Interfaces returns current array as []interface{}. -func (a *SortedArray) Interfaces() []interface{} { - return a.Slice() -} - -// Contains checks whether a value exists in the array. -func (a *SortedArray) Contains(value interface{}) bool { - return a.Search(value) != -1 -} - -// Search searches array by `value`, returns the index of `value`, -// or returns -1 if not exists. 
-func (a *SortedArray) Search(value interface{}) (index int) { - if i, r := a.binSearch(value, true); r == 0 { - return i - } - return -1 -} - -// Binary search. -// It returns the last compared index and the result. -// If `result` equals to 0, it means the value at `index` is equals to `value`. -// If `result` lesser than 0, it means the value at `index` is lesser than `value`. -// If `result` greater than 0, it means the value at `index` is greater than `value`. -func (a *SortedArray) binSearch(value interface{}, lock bool) (index int, result int) { - if lock { - a.mu.RLock() - defer a.mu.RUnlock() - } - if len(a.array) == 0 { - return -1, -2 - } - min := 0 - max := len(a.array) - 1 - mid := 0 - cmp := -2 - for min <= max { - mid = min + (max-min)/2 - cmp = a.getComparator()(value, a.array[mid]) - switch { - case cmp < 0: - max = mid - 1 - case cmp > 0: - min = mid + 1 - default: - return mid, cmp - } - } - return mid, cmp -} - -// SetUnique sets unique mark to the array, -// which means it does not contain any repeated items. -// It also does unique check, remove all repeated items. -func (a *SortedArray) SetUnique(unique bool) *SortedArray { - oldUnique := a.unique - a.unique = unique - if unique && oldUnique != unique { - a.Unique() - } - return a -} - -// Unique uniques the array, clear repeated items. -func (a *SortedArray) Unique() *SortedArray { - a.mu.Lock() - defer a.mu.Unlock() - if len(a.array) == 0 { - return a - } - i := 0 - for { - if i == len(a.array)-1 { - break - } - if a.getComparator()(a.array[i], a.array[i+1]) == 0 { - a.array = append(a.array[:i+1], a.array[i+1+1:]...) - } else { - i++ - } - } - return a -} - -// Clone returns a new array, which is a copy of current array. -func (a *SortedArray) Clone() (newArray *SortedArray) { - a.mu.RLock() - array := make([]interface{}, len(a.array)) - copy(array, a.array) - a.mu.RUnlock() - return NewSortedArrayFrom(array, a.comparator, a.mu.IsSafe()) -} - -// Clear deletes all items of current array. 
-func (a *SortedArray) Clear() *SortedArray { - a.mu.Lock() - if len(a.array) > 0 { - a.array = make([]interface{}, 0) - } - a.mu.Unlock() - return a -} - -// LockFunc locks writing by callback function `f`. -func (a *SortedArray) LockFunc(f func(array []interface{})) *SortedArray { - a.mu.Lock() - defer a.mu.Unlock() - - // Keep the array always sorted. - defer sort.Slice(a.array, func(i, j int) bool { - return a.getComparator()(a.array[i], a.array[j]) < 0 - }) - - f(a.array) - return a -} - -// RLockFunc locks reading by callback function `f`. -func (a *SortedArray) RLockFunc(f func(array []interface{})) *SortedArray { - a.mu.RLock() - defer a.mu.RUnlock() - f(a.array) - return a -} - -// Merge merges `array` into current array. -// The parameter `array` can be any garray or slice type. -// The difference between Merge and Append is Append supports only specified slice type, -// but Merge supports more parameter types. -func (a *SortedArray) Merge(array interface{}) *SortedArray { - return a.Add(gconv.Interfaces(array)...) -} - -// Chunk splits an array into multiple arrays, -// the size of each array is determined by `size`. -// The last chunk may contain less than size elements. -func (a *SortedArray) Chunk(size int) [][]interface{} { - if size < 1 { - return nil - } - a.mu.RLock() - defer a.mu.RUnlock() - length := len(a.array) - chunks := int(math.Ceil(float64(length) / float64(size))) - var n [][]interface{} - for i, end := 0, 0; chunks > 0; chunks-- { - end = (i + 1) * size - if end > length { - end = length - } - n = append(n, a.array[i*size:end]) - i++ - } - return n -} - -// Rand randomly returns one item from array(no deleting). -func (a *SortedArray) Rand() (value interface{}, found bool) { - a.mu.RLock() - defer a.mu.RUnlock() - if len(a.array) == 0 { - return nil, false - } - return a.array[grand.Intn(len(a.array))], true -} - -// Rands randomly returns `size` items from array(no deleting). 
-func (a *SortedArray) Rands(size int) []interface{} { - a.mu.RLock() - defer a.mu.RUnlock() - if size <= 0 || len(a.array) == 0 { - return nil - } - array := make([]interface{}, size) - for i := 0; i < size; i++ { - array[i] = a.array[grand.Intn(len(a.array))] - } - return array -} - -// Join joins array elements with a string `glue`. -func (a *SortedArray) Join(glue string) string { - a.mu.RLock() - defer a.mu.RUnlock() - if len(a.array) == 0 { - return "" - } - buffer := bytes.NewBuffer(nil) - for k, v := range a.array { - buffer.WriteString(gconv.String(v)) - if k != len(a.array)-1 { - buffer.WriteString(glue) - } - } - return buffer.String() -} - -// CountValues counts the number of occurrences of all values in the array. -func (a *SortedArray) CountValues() map[interface{}]int { - m := make(map[interface{}]int) - a.mu.RLock() - defer a.mu.RUnlock() - for _, v := range a.array { - m[v]++ - } - return m -} - -// Iterator is alias of IteratorAsc. -func (a *SortedArray) Iterator(f func(k int, v interface{}) bool) { - a.IteratorAsc(f) -} - -// IteratorAsc iterates the array readonly in ascending order with given callback function `f`. -// If `f` returns true, then it continues iterating; or false to stop. -func (a *SortedArray) IteratorAsc(f func(k int, v interface{}) bool) { - a.mu.RLock() - defer a.mu.RUnlock() - for k, v := range a.array { - if !f(k, v) { - break - } - } -} - -// IteratorDesc iterates the array readonly in descending order with given callback function `f`. -// If `f` returns true, then it continues iterating; or false to stop. -func (a *SortedArray) IteratorDesc(f func(k int, v interface{}) bool) { - a.mu.RLock() - defer a.mu.RUnlock() - for i := len(a.array) - 1; i >= 0; i-- { - if !f(i, a.array[i]) { - break - } - } -} - -// String returns current array as a string, which implements like json.Marshal does. 
-func (a *SortedArray) String() string { - if a == nil { - return "" - } - a.mu.RLock() - defer a.mu.RUnlock() - buffer := bytes.NewBuffer(nil) - buffer.WriteByte('[') - s := "" - for k, v := range a.array { - s = gconv.String(v) - if gstr.IsNumeric(s) { - buffer.WriteString(s) - } else { - buffer.WriteString(`"` + gstr.QuoteMeta(s, `"\`) + `"`) - } - if k != len(a.array)-1 { - buffer.WriteByte(',') - } - } - buffer.WriteByte(']') - return buffer.String() -} - -// MarshalJSON implements the interface MarshalJSON for json.Marshal. -// Note that do not use pointer as its receiver here. -func (a SortedArray) MarshalJSON() ([]byte, error) { - a.mu.RLock() - defer a.mu.RUnlock() - return json.Marshal(a.array) -} - -// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. -// Note that the comparator is set as string comparator in default. -func (a *SortedArray) UnmarshalJSON(b []byte) error { - if a.comparator == nil { - a.array = make([]interface{}, 0) - a.comparator = gutil.ComparatorString - } - a.mu.Lock() - defer a.mu.Unlock() - if err := json.UnmarshalUseNumber(b, &a.array); err != nil { - return err - } - if a.comparator != nil && a.array != nil { - sort.Slice(a.array, func(i, j int) bool { - return a.comparator(a.array[i], a.array[j]) < 0 - }) - } - return nil -} - -// UnmarshalValue is an interface implement which sets any type of value for array. -// Note that the comparator is set as string comparator in default. 
-func (a *SortedArray) UnmarshalValue(value interface{}) (err error) { - if a.comparator == nil { - a.comparator = gutil.ComparatorString - } - a.mu.Lock() - defer a.mu.Unlock() - switch value.(type) { - case string, []byte: - err = json.UnmarshalUseNumber(gconv.Bytes(value), &a.array) - default: - a.array = gconv.SliceAny(value) - } - if a.comparator != nil && a.array != nil { - sort.Slice(a.array, func(i, j int) bool { - return a.comparator(a.array[i], a.array[j]) < 0 - }) - } - return err -} - -// FilterNil removes all nil value of the array. -func (a *SortedArray) FilterNil() *SortedArray { - a.mu.Lock() - defer a.mu.Unlock() - for i := 0; i < len(a.array); { - if empty.IsNil(a.array[i]) { - a.array = append(a.array[:i], a.array[i+1:]...) - } else { - break - } - } - for i := len(a.array) - 1; i >= 0; { - if empty.IsNil(a.array[i]) { - a.array = append(a.array[:i], a.array[i+1:]...) - } else { - break - } - } - return a -} - -// Filter iterates array and filters elements using custom callback function. -// It removes the element from array if callback function `filter` returns true, -// it or else does nothing and continues iterating. -func (a *SortedArray) Filter(filter func(index int, value interface{}) bool) *SortedArray { - a.mu.Lock() - defer a.mu.Unlock() - for i := 0; i < len(a.array); { - if filter(i, a.array[i]) { - a.array = append(a.array[:i], a.array[i+1:]...) - } else { - i++ - } - } - return a -} - -// FilterEmpty removes all empty value of the array. -// Values like: 0, nil, false, "", len(slice/map/chan) == 0 are considered empty. -func (a *SortedArray) FilterEmpty() *SortedArray { - a.mu.Lock() - defer a.mu.Unlock() - for i := 0; i < len(a.array); { - if empty.IsEmpty(a.array[i]) { - a.array = append(a.array[:i], a.array[i+1:]...) - } else { - break - } - } - for i := len(a.array) - 1; i >= 0; { - if empty.IsEmpty(a.array[i]) { - a.array = append(a.array[:i], a.array[i+1:]...) 
- } else { - break - } - } - return a -} - -// Walk applies a user supplied function `f` to every item of array. -func (a *SortedArray) Walk(f func(value interface{}) interface{}) *SortedArray { - a.mu.Lock() - defer a.mu.Unlock() - // Keep the array always sorted. - defer sort.Slice(a.array, func(i, j int) bool { - return a.getComparator()(a.array[i], a.array[j]) < 0 - }) - for i, v := range a.array { - a.array[i] = f(v) - } - return a -} - -// IsEmpty checks whether the array is empty. -func (a *SortedArray) IsEmpty() bool { - return a.Len() == 0 -} - -// getComparator returns the comparator if it's previously set, -// or else it panics. -func (a *SortedArray) getComparator() func(a, b interface{}) int { - if a.comparator == nil { - panic("comparator is missing for sorted array") - } - return a.comparator -} - -// DeepCopy implements interface for deep copy of current type. -func (a *SortedArray) DeepCopy() interface{} { - if a == nil { - return nil - } - a.mu.RLock() - defer a.mu.RUnlock() - newSlice := make([]interface{}, len(a.array)) - for i, v := range a.array { - newSlice[i] = deepcopy.Copy(v) - } - return NewSortedArrayFrom(newSlice, a.comparator, a.mu.IsSafe()) -} diff --git a/vendor/github.com/gogf/gf/v2/container/garray/garray_sorted_int.go b/vendor/github.com/gogf/gf/v2/container/garray/garray_sorted_int.go deleted file mode 100644 index b9181aea..00000000 --- a/vendor/github.com/gogf/gf/v2/container/garray/garray_sorted_int.go +++ /dev/null @@ -1,787 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. 
- -package garray - -import ( - "bytes" - "fmt" - "math" - "sort" - - "github.com/gogf/gf/v2/internal/json" - "github.com/gogf/gf/v2/internal/rwmutex" - "github.com/gogf/gf/v2/util/gconv" - "github.com/gogf/gf/v2/util/grand" -) - -// SortedIntArray is a golang sorted int array with rich features. -// It is using increasing order in default, which can be changed by -// setting it a custom comparator. -// It contains a concurrent-safe/unsafe switch, which should be set -// when its initialization and cannot be changed then. -type SortedIntArray struct { - mu rwmutex.RWMutex - array []int - unique bool // Whether enable unique feature(false) - comparator func(a, b int) int // Comparison function(it returns -1: a < b; 0: a == b; 1: a > b) -} - -// NewSortedIntArray creates and returns an empty sorted array. -// The parameter `safe` is used to specify whether using array in concurrent-safety, -// which is false in default. -func NewSortedIntArray(safe ...bool) *SortedIntArray { - return NewSortedIntArraySize(0, safe...) -} - -// NewSortedIntArrayComparator creates and returns an empty sorted array with specified comparator. -// The parameter `safe` is used to specify whether using array in concurrent-safety which is false in default. -func NewSortedIntArrayComparator(comparator func(a, b int) int, safe ...bool) *SortedIntArray { - array := NewSortedIntArray(safe...) - array.comparator = comparator - return array -} - -// NewSortedIntArraySize create and returns an sorted array with given size and cap. -// The parameter `safe` is used to specify whether using array in concurrent-safety, -// which is false in default. -func NewSortedIntArraySize(cap int, safe ...bool) *SortedIntArray { - return &SortedIntArray{ - mu: rwmutex.Create(safe...), - array: make([]int, 0, cap), - comparator: defaultComparatorInt, - } -} - -// NewSortedIntArrayRange creates and returns an array by a range from `start` to `end` -// with step value `step`. 
-func NewSortedIntArrayRange(start, end, step int, safe ...bool) *SortedIntArray { - if step == 0 { - panic(fmt.Sprintf(`invalid step value: %d`, step)) - } - slice := make([]int, 0) - index := 0 - for i := start; i <= end; i += step { - slice = append(slice, i) - index++ - } - return NewSortedIntArrayFrom(slice, safe...) -} - -// NewSortedIntArrayFrom creates and returns an sorted array with given slice `array`. -// The parameter `safe` is used to specify whether using array in concurrent-safety, -// which is false in default. -func NewSortedIntArrayFrom(array []int, safe ...bool) *SortedIntArray { - a := NewSortedIntArraySize(0, safe...) - a.array = array - sort.Ints(a.array) - return a -} - -// NewSortedIntArrayFromCopy creates and returns an sorted array from a copy of given slice `array`. -// The parameter `safe` is used to specify whether using array in concurrent-safety, -// which is false in default. -func NewSortedIntArrayFromCopy(array []int, safe ...bool) *SortedIntArray { - newArray := make([]int, len(array)) - copy(newArray, array) - return NewSortedIntArrayFrom(newArray, safe...) -} - -// At returns the value by the specified index. -// If the given `index` is out of range of the array, it returns `0`. -func (a *SortedIntArray) At(index int) (value int) { - value, _ = a.Get(index) - return -} - -// SetArray sets the underlying slice array with the given `array`. -func (a *SortedIntArray) SetArray(array []int) *SortedIntArray { - a.mu.Lock() - defer a.mu.Unlock() - a.array = array - quickSortInt(a.array, a.getComparator()) - return a -} - -// Sort sorts the array in increasing order. -// The parameter `reverse` controls whether sort -// in increasing order(default) or decreasing order. -func (a *SortedIntArray) Sort() *SortedIntArray { - a.mu.Lock() - defer a.mu.Unlock() - quickSortInt(a.array, a.getComparator()) - return a -} - -// Add adds one or multiple values to sorted array, the array always keeps sorted. 
-// It's alias of function Append, see Append. -func (a *SortedIntArray) Add(values ...int) *SortedIntArray { - return a.Append(values...) -} - -// Append adds one or multiple values to sorted array, the array always keeps sorted. -func (a *SortedIntArray) Append(values ...int) *SortedIntArray { - if len(values) == 0 { - return a - } - a.mu.Lock() - defer a.mu.Unlock() - for _, value := range values { - index, cmp := a.binSearch(value, false) - if a.unique && cmp == 0 { - continue - } - if index < 0 { - a.array = append(a.array, value) - continue - } - if cmp > 0 { - index++ - } - rear := append([]int{}, a.array[index:]...) - a.array = append(a.array[0:index], value) - a.array = append(a.array, rear...) - } - return a -} - -// Get returns the value by the specified index. -// If the given `index` is out of range of the array, the `found` is false. -func (a *SortedIntArray) Get(index int) (value int, found bool) { - a.mu.RLock() - defer a.mu.RUnlock() - if index < 0 || index >= len(a.array) { - return 0, false - } - return a.array[index], true -} - -// Remove removes an item by index. -// If the given `index` is out of range of the array, the `found` is false. -func (a *SortedIntArray) Remove(index int) (value int, found bool) { - a.mu.Lock() - defer a.mu.Unlock() - return a.doRemoveWithoutLock(index) -} - -// doRemoveWithoutLock removes an item by index without lock. -func (a *SortedIntArray) doRemoveWithoutLock(index int) (value int, found bool) { - if index < 0 || index >= len(a.array) { - return 0, false - } - // Determine array boundaries when deleting to improve deletion efficiency. - if index == 0 { - value := a.array[0] - a.array = a.array[1:] - return value, true - } else if index == len(a.array)-1 { - value := a.array[index] - a.array = a.array[:index] - return value, true - } - // If it is a non-boundary delete, - // it will involve the creation of an array, - // then the deletion is less efficient. 
- value = a.array[index] - a.array = append(a.array[:index], a.array[index+1:]...) - return value, true -} - -// RemoveValue removes an item by value. -// It returns true if value is found in the array, or else false if not found. -func (a *SortedIntArray) RemoveValue(value int) bool { - a.mu.Lock() - defer a.mu.Unlock() - if i, r := a.binSearch(value, false); r == 0 { - _, res := a.doRemoveWithoutLock(i) - return res - } - return false -} - -// RemoveValues removes an item by `values`. -func (a *SortedIntArray) RemoveValues(values ...int) { - a.mu.Lock() - defer a.mu.Unlock() - for _, value := range values { - if i, r := a.binSearch(value, false); r == 0 { - a.doRemoveWithoutLock(i) - } - } -} - -// PopLeft pops and returns an item from the beginning of array. -// Note that if the array is empty, the `found` is false. -func (a *SortedIntArray) PopLeft() (value int, found bool) { - a.mu.Lock() - defer a.mu.Unlock() - if len(a.array) == 0 { - return 0, false - } - value = a.array[0] - a.array = a.array[1:] - return value, true -} - -// PopRight pops and returns an item from the end of array. -// Note that if the array is empty, the `found` is false. -func (a *SortedIntArray) PopRight() (value int, found bool) { - a.mu.Lock() - defer a.mu.Unlock() - index := len(a.array) - 1 - if index < 0 { - return 0, false - } - value = a.array[index] - a.array = a.array[:index] - return value, true -} - -// PopRand randomly pops and return an item out of array. -// Note that if the array is empty, the `found` is false. -func (a *SortedIntArray) PopRand() (value int, found bool) { - a.mu.Lock() - defer a.mu.Unlock() - return a.doRemoveWithoutLock(grand.Intn(len(a.array))) -} - -// PopRands randomly pops and returns `size` items out of array. -// If the given `size` is greater than size of the array, it returns all elements of the array. -// Note that if given `size` <= 0 or the array is empty, it returns nil. 
-func (a *SortedIntArray) PopRands(size int) []int { - a.mu.Lock() - defer a.mu.Unlock() - if size <= 0 || len(a.array) == 0 { - return nil - } - if size >= len(a.array) { - size = len(a.array) - } - array := make([]int, size) - for i := 0; i < size; i++ { - array[i], _ = a.doRemoveWithoutLock(grand.Intn(len(a.array))) - } - return array -} - -// PopLefts pops and returns `size` items from the beginning of array. -// If the given `size` is greater than size of the array, it returns all elements of the array. -// Note that if given `size` <= 0 or the array is empty, it returns nil. -func (a *SortedIntArray) PopLefts(size int) []int { - a.mu.Lock() - defer a.mu.Unlock() - if size <= 0 || len(a.array) == 0 { - return nil - } - if size >= len(a.array) { - array := a.array - a.array = a.array[:0] - return array - } - value := a.array[0:size] - a.array = a.array[size:] - return value -} - -// PopRights pops and returns `size` items from the end of array. -// If the given `size` is greater than size of the array, it returns all elements of the array. -// Note that if given `size` <= 0 or the array is empty, it returns nil. -func (a *SortedIntArray) PopRights(size int) []int { - a.mu.Lock() - defer a.mu.Unlock() - if size <= 0 || len(a.array) == 0 { - return nil - } - index := len(a.array) - size - if index <= 0 { - array := a.array - a.array = a.array[:0] - return array - } - value := a.array[index:] - a.array = a.array[:index] - return value -} - -// Range picks and returns items by range, like array[start:end]. -// Notice, if in concurrent-safe usage, it returns a copy of slice; -// else a pointer to the underlying data. -// -// If `end` is negative, then the offset will start from the end of array. -// If `end` is omitted, then the sequence will have everything from start up -// until the end of the array. 
-func (a *SortedIntArray) Range(start int, end ...int) []int { - a.mu.RLock() - defer a.mu.RUnlock() - offsetEnd := len(a.array) - if len(end) > 0 && end[0] < offsetEnd { - offsetEnd = end[0] - } - if start > offsetEnd { - return nil - } - if start < 0 { - start = 0 - } - array := ([]int)(nil) - if a.mu.IsSafe() { - array = make([]int, offsetEnd-start) - copy(array, a.array[start:offsetEnd]) - } else { - array = a.array[start:offsetEnd] - } - return array -} - -// SubSlice returns a slice of elements from the array as specified -// by the `offset` and `size` parameters. -// If in concurrent safe usage, it returns a copy of the slice; else a pointer. -// -// If offset is non-negative, the sequence will start at that offset in the array. -// If offset is negative, the sequence will start that far from the end of the array. -// -// If length is given and is positive, then the sequence will have up to that many elements in it. -// If the array is shorter than the length, then only the available array elements will be present. -// If length is given and is negative then the sequence will stop that many elements from the end of the array. -// If it is omitted, then the sequence will have everything from offset up until the end of the array. -// -// Any possibility crossing the left border of array, it will fail. 
-func (a *SortedIntArray) SubSlice(offset int, length ...int) []int { - a.mu.RLock() - defer a.mu.RUnlock() - size := len(a.array) - if len(length) > 0 { - size = length[0] - } - if offset > len(a.array) { - return nil - } - if offset < 0 { - offset = len(a.array) + offset - if offset < 0 { - return nil - } - } - if size < 0 { - offset += size - size = -size - if offset < 0 { - return nil - } - } - end := offset + size - if end > len(a.array) { - end = len(a.array) - size = len(a.array) - offset - } - if a.mu.IsSafe() { - s := make([]int, size) - copy(s, a.array[offset:]) - return s - } else { - return a.array[offset:end] - } -} - -// Len returns the length of array. -func (a *SortedIntArray) Len() int { - a.mu.RLock() - length := len(a.array) - a.mu.RUnlock() - return length -} - -// Sum returns the sum of values in an array. -func (a *SortedIntArray) Sum() (sum int) { - a.mu.RLock() - defer a.mu.RUnlock() - for _, v := range a.array { - sum += v - } - return -} - -// Slice returns the underlying data of array. -// Note that, if it's in concurrent-safe usage, it returns a copy of underlying data, -// or else a pointer to the underlying data. -func (a *SortedIntArray) Slice() []int { - array := ([]int)(nil) - if a.mu.IsSafe() { - a.mu.RLock() - defer a.mu.RUnlock() - array = make([]int, len(a.array)) - copy(array, a.array) - } else { - array = a.array - } - return array -} - -// Interfaces returns current array as []interface{}. -func (a *SortedIntArray) Interfaces() []interface{} { - a.mu.RLock() - defer a.mu.RUnlock() - array := make([]interface{}, len(a.array)) - for k, v := range a.array { - array[k] = v - } - return array -} - -// Contains checks whether a value exists in the array. -func (a *SortedIntArray) Contains(value int) bool { - return a.Search(value) != -1 -} - -// Search searches array by `value`, returns the index of `value`, -// or returns -1 if not exists. 
-func (a *SortedIntArray) Search(value int) (index int) { - if i, r := a.binSearch(value, true); r == 0 { - return i - } - return -1 -} - -// Binary search. -// It returns the last compared index and the result. -// If `result` equals to 0, it means the value at `index` is equals to `value`. -// If `result` lesser than 0, it means the value at `index` is lesser than `value`. -// If `result` greater than 0, it means the value at `index` is greater than `value`. -func (a *SortedIntArray) binSearch(value int, lock bool) (index int, result int) { - if lock { - a.mu.RLock() - defer a.mu.RUnlock() - } - if len(a.array) == 0 { - return -1, -2 - } - min := 0 - max := len(a.array) - 1 - mid := 0 - cmp := -2 - for min <= max { - mid = min + int((max-min)/2) - cmp = a.getComparator()(value, a.array[mid]) - switch { - case cmp < 0: - max = mid - 1 - case cmp > 0: - min = mid + 1 - default: - return mid, cmp - } - } - return mid, cmp -} - -// SetUnique sets unique mark to the array, -// which means it does not contain any repeated items. -// It also do unique check, remove all repeated items. -func (a *SortedIntArray) SetUnique(unique bool) *SortedIntArray { - oldUnique := a.unique - a.unique = unique - if unique && oldUnique != unique { - a.Unique() - } - return a -} - -// Unique uniques the array, clear repeated items. -func (a *SortedIntArray) Unique() *SortedIntArray { - a.mu.Lock() - defer a.mu.Unlock() - if len(a.array) == 0 { - return a - } - i := 0 - for { - if i == len(a.array)-1 { - break - } - if a.getComparator()(a.array[i], a.array[i+1]) == 0 { - a.array = append(a.array[:i+1], a.array[i+1+1:]...) - } else { - i++ - } - } - return a -} - -// Clone returns a new array, which is a copy of current array. -func (a *SortedIntArray) Clone() (newArray *SortedIntArray) { - a.mu.RLock() - array := make([]int, len(a.array)) - copy(array, a.array) - a.mu.RUnlock() - return NewSortedIntArrayFrom(array, a.mu.IsSafe()) -} - -// Clear deletes all items of current array. 
-func (a *SortedIntArray) Clear() *SortedIntArray { - a.mu.Lock() - if len(a.array) > 0 { - a.array = make([]int, 0) - } - a.mu.Unlock() - return a -} - -// LockFunc locks writing by callback function `f`. -func (a *SortedIntArray) LockFunc(f func(array []int)) *SortedIntArray { - a.mu.Lock() - defer a.mu.Unlock() - f(a.array) - return a -} - -// RLockFunc locks reading by callback function `f`. -func (a *SortedIntArray) RLockFunc(f func(array []int)) *SortedIntArray { - a.mu.RLock() - defer a.mu.RUnlock() - f(a.array) - return a -} - -// Merge merges `array` into current array. -// The parameter `array` can be any garray or slice type. -// The difference between Merge and Append is Append supports only specified slice type, -// but Merge supports more parameter types. -func (a *SortedIntArray) Merge(array interface{}) *SortedIntArray { - return a.Add(gconv.Ints(array)...) -} - -// Chunk splits an array into multiple arrays, -// the size of each array is determined by `size`. -// The last chunk may contain less than size elements. -func (a *SortedIntArray) Chunk(size int) [][]int { - if size < 1 { - return nil - } - a.mu.RLock() - defer a.mu.RUnlock() - length := len(a.array) - chunks := int(math.Ceil(float64(length) / float64(size))) - var n [][]int - for i, end := 0, 0; chunks > 0; chunks-- { - end = (i + 1) * size - if end > length { - end = length - } - n = append(n, a.array[i*size:end]) - i++ - } - return n -} - -// Rand randomly returns one item from array(no deleting). -func (a *SortedIntArray) Rand() (value int, found bool) { - a.mu.RLock() - defer a.mu.RUnlock() - if len(a.array) == 0 { - return 0, false - } - return a.array[grand.Intn(len(a.array))], true -} - -// Rands randomly returns `size` items from array(no deleting). 
-func (a *SortedIntArray) Rands(size int) []int { - a.mu.RLock() - defer a.mu.RUnlock() - if size <= 0 || len(a.array) == 0 { - return nil - } - array := make([]int, size) - for i := 0; i < size; i++ { - array[i] = a.array[grand.Intn(len(a.array))] - } - return array -} - -// Join joins array elements with a string `glue`. -func (a *SortedIntArray) Join(glue string) string { - a.mu.RLock() - defer a.mu.RUnlock() - if len(a.array) == 0 { - return "" - } - buffer := bytes.NewBuffer(nil) - for k, v := range a.array { - buffer.WriteString(gconv.String(v)) - if k != len(a.array)-1 { - buffer.WriteString(glue) - } - } - return buffer.String() -} - -// CountValues counts the number of occurrences of all values in the array. -func (a *SortedIntArray) CountValues() map[int]int { - m := make(map[int]int) - a.mu.RLock() - defer a.mu.RUnlock() - for _, v := range a.array { - m[v]++ - } - return m -} - -// Iterator is alias of IteratorAsc. -func (a *SortedIntArray) Iterator(f func(k int, v int) bool) { - a.IteratorAsc(f) -} - -// IteratorAsc iterates the array readonly in ascending order with given callback function `f`. -// If `f` returns true, then it continues iterating; or false to stop. -func (a *SortedIntArray) IteratorAsc(f func(k int, v int) bool) { - a.mu.RLock() - defer a.mu.RUnlock() - for k, v := range a.array { - if !f(k, v) { - break - } - } -} - -// IteratorDesc iterates the array readonly in descending order with given callback function `f`. -// If `f` returns true, then it continues iterating; or false to stop. -func (a *SortedIntArray) IteratorDesc(f func(k int, v int) bool) { - a.mu.RLock() - defer a.mu.RUnlock() - for i := len(a.array) - 1; i >= 0; i-- { - if !f(i, a.array[i]) { - break - } - } -} - -// String returns current array as a string, which implements like json.Marshal does. 
-func (a *SortedIntArray) String() string { - if a == nil { - return "" - } - return "[" + a.Join(",") + "]" -} - -// MarshalJSON implements the interface MarshalJSON for json.Marshal. -// Note that do not use pointer as its receiver here. -func (a SortedIntArray) MarshalJSON() ([]byte, error) { - a.mu.RLock() - defer a.mu.RUnlock() - return json.Marshal(a.array) -} - -// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. -func (a *SortedIntArray) UnmarshalJSON(b []byte) error { - if a.comparator == nil { - a.array = make([]int, 0) - a.comparator = defaultComparatorInt - } - a.mu.Lock() - defer a.mu.Unlock() - if err := json.UnmarshalUseNumber(b, &a.array); err != nil { - return err - } - if a.array != nil { - sort.Ints(a.array) - } - return nil -} - -// UnmarshalValue is an interface implement which sets any type of value for array. -func (a *SortedIntArray) UnmarshalValue(value interface{}) (err error) { - if a.comparator == nil { - a.comparator = defaultComparatorInt - } - a.mu.Lock() - defer a.mu.Unlock() - switch value.(type) { - case string, []byte: - err = json.UnmarshalUseNumber(gconv.Bytes(value), &a.array) - default: - a.array = gconv.SliceInt(value) - } - if a.array != nil { - sort.Ints(a.array) - } - return err -} - -// Filter iterates array and filters elements using custom callback function. -// It removes the element from array if callback function `filter` returns true, -// it or else does nothing and continues iterating. -func (a *SortedIntArray) Filter(filter func(index int, value int) bool) *SortedIntArray { - a.mu.Lock() - defer a.mu.Unlock() - for i := 0; i < len(a.array); { - if filter(i, a.array[i]) { - a.array = append(a.array[:i], a.array[i+1:]...) - } else { - i++ - } - } - return a -} - -// FilterEmpty removes all zero value of the array. 
-func (a *SortedIntArray) FilterEmpty() *SortedIntArray { - a.mu.Lock() - defer a.mu.Unlock() - for i := 0; i < len(a.array); { - if a.array[i] == 0 { - a.array = append(a.array[:i], a.array[i+1:]...) - } else { - break - } - } - for i := len(a.array) - 1; i >= 0; { - if a.array[i] == 0 { - a.array = append(a.array[:i], a.array[i+1:]...) - } else { - break - } - } - return a -} - -// Walk applies a user supplied function `f` to every item of array. -func (a *SortedIntArray) Walk(f func(value int) int) *SortedIntArray { - a.mu.Lock() - defer a.mu.Unlock() - - // Keep the array always sorted. - defer quickSortInt(a.array, a.getComparator()) - - for i, v := range a.array { - a.array[i] = f(v) - } - return a -} - -// IsEmpty checks whether the array is empty. -func (a *SortedIntArray) IsEmpty() bool { - return a.Len() == 0 -} - -// getComparator returns the comparator if it's previously set, -// or else it returns a default comparator. -func (a *SortedIntArray) getComparator() func(a, b int) int { - if a.comparator == nil { - return defaultComparatorInt - } - return a.comparator -} - -// DeepCopy implements interface for deep copy of current type. -func (a *SortedIntArray) DeepCopy() interface{} { - if a == nil { - return nil - } - a.mu.RLock() - defer a.mu.RUnlock() - newSlice := make([]int, len(a.array)) - copy(newSlice, a.array) - return NewSortedIntArrayFrom(newSlice, a.mu.IsSafe()) -} diff --git a/vendor/github.com/gogf/gf/v2/container/garray/garray_sorted_str.go b/vendor/github.com/gogf/gf/v2/container/garray/garray_sorted_str.go deleted file mode 100644 index fa00b7c7..00000000 --- a/vendor/github.com/gogf/gf/v2/container/garray/garray_sorted_str.go +++ /dev/null @@ -1,800 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. 
- -package garray - -import ( - "bytes" - "math" - "sort" - "strings" - - "github.com/gogf/gf/v2/internal/json" - "github.com/gogf/gf/v2/internal/rwmutex" - "github.com/gogf/gf/v2/text/gstr" - "github.com/gogf/gf/v2/util/gconv" - "github.com/gogf/gf/v2/util/grand" -) - -// SortedStrArray is a golang sorted string array with rich features. -// It is using increasing order in default, which can be changed by -// setting it a custom comparator. -// It contains a concurrent-safe/unsafe switch, which should be set -// when its initialization and cannot be changed then. -type SortedStrArray struct { - mu rwmutex.RWMutex - array []string - unique bool // Whether enable unique feature(false) - comparator func(a, b string) int // Comparison function(it returns -1: a < b; 0: a == b; 1: a > b) -} - -// NewSortedStrArray creates and returns an empty sorted array. -// The parameter `safe` is used to specify whether using array in concurrent-safety, -// which is false in default. -func NewSortedStrArray(safe ...bool) *SortedStrArray { - return NewSortedStrArraySize(0, safe...) -} - -// NewSortedStrArrayComparator creates and returns an empty sorted array with specified comparator. -// The parameter `safe` is used to specify whether using array in concurrent-safety which is false in default. -func NewSortedStrArrayComparator(comparator func(a, b string) int, safe ...bool) *SortedStrArray { - array := NewSortedStrArray(safe...) - array.comparator = comparator - return array -} - -// NewSortedStrArraySize create and returns an sorted array with given size and cap. -// The parameter `safe` is used to specify whether using array in concurrent-safety, -// which is false in default. -func NewSortedStrArraySize(cap int, safe ...bool) *SortedStrArray { - return &SortedStrArray{ - mu: rwmutex.Create(safe...), - array: make([]string, 0, cap), - comparator: defaultComparatorStr, - } -} - -// NewSortedStrArrayFrom creates and returns an sorted array with given slice `array`. 
-// The parameter `safe` is used to specify whether using array in concurrent-safety, -// which is false in default. -func NewSortedStrArrayFrom(array []string, safe ...bool) *SortedStrArray { - a := NewSortedStrArraySize(0, safe...) - a.array = array - quickSortStr(a.array, a.getComparator()) - return a -} - -// NewSortedStrArrayFromCopy creates and returns an sorted array from a copy of given slice `array`. -// The parameter `safe` is used to specify whether using array in concurrent-safety, -// which is false in default. -func NewSortedStrArrayFromCopy(array []string, safe ...bool) *SortedStrArray { - newArray := make([]string, len(array)) - copy(newArray, array) - return NewSortedStrArrayFrom(newArray, safe...) -} - -// SetArray sets the underlying slice array with the given `array`. -func (a *SortedStrArray) SetArray(array []string) *SortedStrArray { - a.mu.Lock() - defer a.mu.Unlock() - a.array = array - quickSortStr(a.array, a.getComparator()) - return a -} - -// At returns the value by the specified index. -// If the given `index` is out of range of the array, it returns an empty string. -func (a *SortedStrArray) At(index int) (value string) { - value, _ = a.Get(index) - return -} - -// Sort sorts the array in increasing order. -// The parameter `reverse` controls whether sort -// in increasing order(default) or decreasing order. -func (a *SortedStrArray) Sort() *SortedStrArray { - a.mu.Lock() - defer a.mu.Unlock() - quickSortStr(a.array, a.getComparator()) - return a -} - -// Add adds one or multiple values to sorted array, the array always keeps sorted. -// It's alias of function Append, see Append. -func (a *SortedStrArray) Add(values ...string) *SortedStrArray { - return a.Append(values...) -} - -// Append adds one or multiple values to sorted array, the array always keeps sorted. 
-func (a *SortedStrArray) Append(values ...string) *SortedStrArray { - if len(values) == 0 { - return a - } - a.mu.Lock() - defer a.mu.Unlock() - for _, value := range values { - index, cmp := a.binSearch(value, false) - if a.unique && cmp == 0 { - continue - } - if index < 0 { - a.array = append(a.array, value) - continue - } - if cmp > 0 { - index++ - } - rear := append([]string{}, a.array[index:]...) - a.array = append(a.array[0:index], value) - a.array = append(a.array, rear...) - } - return a -} - -// Get returns the value by the specified index. -// If the given `index` is out of range of the array, the `found` is false. -func (a *SortedStrArray) Get(index int) (value string, found bool) { - a.mu.RLock() - defer a.mu.RUnlock() - if index < 0 || index >= len(a.array) { - return "", false - } - return a.array[index], true -} - -// Remove removes an item by index. -// If the given `index` is out of range of the array, the `found` is false. -func (a *SortedStrArray) Remove(index int) (value string, found bool) { - a.mu.Lock() - defer a.mu.Unlock() - return a.doRemoveWithoutLock(index) -} - -// doRemoveWithoutLock removes an item by index without lock. -func (a *SortedStrArray) doRemoveWithoutLock(index int) (value string, found bool) { - if index < 0 || index >= len(a.array) { - return "", false - } - // Determine array boundaries when deleting to improve deletion efficiency. - if index == 0 { - value := a.array[0] - a.array = a.array[1:] - return value, true - } else if index == len(a.array)-1 { - value := a.array[index] - a.array = a.array[:index] - return value, true - } - // If it is a non-boundary delete, - // it will involve the creation of an array, - // then the deletion is less efficient. - value = a.array[index] - a.array = append(a.array[:index], a.array[index+1:]...) - return value, true -} - -// RemoveValue removes an item by value. -// It returns true if value is found in the array, or else false if not found. 
-func (a *SortedStrArray) RemoveValue(value string) bool { - a.mu.Lock() - defer a.mu.Unlock() - if i, r := a.binSearch(value, false); r == 0 { - _, res := a.doRemoveWithoutLock(i) - return res - } - return false -} - -// RemoveValues removes an item by `values`. -func (a *SortedStrArray) RemoveValues(values ...string) { - a.mu.Lock() - defer a.mu.Unlock() - for _, value := range values { - if i, r := a.binSearch(value, false); r == 0 { - a.doRemoveWithoutLock(i) - } - } -} - -// PopLeft pops and returns an item from the beginning of array. -// Note that if the array is empty, the `found` is false. -func (a *SortedStrArray) PopLeft() (value string, found bool) { - a.mu.Lock() - defer a.mu.Unlock() - if len(a.array) == 0 { - return "", false - } - value = a.array[0] - a.array = a.array[1:] - return value, true -} - -// PopRight pops and returns an item from the end of array. -// Note that if the array is empty, the `found` is false. -func (a *SortedStrArray) PopRight() (value string, found bool) { - a.mu.Lock() - defer a.mu.Unlock() - index := len(a.array) - 1 - if index < 0 { - return "", false - } - value = a.array[index] - a.array = a.array[:index] - return value, true -} - -// PopRand randomly pops and return an item out of array. -// Note that if the array is empty, the `found` is false. -func (a *SortedStrArray) PopRand() (value string, found bool) { - a.mu.Lock() - defer a.mu.Unlock() - return a.doRemoveWithoutLock(grand.Intn(len(a.array))) -} - -// PopRands randomly pops and returns `size` items out of array. -// If the given `size` is greater than size of the array, it returns all elements of the array. -// Note that if given `size` <= 0 or the array is empty, it returns nil. 
-func (a *SortedStrArray) PopRands(size int) []string { - a.mu.Lock() - defer a.mu.Unlock() - if size <= 0 || len(a.array) == 0 { - return nil - } - if size >= len(a.array) { - size = len(a.array) - } - array := make([]string, size) - for i := 0; i < size; i++ { - array[i], _ = a.doRemoveWithoutLock(grand.Intn(len(a.array))) - } - return array -} - -// PopLefts pops and returns `size` items from the beginning of array. -// If the given `size` is greater than size of the array, it returns all elements of the array. -// Note that if given `size` <= 0 or the array is empty, it returns nil. -func (a *SortedStrArray) PopLefts(size int) []string { - a.mu.Lock() - defer a.mu.Unlock() - if size <= 0 || len(a.array) == 0 { - return nil - } - if size >= len(a.array) { - array := a.array - a.array = a.array[:0] - return array - } - value := a.array[0:size] - a.array = a.array[size:] - return value -} - -// PopRights pops and returns `size` items from the end of array. -// If the given `size` is greater than size of the array, it returns all elements of the array. -// Note that if given `size` <= 0 or the array is empty, it returns nil. -func (a *SortedStrArray) PopRights(size int) []string { - a.mu.Lock() - defer a.mu.Unlock() - if size <= 0 || len(a.array) == 0 { - return nil - } - index := len(a.array) - size - if index <= 0 { - array := a.array - a.array = a.array[:0] - return array - } - value := a.array[index:] - a.array = a.array[:index] - return value -} - -// Range picks and returns items by range, like array[start:end]. -// Notice, if in concurrent-safe usage, it returns a copy of slice; -// else a pointer to the underlying data. -// -// If `end` is negative, then the offset will start from the end of array. -// If `end` is omitted, then the sequence will have everything from start up -// until the end of the array. 
-func (a *SortedStrArray) Range(start int, end ...int) []string { - a.mu.RLock() - defer a.mu.RUnlock() - offsetEnd := len(a.array) - if len(end) > 0 && end[0] < offsetEnd { - offsetEnd = end[0] - } - if start > offsetEnd { - return nil - } - if start < 0 { - start = 0 - } - array := ([]string)(nil) - if a.mu.IsSafe() { - array = make([]string, offsetEnd-start) - copy(array, a.array[start:offsetEnd]) - } else { - array = a.array[start:offsetEnd] - } - return array -} - -// SubSlice returns a slice of elements from the array as specified -// by the `offset` and `size` parameters. -// If in concurrent safe usage, it returns a copy of the slice; else a pointer. -// -// If offset is non-negative, the sequence will start at that offset in the array. -// If offset is negative, the sequence will start that far from the end of the array. -// -// If length is given and is positive, then the sequence will have up to that many elements in it. -// If the array is shorter than the length, then only the available array elements will be present. -// If length is given and is negative then the sequence will stop that many elements from the end of the array. -// If it is omitted, then the sequence will have everything from offset up until the end of the array. -// -// Any possibility crossing the left border of array, it will fail. 
-func (a *SortedStrArray) SubSlice(offset int, length ...int) []string { - a.mu.RLock() - defer a.mu.RUnlock() - size := len(a.array) - if len(length) > 0 { - size = length[0] - } - if offset > len(a.array) { - return nil - } - if offset < 0 { - offset = len(a.array) + offset - if offset < 0 { - return nil - } - } - if size < 0 { - offset += size - size = -size - if offset < 0 { - return nil - } - } - end := offset + size - if end > len(a.array) { - end = len(a.array) - size = len(a.array) - offset - } - if a.mu.IsSafe() { - s := make([]string, size) - copy(s, a.array[offset:]) - return s - } else { - return a.array[offset:end] - } -} - -// Sum returns the sum of values in an array. -func (a *SortedStrArray) Sum() (sum int) { - a.mu.RLock() - defer a.mu.RUnlock() - for _, v := range a.array { - sum += gconv.Int(v) - } - return -} - -// Len returns the length of array. -func (a *SortedStrArray) Len() int { - a.mu.RLock() - length := len(a.array) - a.mu.RUnlock() - return length -} - -// Slice returns the underlying data of array. -// Note that, if it's in concurrent-safe usage, it returns a copy of underlying data, -// or else a pointer to the underlying data. -func (a *SortedStrArray) Slice() []string { - array := ([]string)(nil) - if a.mu.IsSafe() { - a.mu.RLock() - defer a.mu.RUnlock() - array = make([]string, len(a.array)) - copy(array, a.array) - } else { - array = a.array - } - return array -} - -// Interfaces returns current array as []interface{}. -func (a *SortedStrArray) Interfaces() []interface{} { - a.mu.RLock() - defer a.mu.RUnlock() - array := make([]interface{}, len(a.array)) - for k, v := range a.array { - array[k] = v - } - return array -} - -// Contains checks whether a value exists in the array. -func (a *SortedStrArray) Contains(value string) bool { - return a.Search(value) != -1 -} - -// ContainsI checks whether a value exists in the array with case-insensitively. 
-// Note that it internally iterates the whole array to do the comparison with case-insensitively. -func (a *SortedStrArray) ContainsI(value string) bool { - a.mu.RLock() - defer a.mu.RUnlock() - if len(a.array) == 0 { - return false - } - for _, v := range a.array { - if strings.EqualFold(v, value) { - return true - } - } - return false -} - -// Search searches array by `value`, returns the index of `value`, -// or returns -1 if not exists. -func (a *SortedStrArray) Search(value string) (index int) { - if i, r := a.binSearch(value, true); r == 0 { - return i - } - return -1 -} - -// Binary search. -// It returns the last compared index and the result. -// If `result` equals to 0, it means the value at `index` is equals to `value`. -// If `result` lesser than 0, it means the value at `index` is lesser than `value`. -// If `result` greater than 0, it means the value at `index` is greater than `value`. -func (a *SortedStrArray) binSearch(value string, lock bool) (index int, result int) { - if lock { - a.mu.RLock() - defer a.mu.RUnlock() - } - if len(a.array) == 0 { - return -1, -2 - } - min := 0 - max := len(a.array) - 1 - mid := 0 - cmp := -2 - for min <= max { - mid = min + int((max-min)/2) - cmp = a.getComparator()(value, a.array[mid]) - switch { - case cmp < 0: - max = mid - 1 - case cmp > 0: - min = mid + 1 - default: - return mid, cmp - } - } - return mid, cmp -} - -// SetUnique sets unique mark to the array, -// which means it does not contain any repeated items. -// It also do unique check, remove all repeated items. -func (a *SortedStrArray) SetUnique(unique bool) *SortedStrArray { - oldUnique := a.unique - a.unique = unique - if unique && oldUnique != unique { - a.Unique() - } - return a -} - -// Unique uniques the array, clear repeated items. 
-func (a *SortedStrArray) Unique() *SortedStrArray { - a.mu.Lock() - defer a.mu.Unlock() - if len(a.array) == 0 { - return a - } - i := 0 - for { - if i == len(a.array)-1 { - break - } - if a.getComparator()(a.array[i], a.array[i+1]) == 0 { - a.array = append(a.array[:i+1], a.array[i+1+1:]...) - } else { - i++ - } - } - return a -} - -// Clone returns a new array, which is a copy of current array. -func (a *SortedStrArray) Clone() (newArray *SortedStrArray) { - a.mu.RLock() - array := make([]string, len(a.array)) - copy(array, a.array) - a.mu.RUnlock() - return NewSortedStrArrayFrom(array, a.mu.IsSafe()) -} - -// Clear deletes all items of current array. -func (a *SortedStrArray) Clear() *SortedStrArray { - a.mu.Lock() - if len(a.array) > 0 { - a.array = make([]string, 0) - } - a.mu.Unlock() - return a -} - -// LockFunc locks writing by callback function `f`. -func (a *SortedStrArray) LockFunc(f func(array []string)) *SortedStrArray { - a.mu.Lock() - defer a.mu.Unlock() - f(a.array) - return a -} - -// RLockFunc locks reading by callback function `f`. -func (a *SortedStrArray) RLockFunc(f func(array []string)) *SortedStrArray { - a.mu.RLock() - defer a.mu.RUnlock() - f(a.array) - return a -} - -// Merge merges `array` into current array. -// The parameter `array` can be any garray or slice type. -// The difference between Merge and Append is Append supports only specified slice type, -// but Merge supports more parameter types. -func (a *SortedStrArray) Merge(array interface{}) *SortedStrArray { - return a.Add(gconv.Strings(array)...) -} - -// Chunk splits an array into multiple arrays, -// the size of each array is determined by `size`. -// The last chunk may contain less than size elements. 
-func (a *SortedStrArray) Chunk(size int) [][]string { - if size < 1 { - return nil - } - a.mu.RLock() - defer a.mu.RUnlock() - length := len(a.array) - chunks := int(math.Ceil(float64(length) / float64(size))) - var n [][]string - for i, end := 0, 0; chunks > 0; chunks-- { - end = (i + 1) * size - if end > length { - end = length - } - n = append(n, a.array[i*size:end]) - i++ - } - return n -} - -// Rand randomly returns one item from array(no deleting). -func (a *SortedStrArray) Rand() (value string, found bool) { - a.mu.RLock() - defer a.mu.RUnlock() - if len(a.array) == 0 { - return "", false - } - return a.array[grand.Intn(len(a.array))], true -} - -// Rands randomly returns `size` items from array(no deleting). -func (a *SortedStrArray) Rands(size int) []string { - a.mu.RLock() - defer a.mu.RUnlock() - if size <= 0 || len(a.array) == 0 { - return nil - } - array := make([]string, size) - for i := 0; i < size; i++ { - array[i] = a.array[grand.Intn(len(a.array))] - } - return array -} - -// Join joins array elements with a string `glue`. -func (a *SortedStrArray) Join(glue string) string { - a.mu.RLock() - defer a.mu.RUnlock() - if len(a.array) == 0 { - return "" - } - buffer := bytes.NewBuffer(nil) - for k, v := range a.array { - buffer.WriteString(v) - if k != len(a.array)-1 { - buffer.WriteString(glue) - } - } - return buffer.String() -} - -// CountValues counts the number of occurrences of all values in the array. -func (a *SortedStrArray) CountValues() map[string]int { - m := make(map[string]int) - a.mu.RLock() - defer a.mu.RUnlock() - for _, v := range a.array { - m[v]++ - } - return m -} - -// Iterator is alias of IteratorAsc. -func (a *SortedStrArray) Iterator(f func(k int, v string) bool) { - a.IteratorAsc(f) -} - -// IteratorAsc iterates the array readonly in ascending order with given callback function `f`. -// If `f` returns true, then it continues iterating; or false to stop. 
-func (a *SortedStrArray) IteratorAsc(f func(k int, v string) bool) { - a.mu.RLock() - defer a.mu.RUnlock() - for k, v := range a.array { - if !f(k, v) { - break - } - } -} - -// IteratorDesc iterates the array readonly in descending order with given callback function `f`. -// If `f` returns true, then it continues iterating; or false to stop. -func (a *SortedStrArray) IteratorDesc(f func(k int, v string) bool) { - a.mu.RLock() - defer a.mu.RUnlock() - for i := len(a.array) - 1; i >= 0; i-- { - if !f(i, a.array[i]) { - break - } - } -} - -// String returns current array as a string, which implements like json.Marshal does. -func (a *SortedStrArray) String() string { - if a == nil { - return "" - } - a.mu.RLock() - defer a.mu.RUnlock() - buffer := bytes.NewBuffer(nil) - buffer.WriteByte('[') - for k, v := range a.array { - buffer.WriteString(`"` + gstr.QuoteMeta(v, `"\`) + `"`) - if k != len(a.array)-1 { - buffer.WriteByte(',') - } - } - buffer.WriteByte(']') - return buffer.String() -} - -// MarshalJSON implements the interface MarshalJSON for json.Marshal. -// Note that do not use pointer as its receiver here. -func (a SortedStrArray) MarshalJSON() ([]byte, error) { - a.mu.RLock() - defer a.mu.RUnlock() - return json.Marshal(a.array) -} - -// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. -func (a *SortedStrArray) UnmarshalJSON(b []byte) error { - if a.comparator == nil { - a.array = make([]string, 0) - a.comparator = defaultComparatorStr - } - a.mu.Lock() - defer a.mu.Unlock() - if err := json.UnmarshalUseNumber(b, &a.array); err != nil { - return err - } - if a.array != nil { - sort.Strings(a.array) - } - return nil -} - -// UnmarshalValue is an interface implement which sets any type of value for array. 
-func (a *SortedStrArray) UnmarshalValue(value interface{}) (err error) { - if a.comparator == nil { - a.comparator = defaultComparatorStr - } - a.mu.Lock() - defer a.mu.Unlock() - switch value.(type) { - case string, []byte: - err = json.UnmarshalUseNumber(gconv.Bytes(value), &a.array) - default: - a.array = gconv.SliceStr(value) - } - if a.array != nil { - sort.Strings(a.array) - } - return err -} - -// Filter iterates array and filters elements using custom callback function. -// It removes the element from array if callback function `filter` returns true, -// it or else does nothing and continues iterating. -func (a *SortedStrArray) Filter(filter func(index int, value string) bool) *SortedStrArray { - a.mu.Lock() - defer a.mu.Unlock() - for i := 0; i < len(a.array); { - if filter(i, a.array[i]) { - a.array = append(a.array[:i], a.array[i+1:]...) - } else { - i++ - } - } - return a -} - -// FilterEmpty removes all empty string value of the array. -func (a *SortedStrArray) FilterEmpty() *SortedStrArray { - a.mu.Lock() - defer a.mu.Unlock() - for i := 0; i < len(a.array); { - if a.array[i] == "" { - a.array = append(a.array[:i], a.array[i+1:]...) - } else { - break - } - } - for i := len(a.array) - 1; i >= 0; { - if a.array[i] == "" { - a.array = append(a.array[:i], a.array[i+1:]...) - } else { - break - } - } - return a -} - -// Walk applies a user supplied function `f` to every item of array. -func (a *SortedStrArray) Walk(f func(value string) string) *SortedStrArray { - a.mu.Lock() - defer a.mu.Unlock() - - // Keep the array always sorted. - defer quickSortStr(a.array, a.getComparator()) - - for i, v := range a.array { - a.array[i] = f(v) - } - return a -} - -// IsEmpty checks whether the array is empty. -func (a *SortedStrArray) IsEmpty() bool { - return a.Len() == 0 -} - -// getComparator returns the comparator if it's previously set, -// or else it returns a default comparator. 
-func (a *SortedStrArray) getComparator() func(a, b string) int { - if a.comparator == nil { - return defaultComparatorStr - } - return a.comparator -} - -// DeepCopy implements interface for deep copy of current type. -func (a *SortedStrArray) DeepCopy() interface{} { - if a == nil { - return nil - } - a.mu.RLock() - defer a.mu.RUnlock() - newSlice := make([]string, len(a.array)) - copy(newSlice, a.array) - return NewSortedStrArrayFrom(newSlice, a.mu.IsSafe()) -} diff --git a/vendor/github.com/gogf/gf/v2/container/glist/glist.go b/vendor/github.com/gogf/gf/v2/container/glist/glist.go deleted file mode 100644 index 1c6212ea..00000000 --- a/vendor/github.com/gogf/gf/v2/container/glist/glist.go +++ /dev/null @@ -1,572 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with l file, -// You can obtain one at https://github.com/gogf/gf. -// - -// Package glist provides most commonly used doubly linked list container which also supports concurrent-safe/unsafe switch feature. -package glist - -import ( - "bytes" - "container/list" - - "github.com/gogf/gf/v2/internal/deepcopy" - "github.com/gogf/gf/v2/internal/json" - "github.com/gogf/gf/v2/internal/rwmutex" - "github.com/gogf/gf/v2/util/gconv" -) - -type ( - // List is a doubly linked list containing a concurrent-safe/unsafe switch. - // The switch should be set when its initialization and cannot be changed then. - List struct { - mu rwmutex.RWMutex - list *list.List - } - // Element the item type of the list. - Element = list.Element -) - -// New creates and returns a new empty doubly linked list. -func New(safe ...bool) *List { - return &List{ - mu: rwmutex.Create(safe...), - list: list.New(), - } -} - -// NewFrom creates and returns a list from a copy of given slice `array`. 
-// The parameter `safe` is used to specify whether using list in concurrent-safety, -// which is false in default. -func NewFrom(array []interface{}, safe ...bool) *List { - l := list.New() - for _, v := range array { - l.PushBack(v) - } - return &List{ - mu: rwmutex.Create(safe...), - list: l, - } -} - -// PushFront inserts a new element `e` with value `v` at the front of list `l` and returns `e`. -func (l *List) PushFront(v interface{}) (e *Element) { - l.mu.Lock() - if l.list == nil { - l.list = list.New() - } - e = l.list.PushFront(v) - l.mu.Unlock() - return -} - -// PushBack inserts a new element `e` with value `v` at the back of list `l` and returns `e`. -func (l *List) PushBack(v interface{}) (e *Element) { - l.mu.Lock() - if l.list == nil { - l.list = list.New() - } - e = l.list.PushBack(v) - l.mu.Unlock() - return -} - -// PushFronts inserts multiple new elements with values `values` at the front of list `l`. -func (l *List) PushFronts(values []interface{}) { - l.mu.Lock() - if l.list == nil { - l.list = list.New() - } - for _, v := range values { - l.list.PushFront(v) - } - l.mu.Unlock() -} - -// PushBacks inserts multiple new elements with values `values` at the back of list `l`. -func (l *List) PushBacks(values []interface{}) { - l.mu.Lock() - if l.list == nil { - l.list = list.New() - } - for _, v := range values { - l.list.PushBack(v) - } - l.mu.Unlock() -} - -// PopBack removes the element from back of `l` and returns the value of the element. -func (l *List) PopBack() (value interface{}) { - l.mu.Lock() - defer l.mu.Unlock() - if l.list == nil { - l.list = list.New() - return - } - if e := l.list.Back(); e != nil { - value = l.list.Remove(e) - } - return -} - -// PopFront removes the element from front of `l` and returns the value of the element. 
-func (l *List) PopFront() (value interface{}) { - l.mu.Lock() - defer l.mu.Unlock() - if l.list == nil { - l.list = list.New() - return - } - if e := l.list.Front(); e != nil { - value = l.list.Remove(e) - } - return -} - -// PopBacks removes `max` elements from back of `l` -// and returns values of the removed elements as slice. -func (l *List) PopBacks(max int) (values []interface{}) { - l.mu.Lock() - defer l.mu.Unlock() - if l.list == nil { - l.list = list.New() - return - } - length := l.list.Len() - if length > 0 { - if max > 0 && max < length { - length = max - } - values = make([]interface{}, length) - for i := 0; i < length; i++ { - values[i] = l.list.Remove(l.list.Back()) - } - } - return -} - -// PopFronts removes `max` elements from front of `l` -// and returns values of the removed elements as slice. -func (l *List) PopFronts(max int) (values []interface{}) { - l.mu.Lock() - defer l.mu.Unlock() - if l.list == nil { - l.list = list.New() - return - } - length := l.list.Len() - if length > 0 { - if max > 0 && max < length { - length = max - } - values = make([]interface{}, length) - for i := 0; i < length; i++ { - values[i] = l.list.Remove(l.list.Front()) - } - } - return -} - -// PopBackAll removes all elements from back of `l` -// and returns values of the removed elements as slice. -func (l *List) PopBackAll() []interface{} { - return l.PopBacks(-1) -} - -// PopFrontAll removes all elements from front of `l` -// and returns values of the removed elements as slice. -func (l *List) PopFrontAll() []interface{} { - return l.PopFronts(-1) -} - -// FrontAll copies and returns values of all elements from front of `l` as slice. 
-func (l *List) FrontAll() (values []interface{}) { - l.mu.RLock() - defer l.mu.RUnlock() - if l.list == nil { - return - } - length := l.list.Len() - if length > 0 { - values = make([]interface{}, length) - for i, e := 0, l.list.Front(); i < length; i, e = i+1, e.Next() { - values[i] = e.Value - } - } - return -} - -// BackAll copies and returns values of all elements from back of `l` as slice. -func (l *List) BackAll() (values []interface{}) { - l.mu.RLock() - defer l.mu.RUnlock() - if l.list == nil { - return - } - length := l.list.Len() - if length > 0 { - values = make([]interface{}, length) - for i, e := 0, l.list.Back(); i < length; i, e = i+1, e.Prev() { - values[i] = e.Value - } - } - return -} - -// FrontValue returns value of the first element of `l` or nil if the list is empty. -func (l *List) FrontValue() (value interface{}) { - l.mu.RLock() - defer l.mu.RUnlock() - if l.list == nil { - return - } - if e := l.list.Front(); e != nil { - value = e.Value - } - return -} - -// BackValue returns value of the last element of `l` or nil if the list is empty. -func (l *List) BackValue() (value interface{}) { - l.mu.RLock() - defer l.mu.RUnlock() - if l.list == nil { - return - } - if e := l.list.Back(); e != nil { - value = e.Value - } - return -} - -// Front returns the first element of list `l` or nil if the list is empty. -func (l *List) Front() (e *Element) { - l.mu.RLock() - defer l.mu.RUnlock() - if l.list == nil { - return - } - e = l.list.Front() - return -} - -// Back returns the last element of list `l` or nil if the list is empty. -func (l *List) Back() (e *Element) { - l.mu.RLock() - defer l.mu.RUnlock() - if l.list == nil { - return - } - e = l.list.Back() - return -} - -// Len returns the number of elements of list `l`. -// The complexity is O(1). -func (l *List) Len() (length int) { - l.mu.RLock() - defer l.mu.RUnlock() - if l.list == nil { - return - } - length = l.list.Len() - return -} - -// Size is alias of Len. 
-func (l *List) Size() int { - return l.Len() -} - -// MoveBefore moves element `e` to its new position before `p`. -// If `e` or `p` is not an element of `l`, or `e` == `p`, the list is not modified. -// The element and `p` must not be nil. -func (l *List) MoveBefore(e, p *Element) { - l.mu.Lock() - defer l.mu.Unlock() - if l.list == nil { - l.list = list.New() - } - l.list.MoveBefore(e, p) -} - -// MoveAfter moves element `e` to its new position after `p`. -// If `e` or `p` is not an element of `l`, or `e` == `p`, the list is not modified. -// The element and `p` must not be nil. -func (l *List) MoveAfter(e, p *Element) { - l.mu.Lock() - defer l.mu.Unlock() - if l.list == nil { - l.list = list.New() - } - l.list.MoveAfter(e, p) -} - -// MoveToFront moves element `e` to the front of list `l`. -// If `e` is not an element of `l`, the list is not modified. -// The element must not be nil. -func (l *List) MoveToFront(e *Element) { - l.mu.Lock() - defer l.mu.Unlock() - if l.list == nil { - l.list = list.New() - } - l.list.MoveToFront(e) -} - -// MoveToBack moves element `e` to the back of list `l`. -// If `e` is not an element of `l`, the list is not modified. -// The element must not be nil. -func (l *List) MoveToBack(e *Element) { - l.mu.Lock() - defer l.mu.Unlock() - if l.list == nil { - l.list = list.New() - } - l.list.MoveToBack(e) -} - -// PushBackList inserts a copy of an other list at the back of list `l`. -// The lists `l` and `other` may be the same, but they must not be nil. -func (l *List) PushBackList(other *List) { - if l != other { - other.mu.RLock() - defer other.mu.RUnlock() - } - l.mu.Lock() - defer l.mu.Unlock() - if l.list == nil { - l.list = list.New() - } - l.list.PushBackList(other.list) -} - -// PushFrontList inserts a copy of an other list at the front of list `l`. -// The lists `l` and `other` may be the same, but they must not be nil. 
-func (l *List) PushFrontList(other *List) { - if l != other { - other.mu.RLock() - defer other.mu.RUnlock() - } - l.mu.Lock() - defer l.mu.Unlock() - if l.list == nil { - l.list = list.New() - } - l.list.PushFrontList(other.list) -} - -// InsertAfter inserts a new element `e` with value `v` immediately after `p` and returns `e`. -// If `p` is not an element of `l`, the list is not modified. -// The `p` must not be nil. -func (l *List) InsertAfter(p *Element, v interface{}) (e *Element) { - l.mu.Lock() - defer l.mu.Unlock() - if l.list == nil { - l.list = list.New() - } - e = l.list.InsertAfter(v, p) - return -} - -// InsertBefore inserts a new element `e` with value `v` immediately before `p` and returns `e`. -// If `p` is not an element of `l`, the list is not modified. -// The `p` must not be nil. -func (l *List) InsertBefore(p *Element, v interface{}) (e *Element) { - l.mu.Lock() - defer l.mu.Unlock() - if l.list == nil { - l.list = list.New() - } - e = l.list.InsertBefore(v, p) - return -} - -// Remove removes `e` from `l` if `e` is an element of list `l`. -// It returns the element value e.Value. -// The element must not be nil. -func (l *List) Remove(e *Element) (value interface{}) { - l.mu.Lock() - defer l.mu.Unlock() - if l.list == nil { - l.list = list.New() - } - value = l.list.Remove(e) - return -} - -// Removes removes multiple elements `es` from `l` if `es` are elements of list `l`. -func (l *List) Removes(es []*Element) { - l.mu.Lock() - defer l.mu.Unlock() - if l.list == nil { - l.list = list.New() - } - for _, e := range es { - l.list.Remove(e) - } -} - -// RemoveAll removes all elements from list `l`. -func (l *List) RemoveAll() { - l.mu.Lock() - l.list = list.New() - l.mu.Unlock() -} - -// Clear is alias of RemoveAll. -func (l *List) Clear() { - l.RemoveAll() -} - -// RLockFunc locks reading with given callback function `f` within RWMutex.RLock. 
-func (l *List) RLockFunc(f func(list *list.List)) { - l.mu.RLock() - defer l.mu.RUnlock() - if l.list != nil { - f(l.list) - } -} - -// LockFunc locks writing with given callback function `f` within RWMutex.Lock. -func (l *List) LockFunc(f func(list *list.List)) { - l.mu.Lock() - defer l.mu.Unlock() - if l.list == nil { - l.list = list.New() - } - f(l.list) -} - -// Iterator is alias of IteratorAsc. -func (l *List) Iterator(f func(e *Element) bool) { - l.IteratorAsc(f) -} - -// IteratorAsc iterates the list readonly in ascending order with given callback function `f`. -// If `f` returns true, then it continues iterating; or false to stop. -func (l *List) IteratorAsc(f func(e *Element) bool) { - l.mu.RLock() - defer l.mu.RUnlock() - if l.list == nil { - return - } - length := l.list.Len() - if length > 0 { - for i, e := 0, l.list.Front(); i < length; i, e = i+1, e.Next() { - if !f(e) { - break - } - } - } -} - -// IteratorDesc iterates the list readonly in descending order with given callback function `f`. -// If `f` returns true, then it continues iterating; or false to stop. -func (l *List) IteratorDesc(f func(e *Element) bool) { - l.mu.RLock() - defer l.mu.RUnlock() - if l.list == nil { - return - } - length := l.list.Len() - if length > 0 { - for i, e := 0, l.list.Back(); i < length; i, e = i+1, e.Prev() { - if !f(e) { - break - } - } - } -} - -// Join joins list elements with a string `glue`. -func (l *List) Join(glue string) string { - l.mu.RLock() - defer l.mu.RUnlock() - if l.list == nil { - return "" - } - buffer := bytes.NewBuffer(nil) - length := l.list.Len() - if length > 0 { - for i, e := 0, l.list.Front(); i < length; i, e = i+1, e.Next() { - buffer.WriteString(gconv.String(e.Value)) - if i != length-1 { - buffer.WriteString(glue) - } - } - } - return buffer.String() -} - -// String returns current list as a string. 
-func (l *List) String() string { - if l == nil { - return "" - } - return "[" + l.Join(",") + "]" -} - -// MarshalJSON implements the interface MarshalJSON for json.Marshal. -func (l List) MarshalJSON() ([]byte, error) { - return json.Marshal(l.FrontAll()) -} - -// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. -func (l *List) UnmarshalJSON(b []byte) error { - l.mu.Lock() - defer l.mu.Unlock() - if l.list == nil { - l.list = list.New() - } - var array []interface{} - if err := json.UnmarshalUseNumber(b, &array); err != nil { - return err - } - l.PushBacks(array) - return nil -} - -// UnmarshalValue is an interface implement which sets any type of value for list. -func (l *List) UnmarshalValue(value interface{}) (err error) { - l.mu.Lock() - defer l.mu.Unlock() - if l.list == nil { - l.list = list.New() - } - var array []interface{} - switch value.(type) { - case string, []byte: - err = json.UnmarshalUseNumber(gconv.Bytes(value), &array) - default: - array = gconv.SliceAny(value) - } - l.PushBacks(array) - return err -} - -// DeepCopy implements interface for deep copy of current type. -func (l *List) DeepCopy() interface{} { - if l == nil { - return nil - } - - l.mu.RLock() - defer l.mu.RUnlock() - - if l.list == nil { - return nil - } - var ( - length = l.list.Len() - values = make([]interface{}, length) - ) - if length > 0 { - for i, e := 0, l.list.Front(); i < length; i, e = i+1, e.Next() { - values[i] = deepcopy.Copy(e.Value) - } - } - return NewFrom(values, l.mu.IsSafe()) -} diff --git a/vendor/github.com/gogf/gf/v2/container/gmap/gmap.go b/vendor/github.com/gogf/gf/v2/container/gmap/gmap.go deleted file mode 100644 index 4cff99d3..00000000 --- a/vendor/github.com/gogf/gf/v2/container/gmap/gmap.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. 
-// If a copy of the MIT was not distributed with gm file, -// You can obtain one at https://github.com/gogf/gf. - -// Package gmap provides most commonly used map container which also support concurrent-safe/unsafe switch feature. -package gmap - -type ( - Map = AnyAnyMap // Map is alias of AnyAnyMap. - HashMap = AnyAnyMap // HashMap is alias of AnyAnyMap. -) - -// New creates and returns an empty hash map. -// The parameter `safe` is used to specify whether using map in concurrent-safety, -// which is false in default. -func New(safe ...bool) *Map { - return NewAnyAnyMap(safe...) -} - -// NewFrom creates and returns a hash map from given map `data`. -// Note that, the param `data` map will be set as the underlying data map(no deep copy), -// there might be some concurrent-safe issues when changing the map outside. -// The parameter `safe` is used to specify whether using tree in concurrent-safety, -// which is false in default. -func NewFrom(data map[interface{}]interface{}, safe ...bool) *Map { - return NewAnyAnyMapFrom(data, safe...) -} - -// NewHashMap creates and returns an empty hash map. -// The parameter `safe` is used to specify whether using map in concurrent-safety, -// which is false in default. -func NewHashMap(safe ...bool) *Map { - return NewAnyAnyMap(safe...) -} - -// NewHashMapFrom creates and returns a hash map from given map `data`. -// Note that, the param `data` map will be set as the underlying data map(no deep copy), -// there might be some concurrent-safe issues when changing the map outside. -// The parameter `safe` is used to specify whether using tree in concurrent-safety, -// which is false in default. -func NewHashMapFrom(data map[interface{}]interface{}, safe ...bool) *Map { - return NewAnyAnyMapFrom(data, safe...) 
-} diff --git a/vendor/github.com/gogf/gf/v2/container/gmap/gmap_hash_any_any_map.go b/vendor/github.com/gogf/gf/v2/container/gmap/gmap_hash_any_any_map.go deleted file mode 100644 index 37da5173..00000000 --- a/vendor/github.com/gogf/gf/v2/container/gmap/gmap_hash_any_any_map.go +++ /dev/null @@ -1,563 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with gm file, -// You can obtain one at https://github.com/gogf/gf. - -package gmap - -import ( - "github.com/gogf/gf/v2/container/gvar" - "github.com/gogf/gf/v2/internal/deepcopy" - "github.com/gogf/gf/v2/internal/empty" - "github.com/gogf/gf/v2/internal/json" - "github.com/gogf/gf/v2/internal/rwmutex" - "github.com/gogf/gf/v2/util/gconv" - "reflect" -) - -// AnyAnyMap wraps map type `map[interface{}]interface{}` and provides more map features. -type AnyAnyMap struct { - mu rwmutex.RWMutex - data map[interface{}]interface{} -} - -// NewAnyAnyMap creates and returns an empty hash map. -// The parameter `safe` is used to specify whether using map in concurrent-safety, -// which is false in default. -func NewAnyAnyMap(safe ...bool) *AnyAnyMap { - return &AnyAnyMap{ - mu: rwmutex.Create(safe...), - data: make(map[interface{}]interface{}), - } -} - -// NewAnyAnyMapFrom creates and returns a hash map from given map `data`. -// Note that, the param `data` map will be set as the underlying data map(no deep copy), -// there might be some concurrent-safe issues when changing the map outside. -func NewAnyAnyMapFrom(data map[interface{}]interface{}, safe ...bool) *AnyAnyMap { - return &AnyAnyMap{ - mu: rwmutex.Create(safe...), - data: data, - } -} - -// Iterator iterates the hash map readonly with custom callback function `f`. -// If `f` returns true, then it continues iterating; or false to stop. 
-func (m *AnyAnyMap) Iterator(f func(k interface{}, v interface{}) bool) { - m.mu.RLock() - defer m.mu.RUnlock() - for k, v := range m.data { - if !f(k, v) { - break - } - } -} - -// Clone returns a new hash map with copy of current map data. -func (m *AnyAnyMap) Clone(safe ...bool) *AnyAnyMap { - return NewFrom(m.MapCopy(), safe...) -} - -// Map returns the underlying data map. -// Note that, if it's in concurrent-safe usage, it returns a copy of underlying data, -// or else a pointer to the underlying data. -func (m *AnyAnyMap) Map() map[interface{}]interface{} { - m.mu.RLock() - defer m.mu.RUnlock() - if !m.mu.IsSafe() { - return m.data - } - data := make(map[interface{}]interface{}, len(m.data)) - for k, v := range m.data { - data[k] = v - } - return data -} - -// MapCopy returns a shallow copy of the underlying data of the hash map. -func (m *AnyAnyMap) MapCopy() map[interface{}]interface{} { - m.mu.RLock() - defer m.mu.RUnlock() - data := make(map[interface{}]interface{}, len(m.data)) - for k, v := range m.data { - data[k] = v - } - return data -} - -// MapStrAny returns a copy of the underlying data of the map as map[string]interface{}. -func (m *AnyAnyMap) MapStrAny() map[string]interface{} { - m.mu.RLock() - defer m.mu.RUnlock() - data := make(map[string]interface{}, len(m.data)) - for k, v := range m.data { - data[gconv.String(k)] = v - } - return data -} - -// FilterEmpty deletes all key-value pair of which the value is empty. -// Values like: 0, nil, false, "", len(slice/map/chan) == 0 are considered empty. -func (m *AnyAnyMap) FilterEmpty() { - m.mu.Lock() - defer m.mu.Unlock() - for k, v := range m.data { - if empty.IsEmpty(v) { - delete(m.data, k) - } - } -} - -// FilterNil deletes all key-value pair of which the value is nil. -func (m *AnyAnyMap) FilterNil() { - m.mu.Lock() - defer m.mu.Unlock() - for k, v := range m.data { - if empty.IsNil(v) { - delete(m.data, k) - } - } -} - -// Set sets key-value to the hash map. 
-func (m *AnyAnyMap) Set(key interface{}, value interface{}) { - m.mu.Lock() - if m.data == nil { - m.data = make(map[interface{}]interface{}) - } - m.data[key] = value - m.mu.Unlock() -} - -// Sets batch sets key-values to the hash map. -func (m *AnyAnyMap) Sets(data map[interface{}]interface{}) { - m.mu.Lock() - if m.data == nil { - m.data = data - } else { - for k, v := range data { - m.data[k] = v - } - } - m.mu.Unlock() -} - -// Search searches the map with given `key`. -// Second return parameter `found` is true if key was found, otherwise false. -func (m *AnyAnyMap) Search(key interface{}) (value interface{}, found bool) { - m.mu.RLock() - if m.data != nil { - value, found = m.data[key] - } - m.mu.RUnlock() - return -} - -// Get returns the value by given `key`. -func (m *AnyAnyMap) Get(key interface{}) (value interface{}) { - m.mu.RLock() - if m.data != nil { - value = m.data[key] - } - m.mu.RUnlock() - return -} - -// Pop retrieves and deletes an item from the map. -func (m *AnyAnyMap) Pop() (key, value interface{}) { - m.mu.Lock() - defer m.mu.Unlock() - for key, value = range m.data { - delete(m.data, key) - return - } - return -} - -// Pops retrieves and deletes `size` items from the map. -// It returns all items if size == -1. -func (m *AnyAnyMap) Pops(size int) map[interface{}]interface{} { - m.mu.Lock() - defer m.mu.Unlock() - if size > len(m.data) || size == -1 { - size = len(m.data) - } - if size == 0 { - return nil - } - var ( - index = 0 - newMap = make(map[interface{}]interface{}, size) - ) - for k, v := range m.data { - delete(m.data, k) - newMap[k] = v - index++ - if index == size { - break - } - } - return newMap -} - -// doSetWithLockCheck checks whether value of the key exists with mutex.Lock, -// if not exists, set value to the map with given `key`, -// or else just return the existing value. 
-// -// When setting value, if `value` is type of `func() interface {}`, -// it will be executed with mutex.Lock of the hash map, -// and its return value will be set to the map with `key`. -// -// It returns value with given `key`. -func (m *AnyAnyMap) doSetWithLockCheck(key interface{}, value interface{}) interface{} { - m.mu.Lock() - defer m.mu.Unlock() - if m.data == nil { - m.data = make(map[interface{}]interface{}) - } - if v, ok := m.data[key]; ok { - return v - } - if f, ok := value.(func() interface{}); ok { - value = f() - } - if value != nil { - m.data[key] = value - } - return value -} - -// GetOrSet returns the value by key, -// or sets value with given `value` if it does not exist and then returns this value. -func (m *AnyAnyMap) GetOrSet(key interface{}, value interface{}) interface{} { - if v, ok := m.Search(key); !ok { - return m.doSetWithLockCheck(key, value) - } else { - return v - } -} - -// GetOrSetFunc returns the value by key, -// or sets value with returned value of callback function `f` if it does not exist -// and then returns this value. -func (m *AnyAnyMap) GetOrSetFunc(key interface{}, f func() interface{}) interface{} { - if v, ok := m.Search(key); !ok { - return m.doSetWithLockCheck(key, f()) - } else { - return v - } -} - -// GetOrSetFuncLock returns the value by key, -// or sets value with returned value of callback function `f` if it does not exist -// and then returns this value. -// -// GetOrSetFuncLock differs with GetOrSetFunc function is that it executes function `f` -// with mutex.Lock of the hash map. -func (m *AnyAnyMap) GetOrSetFuncLock(key interface{}, f func() interface{}) interface{} { - if v, ok := m.Search(key); !ok { - return m.doSetWithLockCheck(key, f) - } else { - return v - } -} - -// GetVar returns a Var with the value by given `key`. -// The returned Var is un-concurrent safe. 
-func (m *AnyAnyMap) GetVar(key interface{}) *gvar.Var { - return gvar.New(m.Get(key)) -} - -// GetVarOrSet returns a Var with result from GetOrSet. -// The returned Var is un-concurrent safe. -func (m *AnyAnyMap) GetVarOrSet(key interface{}, value interface{}) *gvar.Var { - return gvar.New(m.GetOrSet(key, value)) -} - -// GetVarOrSetFunc returns a Var with result from GetOrSetFunc. -// The returned Var is un-concurrent safe. -func (m *AnyAnyMap) GetVarOrSetFunc(key interface{}, f func() interface{}) *gvar.Var { - return gvar.New(m.GetOrSetFunc(key, f)) -} - -// GetVarOrSetFuncLock returns a Var with result from GetOrSetFuncLock. -// The returned Var is un-concurrent safe. -func (m *AnyAnyMap) GetVarOrSetFuncLock(key interface{}, f func() interface{}) *gvar.Var { - return gvar.New(m.GetOrSetFuncLock(key, f)) -} - -// SetIfNotExist sets `value` to the map if the `key` does not exist, and then returns true. -// It returns false if `key` exists, and `value` would be ignored. -func (m *AnyAnyMap) SetIfNotExist(key interface{}, value interface{}) bool { - if !m.Contains(key) { - m.doSetWithLockCheck(key, value) - return true - } - return false -} - -// SetIfNotExistFunc sets value with return value of callback function `f`, and then returns true. -// It returns false if `key` exists, and `value` would be ignored. -func (m *AnyAnyMap) SetIfNotExistFunc(key interface{}, f func() interface{}) bool { - if !m.Contains(key) { - m.doSetWithLockCheck(key, f()) - return true - } - return false -} - -// SetIfNotExistFuncLock sets value with return value of callback function `f`, and then returns true. -// It returns false if `key` exists, and `value` would be ignored. -// -// SetIfNotExistFuncLock differs with SetIfNotExistFunc function is that -// it executes function `f` with mutex.Lock of the hash map. 
-func (m *AnyAnyMap) SetIfNotExistFuncLock(key interface{}, f func() interface{}) bool { - if !m.Contains(key) { - m.doSetWithLockCheck(key, f) - return true - } - return false -} - -// Remove deletes value from map by given `key`, and return this deleted value. -func (m *AnyAnyMap) Remove(key interface{}) (value interface{}) { - m.mu.Lock() - if m.data != nil { - var ok bool - if value, ok = m.data[key]; ok { - delete(m.data, key) - } - } - m.mu.Unlock() - return -} - -// Removes batch deletes values of the map by keys. -func (m *AnyAnyMap) Removes(keys []interface{}) { - m.mu.Lock() - if m.data != nil { - for _, key := range keys { - delete(m.data, key) - } - } - m.mu.Unlock() -} - -// Keys returns all keys of the map as a slice. -func (m *AnyAnyMap) Keys() []interface{} { - m.mu.RLock() - defer m.mu.RUnlock() - var ( - keys = make([]interface{}, len(m.data)) - index = 0 - ) - for key := range m.data { - keys[index] = key - index++ - } - return keys -} - -// Values returns all values of the map as a slice. -func (m *AnyAnyMap) Values() []interface{} { - m.mu.RLock() - defer m.mu.RUnlock() - var ( - values = make([]interface{}, len(m.data)) - index = 0 - ) - for _, value := range m.data { - values[index] = value - index++ - } - return values -} - -// Contains checks whether a key exists. -// It returns true if the `key` exists, or else false. -func (m *AnyAnyMap) Contains(key interface{}) bool { - var ok bool - m.mu.RLock() - if m.data != nil { - _, ok = m.data[key] - } - m.mu.RUnlock() - return ok -} - -// Size returns the size of the map. -func (m *AnyAnyMap) Size() int { - m.mu.RLock() - length := len(m.data) - m.mu.RUnlock() - return length -} - -// IsEmpty checks whether the map is empty. -// It returns true if map is empty, or else false. -func (m *AnyAnyMap) IsEmpty() bool { - return m.Size() == 0 -} - -// Clear deletes all data of the map, it will remake a new underlying data map. 
-func (m *AnyAnyMap) Clear() { - m.mu.Lock() - m.data = make(map[interface{}]interface{}) - m.mu.Unlock() -} - -// Replace the data of the map with given `data`. -func (m *AnyAnyMap) Replace(data map[interface{}]interface{}) { - m.mu.Lock() - m.data = data - m.mu.Unlock() -} - -// LockFunc locks writing with given callback function `f` within RWMutex.Lock. -func (m *AnyAnyMap) LockFunc(f func(m map[interface{}]interface{})) { - m.mu.Lock() - defer m.mu.Unlock() - f(m.data) -} - -// RLockFunc locks reading with given callback function `f` within RWMutex.RLock. -func (m *AnyAnyMap) RLockFunc(f func(m map[interface{}]interface{})) { - m.mu.RLock() - defer m.mu.RUnlock() - f(m.data) -} - -// Flip exchanges key-value of the map to value-key. -func (m *AnyAnyMap) Flip() { - m.mu.Lock() - defer m.mu.Unlock() - n := make(map[interface{}]interface{}, len(m.data)) - for k, v := range m.data { - n[v] = k - } - m.data = n -} - -// Merge merges two hash maps. -// The `other` map will be merged into the map `m`. -func (m *AnyAnyMap) Merge(other *AnyAnyMap) { - m.mu.Lock() - defer m.mu.Unlock() - if m.data == nil { - m.data = other.MapCopy() - return - } - if other != m { - other.mu.RLock() - defer other.mu.RUnlock() - } - for k, v := range other.data { - m.data[k] = v - } -} - -// String returns the map as a string. -func (m *AnyAnyMap) String() string { - if m == nil { - return "" - } - b, _ := m.MarshalJSON() - return string(b) -} - -// MarshalJSON implements the interface MarshalJSON for json.Marshal. -func (m AnyAnyMap) MarshalJSON() ([]byte, error) { - return json.Marshal(gconv.Map(m.Map())) -} - -// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. 
-func (m *AnyAnyMap) UnmarshalJSON(b []byte) error { - m.mu.Lock() - defer m.mu.Unlock() - if m.data == nil { - m.data = make(map[interface{}]interface{}) - } - var data map[string]interface{} - if err := json.UnmarshalUseNumber(b, &data); err != nil { - return err - } - for k, v := range data { - m.data[k] = v - } - return nil -} - -// UnmarshalValue is an interface implement which sets any type of value for map. -func (m *AnyAnyMap) UnmarshalValue(value interface{}) (err error) { - m.mu.Lock() - defer m.mu.Unlock() - if m.data == nil { - m.data = make(map[interface{}]interface{}) - } - for k, v := range gconv.Map(value) { - m.data[k] = v - } - return -} - -// DeepCopy implements interface for deep copy of current type. -func (m *AnyAnyMap) DeepCopy() interface{} { - if m == nil { - return nil - } - - m.mu.RLock() - defer m.mu.RUnlock() - data := make(map[interface{}]interface{}, len(m.data)) - for k, v := range m.data { - data[k] = deepcopy.Copy(v) - } - return NewFrom(data, m.mu.IsSafe()) -} - -// IsSubOf checks whether the current map is a sub-map of `other`. -func (m *AnyAnyMap) IsSubOf(other *AnyAnyMap) bool { - if m == other { - return true - } - m.mu.RLock() - defer m.mu.RUnlock() - other.mu.RLock() - defer other.mu.RUnlock() - for key, value := range m.data { - otherValue, ok := other.data[key] - if !ok { - return false - } - if otherValue != value { - return false - } - } - return true -} - -// Diff compares current map `m` with map `other` and returns their different keys. -// The returned `addedKeys` are the keys that are in map `m` but not in map `other`. -// The returned `removedKeys` are the keys that are in map `other` but not in map `m`. -// The returned `updatedKeys` are the keys that are both in map `m` and `other` but their values and not equal (`!=`). 
-func (m *AnyAnyMap) Diff(other *AnyAnyMap) (addedKeys, removedKeys, updatedKeys []interface{}) { - m.mu.RLock() - defer m.mu.RUnlock() - other.mu.RLock() - defer other.mu.RUnlock() - - for key := range m.data { - if _, ok := other.data[key]; !ok { - removedKeys = append(removedKeys, key) - } else if !reflect.DeepEqual(m.data[key], other.data[key]) { - updatedKeys = append(updatedKeys, key) - } - } - for key := range other.data { - if _, ok := m.data[key]; !ok { - addedKeys = append(addedKeys, key) - } - } - return -} diff --git a/vendor/github.com/gogf/gf/v2/container/gmap/gmap_hash_int_any_map.go b/vendor/github.com/gogf/gf/v2/container/gmap/gmap_hash_int_any_map.go deleted file mode 100644 index 64d68775..00000000 --- a/vendor/github.com/gogf/gf/v2/container/gmap/gmap_hash_int_any_map.go +++ /dev/null @@ -1,564 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with gm file, -// You can obtain one at https://github.com/gogf/gf. -// - -package gmap - -import ( - "github.com/gogf/gf/v2/container/gvar" - "github.com/gogf/gf/v2/internal/deepcopy" - "github.com/gogf/gf/v2/internal/empty" - "github.com/gogf/gf/v2/internal/json" - "github.com/gogf/gf/v2/internal/rwmutex" - "github.com/gogf/gf/v2/util/gconv" - "reflect" -) - -// IntAnyMap implements map[int]interface{} with RWMutex that has switch. -type IntAnyMap struct { - mu rwmutex.RWMutex - data map[int]interface{} -} - -// NewIntAnyMap returns an empty IntAnyMap object. -// The parameter `safe` is used to specify whether using map in concurrent-safety, -// which is false in default. -func NewIntAnyMap(safe ...bool) *IntAnyMap { - return &IntAnyMap{ - mu: rwmutex.Create(safe...), - data: make(map[int]interface{}), - } -} - -// NewIntAnyMapFrom creates and returns a hash map from given map `data`. 
-// Note that, the param `data` map will be set as the underlying data map(no deep copy), -// there might be some concurrent-safe issues when changing the map outside. -func NewIntAnyMapFrom(data map[int]interface{}, safe ...bool) *IntAnyMap { - return &IntAnyMap{ - mu: rwmutex.Create(safe...), - data: data, - } -} - -// Iterator iterates the hash map readonly with custom callback function `f`. -// If `f` returns true, then it continues iterating; or false to stop. -func (m *IntAnyMap) Iterator(f func(k int, v interface{}) bool) { - m.mu.RLock() - defer m.mu.RUnlock() - for k, v := range m.data { - if !f(k, v) { - break - } - } -} - -// Clone returns a new hash map with copy of current map data. -func (m *IntAnyMap) Clone() *IntAnyMap { - return NewIntAnyMapFrom(m.MapCopy(), m.mu.IsSafe()) -} - -// Map returns the underlying data map. -// Note that, if it's in concurrent-safe usage, it returns a copy of underlying data, -// or else a pointer to the underlying data. -func (m *IntAnyMap) Map() map[int]interface{} { - m.mu.RLock() - defer m.mu.RUnlock() - if !m.mu.IsSafe() { - return m.data - } - data := make(map[int]interface{}, len(m.data)) - for k, v := range m.data { - data[k] = v - } - return data -} - -// MapStrAny returns a copy of the underlying data of the map as map[string]interface{}. -func (m *IntAnyMap) MapStrAny() map[string]interface{} { - m.mu.RLock() - data := make(map[string]interface{}, len(m.data)) - for k, v := range m.data { - data[gconv.String(k)] = v - } - m.mu.RUnlock() - return data -} - -// MapCopy returns a copy of the underlying data of the hash map. -func (m *IntAnyMap) MapCopy() map[int]interface{} { - m.mu.RLock() - defer m.mu.RUnlock() - data := make(map[int]interface{}, len(m.data)) - for k, v := range m.data { - data[k] = v - } - return data -} - -// FilterEmpty deletes all key-value pair of which the value is empty. -// Values like: 0, nil, false, "", len(slice/map/chan) == 0 are considered empty. 
-func (m *IntAnyMap) FilterEmpty() { - m.mu.Lock() - for k, v := range m.data { - if empty.IsEmpty(v) { - delete(m.data, k) - } - } - m.mu.Unlock() -} - -// FilterNil deletes all key-value pair of which the value is nil. -func (m *IntAnyMap) FilterNil() { - m.mu.Lock() - defer m.mu.Unlock() - for k, v := range m.data { - if empty.IsNil(v) { - delete(m.data, k) - } - } -} - -// Set sets key-value to the hash map. -func (m *IntAnyMap) Set(key int, val interface{}) { - m.mu.Lock() - if m.data == nil { - m.data = make(map[int]interface{}) - } - m.data[key] = val - m.mu.Unlock() -} - -// Sets batch sets key-values to the hash map. -func (m *IntAnyMap) Sets(data map[int]interface{}) { - m.mu.Lock() - if m.data == nil { - m.data = data - } else { - for k, v := range data { - m.data[k] = v - } - } - m.mu.Unlock() -} - -// Search searches the map with given `key`. -// Second return parameter `found` is true if key was found, otherwise false. -func (m *IntAnyMap) Search(key int) (value interface{}, found bool) { - m.mu.RLock() - if m.data != nil { - value, found = m.data[key] - } - m.mu.RUnlock() - return -} - -// Get returns the value by given `key`. -func (m *IntAnyMap) Get(key int) (value interface{}) { - m.mu.RLock() - if m.data != nil { - value = m.data[key] - } - m.mu.RUnlock() - return -} - -// Pop retrieves and deletes an item from the map. -func (m *IntAnyMap) Pop() (key int, value interface{}) { - m.mu.Lock() - defer m.mu.Unlock() - for key, value = range m.data { - delete(m.data, key) - return - } - return -} - -// Pops retrieves and deletes `size` items from the map. -// It returns all items if size == -1. 
-func (m *IntAnyMap) Pops(size int) map[int]interface{} { - m.mu.Lock() - defer m.mu.Unlock() - if size > len(m.data) || size == -1 { - size = len(m.data) - } - if size == 0 { - return nil - } - var ( - index = 0 - newMap = make(map[int]interface{}, size) - ) - for k, v := range m.data { - delete(m.data, k) - newMap[k] = v - index++ - if index == size { - break - } - } - return newMap -} - -// doSetWithLockCheck checks whether value of the key exists with mutex.Lock, -// if not exists, set value to the map with given `key`, -// or else just return the existing value. -// -// When setting value, if `value` is type of `func() interface {}`, -// it will be executed with mutex.Lock of the hash map, -// and its return value will be set to the map with `key`. -// -// It returns value with given `key`. -func (m *IntAnyMap) doSetWithLockCheck(key int, value interface{}) interface{} { - m.mu.Lock() - defer m.mu.Unlock() - if m.data == nil { - m.data = make(map[int]interface{}) - } - if v, ok := m.data[key]; ok { - return v - } - if f, ok := value.(func() interface{}); ok { - value = f() - } - if value != nil { - m.data[key] = value - } - return value -} - -// GetOrSet returns the value by key, -// or sets value with given `value` if it does not exist and then returns this value. -func (m *IntAnyMap) GetOrSet(key int, value interface{}) interface{} { - if v, ok := m.Search(key); !ok { - return m.doSetWithLockCheck(key, value) - } else { - return v - } -} - -// GetOrSetFunc returns the value by key, -// or sets value with returned value of callback function `f` if it does not exist and returns this value. -func (m *IntAnyMap) GetOrSetFunc(key int, f func() interface{}) interface{} { - if v, ok := m.Search(key); !ok { - return m.doSetWithLockCheck(key, f()) - } else { - return v - } -} - -// GetOrSetFuncLock returns the value by key, -// or sets value with returned value of callback function `f` if it does not exist and returns this value. 
-// -// GetOrSetFuncLock differs with GetOrSetFunc function is that it executes function `f` -// with mutex.Lock of the hash map. -func (m *IntAnyMap) GetOrSetFuncLock(key int, f func() interface{}) interface{} { - if v, ok := m.Search(key); !ok { - return m.doSetWithLockCheck(key, f) - } else { - return v - } -} - -// GetVar returns a Var with the value by given `key`. -// The returned Var is un-concurrent safe. -func (m *IntAnyMap) GetVar(key int) *gvar.Var { - return gvar.New(m.Get(key)) -} - -// GetVarOrSet returns a Var with result from GetVarOrSet. -// The returned Var is un-concurrent safe. -func (m *IntAnyMap) GetVarOrSet(key int, value interface{}) *gvar.Var { - return gvar.New(m.GetOrSet(key, value)) -} - -// GetVarOrSetFunc returns a Var with result from GetOrSetFunc. -// The returned Var is un-concurrent safe. -func (m *IntAnyMap) GetVarOrSetFunc(key int, f func() interface{}) *gvar.Var { - return gvar.New(m.GetOrSetFunc(key, f)) -} - -// GetVarOrSetFuncLock returns a Var with result from GetOrSetFuncLock. -// The returned Var is un-concurrent safe. -func (m *IntAnyMap) GetVarOrSetFuncLock(key int, f func() interface{}) *gvar.Var { - return gvar.New(m.GetOrSetFuncLock(key, f)) -} - -// SetIfNotExist sets `value` to the map if the `key` does not exist, and then returns true. -// It returns false if `key` exists, and `value` would be ignored. -func (m *IntAnyMap) SetIfNotExist(key int, value interface{}) bool { - if !m.Contains(key) { - m.doSetWithLockCheck(key, value) - return true - } - return false -} - -// SetIfNotExistFunc sets value with return value of callback function `f`, and then returns true. -// It returns false if `key` exists, and `value` would be ignored. -func (m *IntAnyMap) SetIfNotExistFunc(key int, f func() interface{}) bool { - if !m.Contains(key) { - m.doSetWithLockCheck(key, f()) - return true - } - return false -} - -// SetIfNotExistFuncLock sets value with return value of callback function `f`, and then returns true. 
-// It returns false if `key` exists, and `value` would be ignored. -// -// SetIfNotExistFuncLock differs with SetIfNotExistFunc function is that -// it executes function `f` with mutex.Lock of the hash map. -func (m *IntAnyMap) SetIfNotExistFuncLock(key int, f func() interface{}) bool { - if !m.Contains(key) { - m.doSetWithLockCheck(key, f) - return true - } - return false -} - -// Removes batch deletes values of the map by keys. -func (m *IntAnyMap) Removes(keys []int) { - m.mu.Lock() - if m.data != nil { - for _, key := range keys { - delete(m.data, key) - } - } - m.mu.Unlock() -} - -// Remove deletes value from map by given `key`, and return this deleted value. -func (m *IntAnyMap) Remove(key int) (value interface{}) { - m.mu.Lock() - if m.data != nil { - var ok bool - if value, ok = m.data[key]; ok { - delete(m.data, key) - } - } - m.mu.Unlock() - return -} - -// Keys returns all keys of the map as a slice. -func (m *IntAnyMap) Keys() []int { - m.mu.RLock() - var ( - keys = make([]int, len(m.data)) - index = 0 - ) - for key := range m.data { - keys[index] = key - index++ - } - m.mu.RUnlock() - return keys -} - -// Values returns all values of the map as a slice. -func (m *IntAnyMap) Values() []interface{} { - m.mu.RLock() - var ( - values = make([]interface{}, len(m.data)) - index = 0 - ) - for _, value := range m.data { - values[index] = value - index++ - } - m.mu.RUnlock() - return values -} - -// Contains checks whether a key exists. -// It returns true if the `key` exists, or else false. -func (m *IntAnyMap) Contains(key int) bool { - var ok bool - m.mu.RLock() - if m.data != nil { - _, ok = m.data[key] - } - m.mu.RUnlock() - return ok -} - -// Size returns the size of the map. -func (m *IntAnyMap) Size() int { - m.mu.RLock() - length := len(m.data) - m.mu.RUnlock() - return length -} - -// IsEmpty checks whether the map is empty. -// It returns true if map is empty, or else false. 
-func (m *IntAnyMap) IsEmpty() bool { - return m.Size() == 0 -} - -// Clear deletes all data of the map, it will remake a new underlying data map. -func (m *IntAnyMap) Clear() { - m.mu.Lock() - m.data = make(map[int]interface{}) - m.mu.Unlock() -} - -// Replace the data of the map with given `data`. -func (m *IntAnyMap) Replace(data map[int]interface{}) { - m.mu.Lock() - m.data = data - m.mu.Unlock() -} - -// LockFunc locks writing with given callback function `f` within RWMutex.Lock. -func (m *IntAnyMap) LockFunc(f func(m map[int]interface{})) { - m.mu.Lock() - defer m.mu.Unlock() - f(m.data) -} - -// RLockFunc locks reading with given callback function `f` within RWMutex.RLock. -func (m *IntAnyMap) RLockFunc(f func(m map[int]interface{})) { - m.mu.RLock() - defer m.mu.RUnlock() - f(m.data) -} - -// Flip exchanges key-value of the map to value-key. -func (m *IntAnyMap) Flip() { - m.mu.Lock() - defer m.mu.Unlock() - n := make(map[int]interface{}, len(m.data)) - for k, v := range m.data { - n[gconv.Int(v)] = k - } - m.data = n -} - -// Merge merges two hash maps. -// The `other` map will be merged into the map `m`. -func (m *IntAnyMap) Merge(other *IntAnyMap) { - m.mu.Lock() - defer m.mu.Unlock() - if m.data == nil { - m.data = other.MapCopy() - return - } - if other != m { - other.mu.RLock() - defer other.mu.RUnlock() - } - for k, v := range other.data { - m.data[k] = v - } -} - -// String returns the map as a string. -func (m *IntAnyMap) String() string { - if m == nil { - return "" - } - b, _ := m.MarshalJSON() - return string(b) -} - -// MarshalJSON implements the interface MarshalJSON for json.Marshal. -func (m IntAnyMap) MarshalJSON() ([]byte, error) { - m.mu.RLock() - defer m.mu.RUnlock() - return json.Marshal(m.data) -} - -// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. 
-func (m *IntAnyMap) UnmarshalJSON(b []byte) error { - m.mu.Lock() - defer m.mu.Unlock() - if m.data == nil { - m.data = make(map[int]interface{}) - } - if err := json.UnmarshalUseNumber(b, &m.data); err != nil { - return err - } - return nil -} - -// UnmarshalValue is an interface implement which sets any type of value for map. -func (m *IntAnyMap) UnmarshalValue(value interface{}) (err error) { - m.mu.Lock() - defer m.mu.Unlock() - if m.data == nil { - m.data = make(map[int]interface{}) - } - switch value.(type) { - case string, []byte: - return json.UnmarshalUseNumber(gconv.Bytes(value), &m.data) - default: - for k, v := range gconv.Map(value) { - m.data[gconv.Int(k)] = v - } - } - return -} - -// DeepCopy implements interface for deep copy of current type. -func (m *IntAnyMap) DeepCopy() interface{} { - if m == nil { - return nil - } - m.mu.RLock() - defer m.mu.RUnlock() - data := make(map[int]interface{}, len(m.data)) - for k, v := range m.data { - data[k] = deepcopy.Copy(v) - } - return NewIntAnyMapFrom(data, m.mu.IsSafe()) -} - -// IsSubOf checks whether the current map is a sub-map of `other`. -func (m *IntAnyMap) IsSubOf(other *IntAnyMap) bool { - if m == other { - return true - } - m.mu.RLock() - defer m.mu.RUnlock() - other.mu.RLock() - defer other.mu.RUnlock() - for key, value := range m.data { - otherValue, ok := other.data[key] - if !ok { - return false - } - if otherValue != value { - return false - } - } - return true -} - -// Diff compares current map `m` with map `other` and returns their different keys. -// The returned `addedKeys` are the keys that are in map `m` but not in map `other`. -// The returned `removedKeys` are the keys that are in map `other` but not in map `m`. -// The returned `updatedKeys` are the keys that are both in map `m` and `other` but their values and not equal (`!=`). 
-func (m *IntAnyMap) Diff(other *IntAnyMap) (addedKeys, removedKeys, updatedKeys []int) { - m.mu.RLock() - defer m.mu.RUnlock() - other.mu.RLock() - defer other.mu.RUnlock() - - for key := range m.data { - if _, ok := other.data[key]; !ok { - removedKeys = append(removedKeys, key) - } else if !reflect.DeepEqual(m.data[key], other.data[key]) { - updatedKeys = append(updatedKeys, key) - } - } - for key := range other.data { - if _, ok := m.data[key]; !ok { - addedKeys = append(addedKeys, key) - } - } - return -} diff --git a/vendor/github.com/gogf/gf/v2/container/gmap/gmap_hash_int_int_map.go b/vendor/github.com/gogf/gf/v2/container/gmap/gmap_hash_int_int_map.go deleted file mode 100644 index 5fb8c440..00000000 --- a/vendor/github.com/gogf/gf/v2/container/gmap/gmap_hash_int_int_map.go +++ /dev/null @@ -1,533 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with gm file, -// You can obtain one at https://github.com/gogf/gf. - -package gmap - -import ( - "github.com/gogf/gf/v2/internal/empty" - "github.com/gogf/gf/v2/internal/json" - "github.com/gogf/gf/v2/internal/rwmutex" - "github.com/gogf/gf/v2/util/gconv" -) - -// IntIntMap implements map[int]int with RWMutex that has switch. -type IntIntMap struct { - mu rwmutex.RWMutex - data map[int]int -} - -// NewIntIntMap returns an empty IntIntMap object. -// The parameter `safe` is used to specify whether using map in concurrent-safety, -// which is false in default. -func NewIntIntMap(safe ...bool) *IntIntMap { - return &IntIntMap{ - mu: rwmutex.Create(safe...), - data: make(map[int]int), - } -} - -// NewIntIntMapFrom creates and returns a hash map from given map `data`. -// Note that, the param `data` map will be set as the underlying data map(no deep copy), -// there might be some concurrent-safe issues when changing the map outside. 
-func NewIntIntMapFrom(data map[int]int, safe ...bool) *IntIntMap { - return &IntIntMap{ - mu: rwmutex.Create(safe...), - data: data, - } -} - -// Iterator iterates the hash map readonly with custom callback function `f`. -// If `f` returns true, then it continues iterating; or false to stop. -func (m *IntIntMap) Iterator(f func(k int, v int) bool) { - m.mu.RLock() - defer m.mu.RUnlock() - for k, v := range m.data { - if !f(k, v) { - break - } - } -} - -// Clone returns a new hash map with copy of current map data. -func (m *IntIntMap) Clone() *IntIntMap { - return NewIntIntMapFrom(m.MapCopy(), m.mu.IsSafe()) -} - -// Map returns the underlying data map. -// Note that, if it's in concurrent-safe usage, it returns a copy of underlying data, -// or else a pointer to the underlying data. -func (m *IntIntMap) Map() map[int]int { - m.mu.RLock() - defer m.mu.RUnlock() - if !m.mu.IsSafe() { - return m.data - } - data := make(map[int]int, len(m.data)) - for k, v := range m.data { - data[k] = v - } - return data -} - -// MapStrAny returns a copy of the underlying data of the map as map[string]interface{}. -func (m *IntIntMap) MapStrAny() map[string]interface{} { - m.mu.RLock() - data := make(map[string]interface{}, len(m.data)) - for k, v := range m.data { - data[gconv.String(k)] = v - } - m.mu.RUnlock() - return data -} - -// MapCopy returns a copy of the underlying data of the hash map. -func (m *IntIntMap) MapCopy() map[int]int { - m.mu.RLock() - defer m.mu.RUnlock() - data := make(map[int]int, len(m.data)) - for k, v := range m.data { - data[k] = v - } - return data -} - -// FilterEmpty deletes all key-value pair of which the value is empty. -// Values like: 0, nil, false, "", len(slice/map/chan) == 0 are considered empty. -func (m *IntIntMap) FilterEmpty() { - m.mu.Lock() - for k, v := range m.data { - if empty.IsEmpty(v) { - delete(m.data, k) - } - } - m.mu.Unlock() -} - -// Set sets key-value to the hash map. 
-func (m *IntIntMap) Set(key int, val int) { - m.mu.Lock() - if m.data == nil { - m.data = make(map[int]int) - } - m.data[key] = val - m.mu.Unlock() -} - -// Sets batch sets key-values to the hash map. -func (m *IntIntMap) Sets(data map[int]int) { - m.mu.Lock() - if m.data == nil { - m.data = data - } else { - for k, v := range data { - m.data[k] = v - } - } - m.mu.Unlock() -} - -// Search searches the map with given `key`. -// Second return parameter `found` is true if key was found, otherwise false. -func (m *IntIntMap) Search(key int) (value int, found bool) { - m.mu.RLock() - if m.data != nil { - value, found = m.data[key] - } - m.mu.RUnlock() - return -} - -// Get returns the value by given `key`. -func (m *IntIntMap) Get(key int) (value int) { - m.mu.RLock() - if m.data != nil { - value = m.data[key] - } - m.mu.RUnlock() - return -} - -// Pop retrieves and deletes an item from the map. -func (m *IntIntMap) Pop() (key, value int) { - m.mu.Lock() - defer m.mu.Unlock() - for key, value = range m.data { - delete(m.data, key) - return - } - return -} - -// Pops retrieves and deletes `size` items from the map. -// It returns all items if size == -1. -func (m *IntIntMap) Pops(size int) map[int]int { - m.mu.Lock() - defer m.mu.Unlock() - if size > len(m.data) || size == -1 { - size = len(m.data) - } - if size == 0 { - return nil - } - var ( - index = 0 - newMap = make(map[int]int, size) - ) - for k, v := range m.data { - delete(m.data, k) - newMap[k] = v - index++ - if index == size { - break - } - } - return newMap -} - -// doSetWithLockCheck checks whether value of the key exists with mutex.Lock, -// if not exists, set value to the map with given `key`, -// or else just return the existing value. -// -// It returns value with given `key`. 
-func (m *IntIntMap) doSetWithLockCheck(key int, value int) int { - m.mu.Lock() - defer m.mu.Unlock() - if m.data == nil { - m.data = make(map[int]int) - } - if v, ok := m.data[key]; ok { - return v - } - m.data[key] = value - return value -} - -// GetOrSet returns the value by key, -// or sets value with given `value` if it does not exist and then returns this value. -func (m *IntIntMap) GetOrSet(key int, value int) int { - if v, ok := m.Search(key); !ok { - return m.doSetWithLockCheck(key, value) - } else { - return v - } -} - -// GetOrSetFunc returns the value by key, -// or sets value with returned value of callback function `f` if it does not exist and returns this value. -func (m *IntIntMap) GetOrSetFunc(key int, f func() int) int { - if v, ok := m.Search(key); !ok { - return m.doSetWithLockCheck(key, f()) - } else { - return v - } -} - -// GetOrSetFuncLock returns the value by key, -// or sets value with returned value of callback function `f` if it does not exist and returns this value. -// -// GetOrSetFuncLock differs with GetOrSetFunc function is that it executes function `f` -// with mutex.Lock of the hash map. -func (m *IntIntMap) GetOrSetFuncLock(key int, f func() int) int { - if v, ok := m.Search(key); !ok { - m.mu.Lock() - defer m.mu.Unlock() - if m.data == nil { - m.data = make(map[int]int) - } - if v, ok = m.data[key]; ok { - return v - } - v = f() - m.data[key] = v - return v - } else { - return v - } -} - -// SetIfNotExist sets `value` to the map if the `key` does not exist, and then returns true. -// It returns false if `key` exists, and `value` would be ignored. -func (m *IntIntMap) SetIfNotExist(key int, value int) bool { - if !m.Contains(key) { - m.doSetWithLockCheck(key, value) - return true - } - return false -} - -// SetIfNotExistFunc sets value with return value of callback function `f`, and then returns true. -// It returns false if `key` exists, and `value` would be ignored. 
-func (m *IntIntMap) SetIfNotExistFunc(key int, f func() int) bool { - if !m.Contains(key) { - m.doSetWithLockCheck(key, f()) - return true - } - return false -} - -// SetIfNotExistFuncLock sets value with return value of callback function `f`, and then returns true. -// It returns false if `key` exists, and `value` would be ignored. -// -// SetIfNotExistFuncLock differs with SetIfNotExistFunc function is that -// it executes function `f` with mutex.Lock of the hash map. -func (m *IntIntMap) SetIfNotExistFuncLock(key int, f func() int) bool { - if !m.Contains(key) { - m.mu.Lock() - defer m.mu.Unlock() - if m.data == nil { - m.data = make(map[int]int) - } - if _, ok := m.data[key]; !ok { - m.data[key] = f() - } - return true - } - return false -} - -// Removes batch deletes values of the map by keys. -func (m *IntIntMap) Removes(keys []int) { - m.mu.Lock() - if m.data != nil { - for _, key := range keys { - delete(m.data, key) - } - } - m.mu.Unlock() -} - -// Remove deletes value from map by given `key`, and return this deleted value. -func (m *IntIntMap) Remove(key int) (value int) { - m.mu.Lock() - if m.data != nil { - var ok bool - if value, ok = m.data[key]; ok { - delete(m.data, key) - } - } - m.mu.Unlock() - return -} - -// Keys returns all keys of the map as a slice. -func (m *IntIntMap) Keys() []int { - m.mu.RLock() - var ( - keys = make([]int, len(m.data)) - index = 0 - ) - for key := range m.data { - keys[index] = key - index++ - } - m.mu.RUnlock() - return keys -} - -// Values returns all values of the map as a slice. -func (m *IntIntMap) Values() []int { - m.mu.RLock() - var ( - values = make([]int, len(m.data)) - index = 0 - ) - for _, value := range m.data { - values[index] = value - index++ - } - m.mu.RUnlock() - return values -} - -// Contains checks whether a key exists. -// It returns true if the `key` exists, or else false. 
-func (m *IntIntMap) Contains(key int) bool { - var ok bool - m.mu.RLock() - if m.data != nil { - _, ok = m.data[key] - } - m.mu.RUnlock() - return ok -} - -// Size returns the size of the map. -func (m *IntIntMap) Size() int { - m.mu.RLock() - length := len(m.data) - m.mu.RUnlock() - return length -} - -// IsEmpty checks whether the map is empty. -// It returns true if map is empty, or else false. -func (m *IntIntMap) IsEmpty() bool { - return m.Size() == 0 -} - -// Clear deletes all data of the map, it will remake a new underlying data map. -func (m *IntIntMap) Clear() { - m.mu.Lock() - m.data = make(map[int]int) - m.mu.Unlock() -} - -// Replace the data of the map with given `data`. -func (m *IntIntMap) Replace(data map[int]int) { - m.mu.Lock() - m.data = data - m.mu.Unlock() -} - -// LockFunc locks writing with given callback function `f` within RWMutex.Lock. -func (m *IntIntMap) LockFunc(f func(m map[int]int)) { - m.mu.Lock() - defer m.mu.Unlock() - f(m.data) -} - -// RLockFunc locks reading with given callback function `f` within RWMutex.RLock. -func (m *IntIntMap) RLockFunc(f func(m map[int]int)) { - m.mu.RLock() - defer m.mu.RUnlock() - f(m.data) -} - -// Flip exchanges key-value of the map to value-key. -func (m *IntIntMap) Flip() { - m.mu.Lock() - defer m.mu.Unlock() - n := make(map[int]int, len(m.data)) - for k, v := range m.data { - n[v] = k - } - m.data = n -} - -// Merge merges two hash maps. -// The `other` map will be merged into the map `m`. -func (m *IntIntMap) Merge(other *IntIntMap) { - m.mu.Lock() - defer m.mu.Unlock() - if m.data == nil { - m.data = other.MapCopy() - return - } - if other != m { - other.mu.RLock() - defer other.mu.RUnlock() - } - for k, v := range other.data { - m.data[k] = v - } -} - -// String returns the map as a string. -func (m *IntIntMap) String() string { - if m == nil { - return "" - } - b, _ := m.MarshalJSON() - return string(b) -} - -// MarshalJSON implements the interface MarshalJSON for json.Marshal. 
-func (m IntIntMap) MarshalJSON() ([]byte, error) { - m.mu.RLock() - defer m.mu.RUnlock() - return json.Marshal(m.data) -} - -// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. -func (m *IntIntMap) UnmarshalJSON(b []byte) error { - m.mu.Lock() - defer m.mu.Unlock() - if m.data == nil { - m.data = make(map[int]int) - } - if err := json.UnmarshalUseNumber(b, &m.data); err != nil { - return err - } - return nil -} - -// UnmarshalValue is an interface implement which sets any type of value for map. -func (m *IntIntMap) UnmarshalValue(value interface{}) (err error) { - m.mu.Lock() - defer m.mu.Unlock() - if m.data == nil { - m.data = make(map[int]int) - } - switch value.(type) { - case string, []byte: - return json.UnmarshalUseNumber(gconv.Bytes(value), &m.data) - default: - for k, v := range gconv.Map(value) { - m.data[gconv.Int(k)] = gconv.Int(v) - } - } - return -} - -// DeepCopy implements interface for deep copy of current type. -func (m *IntIntMap) DeepCopy() interface{} { - if m == nil { - return nil - } - m.mu.RLock() - defer m.mu.RUnlock() - data := make(map[int]int, len(m.data)) - for k, v := range m.data { - data[k] = v - } - return NewIntIntMapFrom(data, m.mu.IsSafe()) -} - -// IsSubOf checks whether the current map is a sub-map of `other`. -func (m *IntIntMap) IsSubOf(other *IntIntMap) bool { - if m == other { - return true - } - m.mu.RLock() - defer m.mu.RUnlock() - other.mu.RLock() - defer other.mu.RUnlock() - for key, value := range m.data { - otherValue, ok := other.data[key] - if !ok { - return false - } - if otherValue != value { - return false - } - } - return true -} - -// Diff compares current map `m` with map `other` and returns their different keys. -// The returned `addedKeys` are the keys that are in map `m` but not in map `other`. -// The returned `removedKeys` are the keys that are in map `other` but not in map `m`. 
-// The returned `updatedKeys` are the keys that are both in map `m` and `other` but their values and not equal (`!=`). -func (m *IntIntMap) Diff(other *IntIntMap) (addedKeys, removedKeys, updatedKeys []int) { - m.mu.RLock() - defer m.mu.RUnlock() - other.mu.RLock() - defer other.mu.RUnlock() - - for key := range m.data { - if _, ok := other.data[key]; !ok { - removedKeys = append(removedKeys, key) - } else if m.data[key] != other.data[key] { - updatedKeys = append(updatedKeys, key) - } - } - for key := range other.data { - if _, ok := m.data[key]; !ok { - addedKeys = append(addedKeys, key) - } - } - return -} diff --git a/vendor/github.com/gogf/gf/v2/container/gmap/gmap_hash_int_str_map.go b/vendor/github.com/gogf/gf/v2/container/gmap/gmap_hash_int_str_map.go deleted file mode 100644 index ffba090b..00000000 --- a/vendor/github.com/gogf/gf/v2/container/gmap/gmap_hash_int_str_map.go +++ /dev/null @@ -1,533 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with gm file, -// You can obtain one at https://github.com/gogf/gf. - -package gmap - -import ( - "github.com/gogf/gf/v2/internal/empty" - "github.com/gogf/gf/v2/internal/json" - "github.com/gogf/gf/v2/internal/rwmutex" - "github.com/gogf/gf/v2/util/gconv" -) - -// IntStrMap implements map[int]string with RWMutex that has switch. -type IntStrMap struct { - mu rwmutex.RWMutex - data map[int]string -} - -// NewIntStrMap returns an empty IntStrMap object. -// The parameter `safe` is used to specify whether using map in concurrent-safety, -// which is false in default. -func NewIntStrMap(safe ...bool) *IntStrMap { - return &IntStrMap{ - mu: rwmutex.Create(safe...), - data: make(map[int]string), - } -} - -// NewIntStrMapFrom creates and returns a hash map from given map `data`. 
-// Note that, the param `data` map will be set as the underlying data map(no deep copy), -// there might be some concurrent-safe issues when changing the map outside. -func NewIntStrMapFrom(data map[int]string, safe ...bool) *IntStrMap { - return &IntStrMap{ - mu: rwmutex.Create(safe...), - data: data, - } -} - -// Iterator iterates the hash map readonly with custom callback function `f`. -// If `f` returns true, then it continues iterating; or false to stop. -func (m *IntStrMap) Iterator(f func(k int, v string) bool) { - m.mu.RLock() - defer m.mu.RUnlock() - for k, v := range m.data { - if !f(k, v) { - break - } - } -} - -// Clone returns a new hash map with copy of current map data. -func (m *IntStrMap) Clone() *IntStrMap { - return NewIntStrMapFrom(m.MapCopy(), m.mu.IsSafe()) -} - -// Map returns the underlying data map. -// Note that, if it's in concurrent-safe usage, it returns a copy of underlying data, -// or else a pointer to the underlying data. -func (m *IntStrMap) Map() map[int]string { - m.mu.RLock() - defer m.mu.RUnlock() - if !m.mu.IsSafe() { - return m.data - } - data := make(map[int]string, len(m.data)) - for k, v := range m.data { - data[k] = v - } - return data -} - -// MapStrAny returns a copy of the underlying data of the map as map[string]interface{}. -func (m *IntStrMap) MapStrAny() map[string]interface{} { - m.mu.RLock() - data := make(map[string]interface{}, len(m.data)) - for k, v := range m.data { - data[gconv.String(k)] = v - } - m.mu.RUnlock() - return data -} - -// MapCopy returns a copy of the underlying data of the hash map. -func (m *IntStrMap) MapCopy() map[int]string { - m.mu.RLock() - defer m.mu.RUnlock() - data := make(map[int]string, len(m.data)) - for k, v := range m.data { - data[k] = v - } - return data -} - -// FilterEmpty deletes all key-value pair of which the value is empty. -// Values like: 0, nil, false, "", len(slice/map/chan) == 0 are considered empty. 
-func (m *IntStrMap) FilterEmpty() { - m.mu.Lock() - for k, v := range m.data { - if empty.IsEmpty(v) { - delete(m.data, k) - } - } - m.mu.Unlock() -} - -// Set sets key-value to the hash map. -func (m *IntStrMap) Set(key int, val string) { - m.mu.Lock() - if m.data == nil { - m.data = make(map[int]string) - } - m.data[key] = val - m.mu.Unlock() -} - -// Sets batch sets key-values to the hash map. -func (m *IntStrMap) Sets(data map[int]string) { - m.mu.Lock() - if m.data == nil { - m.data = data - } else { - for k, v := range data { - m.data[k] = v - } - } - m.mu.Unlock() -} - -// Search searches the map with given `key`. -// Second return parameter `found` is true if key was found, otherwise false. -func (m *IntStrMap) Search(key int) (value string, found bool) { - m.mu.RLock() - if m.data != nil { - value, found = m.data[key] - } - m.mu.RUnlock() - return -} - -// Get returns the value by given `key`. -func (m *IntStrMap) Get(key int) (value string) { - m.mu.RLock() - if m.data != nil { - value = m.data[key] - } - m.mu.RUnlock() - return -} - -// Pop retrieves and deletes an item from the map. -func (m *IntStrMap) Pop() (key int, value string) { - m.mu.Lock() - defer m.mu.Unlock() - for key, value = range m.data { - delete(m.data, key) - return - } - return -} - -// Pops retrieves and deletes `size` items from the map. -// It returns all items if size == -1. -func (m *IntStrMap) Pops(size int) map[int]string { - m.mu.Lock() - defer m.mu.Unlock() - if size > len(m.data) || size == -1 { - size = len(m.data) - } - if size == 0 { - return nil - } - var ( - index = 0 - newMap = make(map[int]string, size) - ) - for k, v := range m.data { - delete(m.data, k) - newMap[k] = v - index++ - if index == size { - break - } - } - return newMap -} - -// doSetWithLockCheck checks whether value of the key exists with mutex.Lock, -// if not exists, set value to the map with given `key`, -// or else just return the existing value. -// -// It returns value with given `key`. 
-func (m *IntStrMap) doSetWithLockCheck(key int, value string) string { - m.mu.Lock() - defer m.mu.Unlock() - if m.data == nil { - m.data = make(map[int]string) - } - if v, ok := m.data[key]; ok { - return v - } - m.data[key] = value - return value -} - -// GetOrSet returns the value by key, -// or sets value with given `value` if it does not exist and then returns this value. -func (m *IntStrMap) GetOrSet(key int, value string) string { - if v, ok := m.Search(key); !ok { - return m.doSetWithLockCheck(key, value) - } else { - return v - } -} - -// GetOrSetFunc returns the value by key, -// or sets value with returned value of callback function `f` if it does not exist and returns this value. -func (m *IntStrMap) GetOrSetFunc(key int, f func() string) string { - if v, ok := m.Search(key); !ok { - return m.doSetWithLockCheck(key, f()) - } else { - return v - } -} - -// GetOrSetFuncLock returns the value by key, -// or sets value with returned value of callback function `f` if it does not exist and returns this value. -// -// GetOrSetFuncLock differs with GetOrSetFunc function is that it executes function `f` -// with mutex.Lock of the hash map. -func (m *IntStrMap) GetOrSetFuncLock(key int, f func() string) string { - if v, ok := m.Search(key); !ok { - m.mu.Lock() - defer m.mu.Unlock() - if m.data == nil { - m.data = make(map[int]string) - } - if v, ok = m.data[key]; ok { - return v - } - v = f() - m.data[key] = v - return v - } else { - return v - } -} - -// SetIfNotExist sets `value` to the map if the `key` does not exist, and then returns true. -// It returns false if `key` exists, and `value` would be ignored. -func (m *IntStrMap) SetIfNotExist(key int, value string) bool { - if !m.Contains(key) { - m.doSetWithLockCheck(key, value) - return true - } - return false -} - -// SetIfNotExistFunc sets value with return value of callback function `f`, and then returns true. -// It returns false if `key` exists, and `value` would be ignored. 
-func (m *IntStrMap) SetIfNotExistFunc(key int, f func() string) bool { - if !m.Contains(key) { - m.doSetWithLockCheck(key, f()) - return true - } - return false -} - -// SetIfNotExistFuncLock sets value with return value of callback function `f`, and then returns true. -// It returns false if `key` exists, and `value` would be ignored. -// -// SetIfNotExistFuncLock differs with SetIfNotExistFunc function is that -// it executes function `f` with mutex.Lock of the hash map. -func (m *IntStrMap) SetIfNotExistFuncLock(key int, f func() string) bool { - if !m.Contains(key) { - m.mu.Lock() - defer m.mu.Unlock() - if m.data == nil { - m.data = make(map[int]string) - } - if _, ok := m.data[key]; !ok { - m.data[key] = f() - } - return true - } - return false -} - -// Removes batch deletes values of the map by keys. -func (m *IntStrMap) Removes(keys []int) { - m.mu.Lock() - if m.data != nil { - for _, key := range keys { - delete(m.data, key) - } - } - m.mu.Unlock() -} - -// Remove deletes value from map by given `key`, and return this deleted value. -func (m *IntStrMap) Remove(key int) (value string) { - m.mu.Lock() - if m.data != nil { - var ok bool - if value, ok = m.data[key]; ok { - delete(m.data, key) - } - } - m.mu.Unlock() - return -} - -// Keys returns all keys of the map as a slice. -func (m *IntStrMap) Keys() []int { - m.mu.RLock() - var ( - keys = make([]int, len(m.data)) - index = 0 - ) - for key := range m.data { - keys[index] = key - index++ - } - m.mu.RUnlock() - return keys -} - -// Values returns all values of the map as a slice. -func (m *IntStrMap) Values() []string { - m.mu.RLock() - var ( - values = make([]string, len(m.data)) - index = 0 - ) - for _, value := range m.data { - values[index] = value - index++ - } - m.mu.RUnlock() - return values -} - -// Contains checks whether a key exists. -// It returns true if the `key` exists, or else false. 
-func (m *IntStrMap) Contains(key int) bool { - var ok bool - m.mu.RLock() - if m.data != nil { - _, ok = m.data[key] - } - m.mu.RUnlock() - return ok -} - -// Size returns the size of the map. -func (m *IntStrMap) Size() int { - m.mu.RLock() - length := len(m.data) - m.mu.RUnlock() - return length -} - -// IsEmpty checks whether the map is empty. -// It returns true if map is empty, or else false. -func (m *IntStrMap) IsEmpty() bool { - return m.Size() == 0 -} - -// Clear deletes all data of the map, it will remake a new underlying data map. -func (m *IntStrMap) Clear() { - m.mu.Lock() - m.data = make(map[int]string) - m.mu.Unlock() -} - -// Replace the data of the map with given `data`. -func (m *IntStrMap) Replace(data map[int]string) { - m.mu.Lock() - m.data = data - m.mu.Unlock() -} - -// LockFunc locks writing with given callback function `f` within RWMutex.Lock. -func (m *IntStrMap) LockFunc(f func(m map[int]string)) { - m.mu.Lock() - defer m.mu.Unlock() - f(m.data) -} - -// RLockFunc locks reading with given callback function `f` within RWMutex.RLock. -func (m *IntStrMap) RLockFunc(f func(m map[int]string)) { - m.mu.RLock() - defer m.mu.RUnlock() - f(m.data) -} - -// Flip exchanges key-value of the map to value-key. -func (m *IntStrMap) Flip() { - m.mu.Lock() - defer m.mu.Unlock() - n := make(map[int]string, len(m.data)) - for k, v := range m.data { - n[gconv.Int(v)] = gconv.String(k) - } - m.data = n -} - -// Merge merges two hash maps. -// The `other` map will be merged into the map `m`. -func (m *IntStrMap) Merge(other *IntStrMap) { - m.mu.Lock() - defer m.mu.Unlock() - if m.data == nil { - m.data = other.MapCopy() - return - } - if other != m { - other.mu.RLock() - defer other.mu.RUnlock() - } - for k, v := range other.data { - m.data[k] = v - } -} - -// String returns the map as a string. 
-func (m *IntStrMap) String() string { - if m == nil { - return "" - } - b, _ := m.MarshalJSON() - return string(b) -} - -// MarshalJSON implements the interface MarshalJSON for json.Marshal. -func (m IntStrMap) MarshalJSON() ([]byte, error) { - m.mu.RLock() - defer m.mu.RUnlock() - return json.Marshal(m.data) -} - -// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. -func (m *IntStrMap) UnmarshalJSON(b []byte) error { - m.mu.Lock() - defer m.mu.Unlock() - if m.data == nil { - m.data = make(map[int]string) - } - if err := json.UnmarshalUseNumber(b, &m.data); err != nil { - return err - } - return nil -} - -// UnmarshalValue is an interface implement which sets any type of value for map. -func (m *IntStrMap) UnmarshalValue(value interface{}) (err error) { - m.mu.Lock() - defer m.mu.Unlock() - if m.data == nil { - m.data = make(map[int]string) - } - switch value.(type) { - case string, []byte: - return json.UnmarshalUseNumber(gconv.Bytes(value), &m.data) - default: - for k, v := range gconv.Map(value) { - m.data[gconv.Int(k)] = gconv.String(v) - } - } - return -} - -// DeepCopy implements interface for deep copy of current type. -func (m *IntStrMap) DeepCopy() interface{} { - if m == nil { - return nil - } - m.mu.RLock() - defer m.mu.RUnlock() - data := make(map[int]string, len(m.data)) - for k, v := range m.data { - data[k] = v - } - return NewIntStrMapFrom(data, m.mu.IsSafe()) -} - -// IsSubOf checks whether the current map is a sub-map of `other`. -func (m *IntStrMap) IsSubOf(other *IntStrMap) bool { - if m == other { - return true - } - m.mu.RLock() - defer m.mu.RUnlock() - other.mu.RLock() - defer other.mu.RUnlock() - for key, value := range m.data { - otherValue, ok := other.data[key] - if !ok { - return false - } - if otherValue != value { - return false - } - } - return true -} - -// Diff compares current map `m` with map `other` and returns their different keys. 
-// The returned `addedKeys` are the keys that are in map `m` but not in map `other`. -// The returned `removedKeys` are the keys that are in map `other` but not in map `m`. -// The returned `updatedKeys` are the keys that are both in map `m` and `other` but their values and not equal (`!=`). -func (m *IntStrMap) Diff(other *IntStrMap) (addedKeys, removedKeys, updatedKeys []int) { - m.mu.RLock() - defer m.mu.RUnlock() - other.mu.RLock() - defer other.mu.RUnlock() - - for key := range m.data { - if _, ok := other.data[key]; !ok { - removedKeys = append(removedKeys, key) - } else if m.data[key] != other.data[key] { - updatedKeys = append(updatedKeys, key) - } - } - for key := range other.data { - if _, ok := m.data[key]; !ok { - addedKeys = append(addedKeys, key) - } - } - return -} diff --git a/vendor/github.com/gogf/gf/v2/container/gmap/gmap_hash_str_any_map.go b/vendor/github.com/gogf/gf/v2/container/gmap/gmap_hash_str_any_map.go deleted file mode 100644 index c3749c31..00000000 --- a/vendor/github.com/gogf/gf/v2/container/gmap/gmap_hash_str_any_map.go +++ /dev/null @@ -1,550 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with gm file, -// You can obtain one at https://github.com/gogf/gf. -// - -package gmap - -import ( - "github.com/gogf/gf/v2/container/gvar" - "github.com/gogf/gf/v2/internal/deepcopy" - "github.com/gogf/gf/v2/internal/empty" - "github.com/gogf/gf/v2/internal/json" - "github.com/gogf/gf/v2/internal/rwmutex" - "github.com/gogf/gf/v2/util/gconv" - "reflect" -) - -// StrAnyMap implements map[string]interface{} with RWMutex that has switch. -type StrAnyMap struct { - mu rwmutex.RWMutex - data map[string]interface{} -} - -// NewStrAnyMap returns an empty StrAnyMap object. -// The parameter `safe` is used to specify whether using map in concurrent-safety, -// which is false in default. 
-func NewStrAnyMap(safe ...bool) *StrAnyMap { - return &StrAnyMap{ - mu: rwmutex.Create(safe...), - data: make(map[string]interface{}), - } -} - -// NewStrAnyMapFrom creates and returns a hash map from given map `data`. -// Note that, the param `data` map will be set as the underlying data map(no deep copy), -// there might be some concurrent-safe issues when changing the map outside. -func NewStrAnyMapFrom(data map[string]interface{}, safe ...bool) *StrAnyMap { - return &StrAnyMap{ - mu: rwmutex.Create(safe...), - data: data, - } -} - -// Iterator iterates the hash map readonly with custom callback function `f`. -// If `f` returns true, then it continues iterating; or false to stop. -func (m *StrAnyMap) Iterator(f func(k string, v interface{}) bool) { - m.mu.RLock() - defer m.mu.RUnlock() - for k, v := range m.data { - if !f(k, v) { - break - } - } -} - -// Clone returns a new hash map with copy of current map data. -func (m *StrAnyMap) Clone() *StrAnyMap { - return NewStrAnyMapFrom(m.MapCopy(), m.mu.IsSafe()) -} - -// Map returns the underlying data map. -// Note that, if it's in concurrent-safe usage, it returns a copy of underlying data, -// or else a pointer to the underlying data. -func (m *StrAnyMap) Map() map[string]interface{} { - m.mu.RLock() - defer m.mu.RUnlock() - if !m.mu.IsSafe() { - return m.data - } - data := make(map[string]interface{}, len(m.data)) - for k, v := range m.data { - data[k] = v - } - return data -} - -// MapStrAny returns a copy of the underlying data of the map as map[string]interface{}. -func (m *StrAnyMap) MapStrAny() map[string]interface{} { - return m.Map() -} - -// MapCopy returns a copy of the underlying data of the hash map. -func (m *StrAnyMap) MapCopy() map[string]interface{} { - m.mu.RLock() - defer m.mu.RUnlock() - data := make(map[string]interface{}, len(m.data)) - for k, v := range m.data { - data[k] = v - } - return data -} - -// FilterEmpty deletes all key-value pair of which the value is empty. 
-// Values like: 0, nil, false, "", len(slice/map/chan) == 0 are considered empty. -func (m *StrAnyMap) FilterEmpty() { - m.mu.Lock() - for k, v := range m.data { - if empty.IsEmpty(v) { - delete(m.data, k) - } - } - m.mu.Unlock() -} - -// FilterNil deletes all key-value pair of which the value is nil. -func (m *StrAnyMap) FilterNil() { - m.mu.Lock() - defer m.mu.Unlock() - for k, v := range m.data { - if empty.IsNil(v) { - delete(m.data, k) - } - } -} - -// Set sets key-value to the hash map. -func (m *StrAnyMap) Set(key string, val interface{}) { - m.mu.Lock() - if m.data == nil { - m.data = make(map[string]interface{}) - } - m.data[key] = val - m.mu.Unlock() -} - -// Sets batch sets key-values to the hash map. -func (m *StrAnyMap) Sets(data map[string]interface{}) { - m.mu.Lock() - if m.data == nil { - m.data = data - } else { - for k, v := range data { - m.data[k] = v - } - } - m.mu.Unlock() -} - -// Search searches the map with given `key`. -// Second return parameter `found` is true if key was found, otherwise false. -func (m *StrAnyMap) Search(key string) (value interface{}, found bool) { - m.mu.RLock() - if m.data != nil { - value, found = m.data[key] - } - m.mu.RUnlock() - return -} - -// Get returns the value by given `key`. -func (m *StrAnyMap) Get(key string) (value interface{}) { - m.mu.RLock() - if m.data != nil { - value = m.data[key] - } - m.mu.RUnlock() - return -} - -// Pop retrieves and deletes an item from the map. -func (m *StrAnyMap) Pop() (key string, value interface{}) { - m.mu.Lock() - defer m.mu.Unlock() - for key, value = range m.data { - delete(m.data, key) - return - } - return -} - -// Pops retrieves and deletes `size` items from the map. -// It returns all items if size == -1. 
-func (m *StrAnyMap) Pops(size int) map[string]interface{} { - m.mu.Lock() - defer m.mu.Unlock() - if size > len(m.data) || size == -1 { - size = len(m.data) - } - if size == 0 { - return nil - } - var ( - index = 0 - newMap = make(map[string]interface{}, size) - ) - for k, v := range m.data { - delete(m.data, k) - newMap[k] = v - index++ - if index == size { - break - } - } - return newMap -} - -// doSetWithLockCheck checks whether value of the key exists with mutex.Lock, -// if not exists, set value to the map with given `key`, -// or else just return the existing value. -// -// When setting value, if `value` is type of `func() interface {}`, -// it will be executed with mutex.Lock of the hash map, -// and its return value will be set to the map with `key`. -// -// It returns value with given `key`. -func (m *StrAnyMap) doSetWithLockCheck(key string, value interface{}) interface{} { - m.mu.Lock() - defer m.mu.Unlock() - if m.data == nil { - m.data = make(map[string]interface{}) - } - if v, ok := m.data[key]; ok { - return v - } - if f, ok := value.(func() interface{}); ok { - value = f() - } - if value != nil { - m.data[key] = value - } - return value -} - -// GetOrSet returns the value by key, -// or sets value with given `value` if it does not exist and then returns this value. -func (m *StrAnyMap) GetOrSet(key string, value interface{}) interface{} { - if v, ok := m.Search(key); !ok { - return m.doSetWithLockCheck(key, value) - } else { - return v - } -} - -// GetOrSetFunc returns the value by key, -// or sets value with returned value of callback function `f` if it does not exist -// and then returns this value. -func (m *StrAnyMap) GetOrSetFunc(key string, f func() interface{}) interface{} { - if v, ok := m.Search(key); !ok { - return m.doSetWithLockCheck(key, f()) - } else { - return v - } -} - -// GetOrSetFuncLock returns the value by key, -// or sets value with returned value of callback function `f` if it does not exist -// and then returns this value. 
-// -// GetOrSetFuncLock differs with GetOrSetFunc function is that it executes function `f` -// with mutex.Lock of the hash map. -func (m *StrAnyMap) GetOrSetFuncLock(key string, f func() interface{}) interface{} { - if v, ok := m.Search(key); !ok { - return m.doSetWithLockCheck(key, f) - } else { - return v - } -} - -// GetVar returns a Var with the value by given `key`. -// The returned Var is un-concurrent safe. -func (m *StrAnyMap) GetVar(key string) *gvar.Var { - return gvar.New(m.Get(key)) -} - -// GetVarOrSet returns a Var with result from GetVarOrSet. -// The returned Var is un-concurrent safe. -func (m *StrAnyMap) GetVarOrSet(key string, value interface{}) *gvar.Var { - return gvar.New(m.GetOrSet(key, value)) -} - -// GetVarOrSetFunc returns a Var with result from GetOrSetFunc. -// The returned Var is un-concurrent safe. -func (m *StrAnyMap) GetVarOrSetFunc(key string, f func() interface{}) *gvar.Var { - return gvar.New(m.GetOrSetFunc(key, f)) -} - -// GetVarOrSetFuncLock returns a Var with result from GetOrSetFuncLock. -// The returned Var is un-concurrent safe. -func (m *StrAnyMap) GetVarOrSetFuncLock(key string, f func() interface{}) *gvar.Var { - return gvar.New(m.GetOrSetFuncLock(key, f)) -} - -// SetIfNotExist sets `value` to the map if the `key` does not exist, and then returns true. -// It returns false if `key` exists, and `value` would be ignored. -func (m *StrAnyMap) SetIfNotExist(key string, value interface{}) bool { - if !m.Contains(key) { - m.doSetWithLockCheck(key, value) - return true - } - return false -} - -// SetIfNotExistFunc sets value with return value of callback function `f`, and then returns true. -// It returns false if `key` exists, and `value` would be ignored. 
-func (m *StrAnyMap) SetIfNotExistFunc(key string, f func() interface{}) bool { - if !m.Contains(key) { - m.doSetWithLockCheck(key, f()) - return true - } - return false -} - -// SetIfNotExistFuncLock sets value with return value of callback function `f`, and then returns true. -// It returns false if `key` exists, and `value` would be ignored. -// -// SetIfNotExistFuncLock differs with SetIfNotExistFunc function is that -// it executes function `f` with mutex.Lock of the hash map. -func (m *StrAnyMap) SetIfNotExistFuncLock(key string, f func() interface{}) bool { - if !m.Contains(key) { - m.doSetWithLockCheck(key, f) - return true - } - return false -} - -// Removes batch deletes values of the map by keys. -func (m *StrAnyMap) Removes(keys []string) { - m.mu.Lock() - if m.data != nil { - for _, key := range keys { - delete(m.data, key) - } - } - m.mu.Unlock() -} - -// Remove deletes value from map by given `key`, and return this deleted value. -func (m *StrAnyMap) Remove(key string) (value interface{}) { - m.mu.Lock() - if m.data != nil { - var ok bool - if value, ok = m.data[key]; ok { - delete(m.data, key) - } - } - m.mu.Unlock() - return -} - -// Keys returns all keys of the map as a slice. -func (m *StrAnyMap) Keys() []string { - m.mu.RLock() - var ( - keys = make([]string, len(m.data)) - index = 0 - ) - for key := range m.data { - keys[index] = key - index++ - } - m.mu.RUnlock() - return keys -} - -// Values returns all values of the map as a slice. -func (m *StrAnyMap) Values() []interface{} { - m.mu.RLock() - var ( - values = make([]interface{}, len(m.data)) - index = 0 - ) - for _, value := range m.data { - values[index] = value - index++ - } - m.mu.RUnlock() - return values -} - -// Contains checks whether a key exists. -// It returns true if the `key` exists, or else false. 
-func (m *StrAnyMap) Contains(key string) bool { - var ok bool - m.mu.RLock() - if m.data != nil { - _, ok = m.data[key] - } - m.mu.RUnlock() - return ok -} - -// Size returns the size of the map. -func (m *StrAnyMap) Size() int { - m.mu.RLock() - length := len(m.data) - m.mu.RUnlock() - return length -} - -// IsEmpty checks whether the map is empty. -// It returns true if map is empty, or else false. -func (m *StrAnyMap) IsEmpty() bool { - return m.Size() == 0 -} - -// Clear deletes all data of the map, it will remake a new underlying data map. -func (m *StrAnyMap) Clear() { - m.mu.Lock() - m.data = make(map[string]interface{}) - m.mu.Unlock() -} - -// Replace the data of the map with given `data`. -func (m *StrAnyMap) Replace(data map[string]interface{}) { - m.mu.Lock() - m.data = data - m.mu.Unlock() -} - -// LockFunc locks writing with given callback function `f` within RWMutex.Lock. -func (m *StrAnyMap) LockFunc(f func(m map[string]interface{})) { - m.mu.Lock() - defer m.mu.Unlock() - f(m.data) -} - -// RLockFunc locks reading with given callback function `f` within RWMutex.RLock. -func (m *StrAnyMap) RLockFunc(f func(m map[string]interface{})) { - m.mu.RLock() - defer m.mu.RUnlock() - f(m.data) -} - -// Flip exchanges key-value of the map to value-key. -func (m *StrAnyMap) Flip() { - m.mu.Lock() - defer m.mu.Unlock() - n := make(map[string]interface{}, len(m.data)) - for k, v := range m.data { - n[gconv.String(v)] = k - } - m.data = n -} - -// Merge merges two hash maps. -// The `other` map will be merged into the map `m`. -func (m *StrAnyMap) Merge(other *StrAnyMap) { - m.mu.Lock() - defer m.mu.Unlock() - if m.data == nil { - m.data = other.MapCopy() - return - } - if other != m { - other.mu.RLock() - defer other.mu.RUnlock() - } - for k, v := range other.data { - m.data[k] = v - } -} - -// String returns the map as a string. 
-func (m *StrAnyMap) String() string { - if m == nil { - return "" - } - b, _ := m.MarshalJSON() - return string(b) -} - -// MarshalJSON implements the interface MarshalJSON for json.Marshal. -func (m StrAnyMap) MarshalJSON() ([]byte, error) { - m.mu.RLock() - defer m.mu.RUnlock() - return json.Marshal(m.data) -} - -// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. -func (m *StrAnyMap) UnmarshalJSON(b []byte) error { - m.mu.Lock() - defer m.mu.Unlock() - if m.data == nil { - m.data = make(map[string]interface{}) - } - if err := json.UnmarshalUseNumber(b, &m.data); err != nil { - return err - } - return nil -} - -// UnmarshalValue is an interface implement which sets any type of value for map. -func (m *StrAnyMap) UnmarshalValue(value interface{}) (err error) { - m.mu.Lock() - defer m.mu.Unlock() - m.data = gconv.Map(value) - return -} - -// DeepCopy implements interface for deep copy of current type. -func (m *StrAnyMap) DeepCopy() interface{} { - if m == nil { - return nil - } - m.mu.RLock() - defer m.mu.RUnlock() - data := make(map[string]interface{}, len(m.data)) - for k, v := range m.data { - data[k] = deepcopy.Copy(v) - } - return NewStrAnyMapFrom(data, m.mu.IsSafe()) -} - -// IsSubOf checks whether the current map is a sub-map of `other`. -func (m *StrAnyMap) IsSubOf(other *StrAnyMap) bool { - if m == other { - return true - } - m.mu.RLock() - defer m.mu.RUnlock() - other.mu.RLock() - defer other.mu.RUnlock() - for key, value := range m.data { - otherValue, ok := other.data[key] - if !ok { - return false - } - if otherValue != value { - return false - } - } - return true -} - -// Diff compares current map `m` with map `other` and returns their different keys. -// The returned `addedKeys` are the keys that are in map `m` but not in map `other`. -// The returned `removedKeys` are the keys that are in map `other` but not in map `m`. 
-// The returned `updatedKeys` are the keys that are both in map `m` and `other` but their values and not equal (`!=`). -func (m *StrAnyMap) Diff(other *StrAnyMap) (addedKeys, removedKeys, updatedKeys []string) { - m.mu.RLock() - defer m.mu.RUnlock() - other.mu.RLock() - defer other.mu.RUnlock() - - for key := range m.data { - if _, ok := other.data[key]; !ok { - removedKeys = append(removedKeys, key) - } else if !reflect.DeepEqual(m.data[key], other.data[key]) { - updatedKeys = append(updatedKeys, key) - } - } - for key := range other.data { - if _, ok := m.data[key]; !ok { - addedKeys = append(addedKeys, key) - } - } - return -} diff --git a/vendor/github.com/gogf/gf/v2/container/gmap/gmap_hash_str_int_map.go b/vendor/github.com/gogf/gf/v2/container/gmap/gmap_hash_str_int_map.go deleted file mode 100644 index 55582efa..00000000 --- a/vendor/github.com/gogf/gf/v2/container/gmap/gmap_hash_str_int_map.go +++ /dev/null @@ -1,537 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with gm file, -// You can obtain one at https://github.com/gogf/gf. -// - -package gmap - -import ( - "github.com/gogf/gf/v2/internal/empty" - "github.com/gogf/gf/v2/internal/json" - "github.com/gogf/gf/v2/internal/rwmutex" - "github.com/gogf/gf/v2/util/gconv" -) - -// StrIntMap implements map[string]int with RWMutex that has switch. -type StrIntMap struct { - mu rwmutex.RWMutex - data map[string]int -} - -// NewStrIntMap returns an empty StrIntMap object. -// The parameter `safe` is used to specify whether using map in concurrent-safety, -// which is false in default. -func NewStrIntMap(safe ...bool) *StrIntMap { - return &StrIntMap{ - mu: rwmutex.Create(safe...), - data: make(map[string]int), - } -} - -// NewStrIntMapFrom creates and returns a hash map from given map `data`. 
-// Note that, the param `data` map will be set as the underlying data map(no deep copy), -// there might be some concurrent-safe issues when changing the map outside. -func NewStrIntMapFrom(data map[string]int, safe ...bool) *StrIntMap { - return &StrIntMap{ - mu: rwmutex.Create(safe...), - data: data, - } -} - -// Iterator iterates the hash map readonly with custom callback function `f`. -// If `f` returns true, then it continues iterating; or false to stop. -func (m *StrIntMap) Iterator(f func(k string, v int) bool) { - m.mu.RLock() - defer m.mu.RUnlock() - for k, v := range m.data { - if !f(k, v) { - break - } - } -} - -// Clone returns a new hash map with copy of current map data. -func (m *StrIntMap) Clone() *StrIntMap { - return NewStrIntMapFrom(m.MapCopy(), m.mu.IsSafe()) -} - -// Map returns the underlying data map. -// Note that, if it's in concurrent-safe usage, it returns a copy of underlying data, -// or else a pointer to the underlying data. -func (m *StrIntMap) Map() map[string]int { - m.mu.RLock() - defer m.mu.RUnlock() - if !m.mu.IsSafe() { - return m.data - } - data := make(map[string]int, len(m.data)) - for k, v := range m.data { - data[k] = v - } - return data -} - -// MapStrAny returns a copy of the underlying data of the map as map[string]interface{}. -func (m *StrIntMap) MapStrAny() map[string]interface{} { - m.mu.RLock() - defer m.mu.RUnlock() - data := make(map[string]interface{}, len(m.data)) - for k, v := range m.data { - data[k] = v - } - return data -} - -// MapCopy returns a copy of the underlying data of the hash map. -func (m *StrIntMap) MapCopy() map[string]int { - m.mu.RLock() - defer m.mu.RUnlock() - data := make(map[string]int, len(m.data)) - for k, v := range m.data { - data[k] = v - } - return data -} - -// FilterEmpty deletes all key-value pair of which the value is empty. -// Values like: 0, nil, false, "", len(slice/map/chan) == 0 are considered empty. 
-func (m *StrIntMap) FilterEmpty() { - m.mu.Lock() - for k, v := range m.data { - if empty.IsEmpty(v) { - delete(m.data, k) - } - } - m.mu.Unlock() -} - -// Set sets key-value to the hash map. -func (m *StrIntMap) Set(key string, val int) { - m.mu.Lock() - if m.data == nil { - m.data = make(map[string]int) - } - m.data[key] = val - m.mu.Unlock() -} - -// Sets batch sets key-values to the hash map. -func (m *StrIntMap) Sets(data map[string]int) { - m.mu.Lock() - if m.data == nil { - m.data = data - } else { - for k, v := range data { - m.data[k] = v - } - } - m.mu.Unlock() -} - -// Search searches the map with given `key`. -// Second return parameter `found` is true if key was found, otherwise false. -func (m *StrIntMap) Search(key string) (value int, found bool) { - m.mu.RLock() - if m.data != nil { - value, found = m.data[key] - } - m.mu.RUnlock() - return -} - -// Get returns the value by given `key`. -func (m *StrIntMap) Get(key string) (value int) { - m.mu.RLock() - if m.data != nil { - value = m.data[key] - } - m.mu.RUnlock() - return -} - -// Pop retrieves and deletes an item from the map. -func (m *StrIntMap) Pop() (key string, value int) { - m.mu.Lock() - defer m.mu.Unlock() - for key, value = range m.data { - delete(m.data, key) - return - } - return -} - -// Pops retrieves and deletes `size` items from the map. -// It returns all items if size == -1. -func (m *StrIntMap) Pops(size int) map[string]int { - m.mu.Lock() - defer m.mu.Unlock() - if size > len(m.data) || size == -1 { - size = len(m.data) - } - if size == 0 { - return nil - } - var ( - index = 0 - newMap = make(map[string]int, size) - ) - for k, v := range m.data { - delete(m.data, k) - newMap[k] = v - index++ - if index == size { - break - } - } - return newMap -} - -// doSetWithLockCheck checks whether value of the key exists with mutex.Lock, -// if not exists, set value to the map with given `key`, -// or else just return the existing value. -// -// It returns value with given `key`. 
-func (m *StrIntMap) doSetWithLockCheck(key string, value int) int { - m.mu.Lock() - if m.data == nil { - m.data = make(map[string]int) - } - if v, ok := m.data[key]; ok { - m.mu.Unlock() - return v - } - m.data[key] = value - m.mu.Unlock() - return value -} - -// GetOrSet returns the value by key, -// or sets value with given `value` if it does not exist and then returns this value. -func (m *StrIntMap) GetOrSet(key string, value int) int { - if v, ok := m.Search(key); !ok { - return m.doSetWithLockCheck(key, value) - } else { - return v - } -} - -// GetOrSetFunc returns the value by key, -// or sets value with returned value of callback function `f` if it does not exist -// and then returns this value. -func (m *StrIntMap) GetOrSetFunc(key string, f func() int) int { - if v, ok := m.Search(key); !ok { - return m.doSetWithLockCheck(key, f()) - } else { - return v - } -} - -// GetOrSetFuncLock returns the value by key, -// or sets value with returned value of callback function `f` if it does not exist -// and then returns this value. -// -// GetOrSetFuncLock differs with GetOrSetFunc function is that it executes function `f` -// with mutex.Lock of the hash map. -func (m *StrIntMap) GetOrSetFuncLock(key string, f func() int) int { - if v, ok := m.Search(key); !ok { - m.mu.Lock() - defer m.mu.Unlock() - if m.data == nil { - m.data = make(map[string]int) - } - if v, ok = m.data[key]; ok { - return v - } - v = f() - m.data[key] = v - return v - } else { - return v - } -} - -// SetIfNotExist sets `value` to the map if the `key` does not exist, and then returns true. -// It returns false if `key` exists, and `value` would be ignored. -func (m *StrIntMap) SetIfNotExist(key string, value int) bool { - if !m.Contains(key) { - m.doSetWithLockCheck(key, value) - return true - } - return false -} - -// SetIfNotExistFunc sets value with return value of callback function `f`, and then returns true. -// It returns false if `key` exists, and `value` would be ignored. 
-func (m *StrIntMap) SetIfNotExistFunc(key string, f func() int) bool { - if !m.Contains(key) { - m.doSetWithLockCheck(key, f()) - return true - } - return false -} - -// SetIfNotExistFuncLock sets value with return value of callback function `f`, and then returns true. -// It returns false if `key` exists, and `value` would be ignored. -// -// SetIfNotExistFuncLock differs with SetIfNotExistFunc function is that -// it executes function `f` with mutex.Lock of the hash map. -func (m *StrIntMap) SetIfNotExistFuncLock(key string, f func() int) bool { - if !m.Contains(key) { - m.mu.Lock() - defer m.mu.Unlock() - if m.data == nil { - m.data = make(map[string]int) - } - if _, ok := m.data[key]; !ok { - m.data[key] = f() - } - return true - } - return false -} - -// Removes batch deletes values of the map by keys. -func (m *StrIntMap) Removes(keys []string) { - m.mu.Lock() - if m.data != nil { - for _, key := range keys { - delete(m.data, key) - } - } - m.mu.Unlock() -} - -// Remove deletes value from map by given `key`, and return this deleted value. -func (m *StrIntMap) Remove(key string) (value int) { - m.mu.Lock() - if m.data != nil { - var ok bool - if value, ok = m.data[key]; ok { - delete(m.data, key) - } - } - m.mu.Unlock() - return -} - -// Keys returns all keys of the map as a slice. -func (m *StrIntMap) Keys() []string { - m.mu.RLock() - var ( - keys = make([]string, len(m.data)) - index = 0 - ) - for key := range m.data { - keys[index] = key - index++ - } - m.mu.RUnlock() - return keys -} - -// Values returns all values of the map as a slice. -func (m *StrIntMap) Values() []int { - m.mu.RLock() - var ( - values = make([]int, len(m.data)) - index = 0 - ) - for _, value := range m.data { - values[index] = value - index++ - } - m.mu.RUnlock() - return values -} - -// Contains checks whether a key exists. -// It returns true if the `key` exists, or else false. 
-func (m *StrIntMap) Contains(key string) bool { - var ok bool - m.mu.RLock() - if m.data != nil { - _, ok = m.data[key] - } - m.mu.RUnlock() - return ok -} - -// Size returns the size of the map. -func (m *StrIntMap) Size() int { - m.mu.RLock() - length := len(m.data) - m.mu.RUnlock() - return length -} - -// IsEmpty checks whether the map is empty. -// It returns true if map is empty, or else false. -func (m *StrIntMap) IsEmpty() bool { - return m.Size() == 0 -} - -// Clear deletes all data of the map, it will remake a new underlying data map. -func (m *StrIntMap) Clear() { - m.mu.Lock() - m.data = make(map[string]int) - m.mu.Unlock() -} - -// Replace the data of the map with given `data`. -func (m *StrIntMap) Replace(data map[string]int) { - m.mu.Lock() - m.data = data - m.mu.Unlock() -} - -// LockFunc locks writing with given callback function `f` within RWMutex.Lock. -func (m *StrIntMap) LockFunc(f func(m map[string]int)) { - m.mu.Lock() - defer m.mu.Unlock() - f(m.data) -} - -// RLockFunc locks reading with given callback function `f` within RWMutex.RLock. -func (m *StrIntMap) RLockFunc(f func(m map[string]int)) { - m.mu.RLock() - defer m.mu.RUnlock() - f(m.data) -} - -// Flip exchanges key-value of the map to value-key. -func (m *StrIntMap) Flip() { - m.mu.Lock() - defer m.mu.Unlock() - n := make(map[string]int, len(m.data)) - for k, v := range m.data { - n[gconv.String(v)] = gconv.Int(k) - } - m.data = n -} - -// Merge merges two hash maps. -// The `other` map will be merged into the map `m`. -func (m *StrIntMap) Merge(other *StrIntMap) { - m.mu.Lock() - defer m.mu.Unlock() - if m.data == nil { - m.data = other.MapCopy() - return - } - if other != m { - other.mu.RLock() - defer other.mu.RUnlock() - } - for k, v := range other.data { - m.data[k] = v - } -} - -// String returns the map as a string. 
-func (m *StrIntMap) String() string { - if m == nil { - return "" - } - b, _ := m.MarshalJSON() - return string(b) -} - -// MarshalJSON implements the interface MarshalJSON for json.Marshal. -func (m StrIntMap) MarshalJSON() ([]byte, error) { - m.mu.RLock() - defer m.mu.RUnlock() - return json.Marshal(m.data) -} - -// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. -func (m *StrIntMap) UnmarshalJSON(b []byte) error { - m.mu.Lock() - defer m.mu.Unlock() - if m.data == nil { - m.data = make(map[string]int) - } - if err := json.UnmarshalUseNumber(b, &m.data); err != nil { - return err - } - return nil -} - -// UnmarshalValue is an interface implement which sets any type of value for map. -func (m *StrIntMap) UnmarshalValue(value interface{}) (err error) { - m.mu.Lock() - defer m.mu.Unlock() - if m.data == nil { - m.data = make(map[string]int) - } - switch value.(type) { - case string, []byte: - return json.UnmarshalUseNumber(gconv.Bytes(value), &m.data) - default: - for k, v := range gconv.Map(value) { - m.data[k] = gconv.Int(v) - } - } - return -} - -// DeepCopy implements interface for deep copy of current type. -func (m *StrIntMap) DeepCopy() interface{} { - if m == nil { - return nil - } - m.mu.RLock() - defer m.mu.RUnlock() - data := make(map[string]int, len(m.data)) - for k, v := range m.data { - data[k] = v - } - return NewStrIntMapFrom(data, m.mu.IsSafe()) -} - -// IsSubOf checks whether the current map is a sub-map of `other`. -func (m *StrIntMap) IsSubOf(other *StrIntMap) bool { - if m == other { - return true - } - m.mu.RLock() - defer m.mu.RUnlock() - other.mu.RLock() - defer other.mu.RUnlock() - for key, value := range m.data { - otherValue, ok := other.data[key] - if !ok { - return false - } - if otherValue != value { - return false - } - } - return true -} - -// Diff compares current map `m` with map `other` and returns their different keys. -// The returned `addedKeys` are the keys that are in map `m` but not in map `other`. 
-// The returned `removedKeys` are the keys that are in map `other` but not in map `m`. -// The returned `updatedKeys` are the keys that are both in map `m` and `other` but their values and not equal (`!=`). -func (m *StrIntMap) Diff(other *StrIntMap) (addedKeys, removedKeys, updatedKeys []string) { - m.mu.RLock() - defer m.mu.RUnlock() - other.mu.RLock() - defer other.mu.RUnlock() - - for key := range m.data { - if _, ok := other.data[key]; !ok { - removedKeys = append(removedKeys, key) - } else if m.data[key] != other.data[key] { - updatedKeys = append(updatedKeys, key) - } - } - for key := range other.data { - if _, ok := m.data[key]; !ok { - addedKeys = append(addedKeys, key) - } - } - return -} diff --git a/vendor/github.com/gogf/gf/v2/container/gmap/gmap_hash_str_str_map.go b/vendor/github.com/gogf/gf/v2/container/gmap/gmap_hash_str_str_map.go deleted file mode 100644 index 066107a7..00000000 --- a/vendor/github.com/gogf/gf/v2/container/gmap/gmap_hash_str_str_map.go +++ /dev/null @@ -1,526 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with gm file, -// You can obtain one at https://github.com/gogf/gf. -// - -package gmap - -import ( - "github.com/gogf/gf/v2/internal/empty" - "github.com/gogf/gf/v2/internal/json" - "github.com/gogf/gf/v2/internal/rwmutex" - "github.com/gogf/gf/v2/util/gconv" -) - -// StrStrMap implements map[string]string with RWMutex that has switch. -type StrStrMap struct { - mu rwmutex.RWMutex - data map[string]string -} - -// NewStrStrMap returns an empty StrStrMap object. -// The parameter `safe` is used to specify whether using map in concurrent-safety, -// which is false in default. 
-func NewStrStrMap(safe ...bool) *StrStrMap { - return &StrStrMap{ - data: make(map[string]string), - mu: rwmutex.Create(safe...), - } -} - -// NewStrStrMapFrom creates and returns a hash map from given map `data`. -// Note that, the param `data` map will be set as the underlying data map(no deep copy), -// there might be some concurrent-safe issues when changing the map outside. -func NewStrStrMapFrom(data map[string]string, safe ...bool) *StrStrMap { - return &StrStrMap{ - mu: rwmutex.Create(safe...), - data: data, - } -} - -// Iterator iterates the hash map readonly with custom callback function `f`. -// If `f` returns true, then it continues iterating; or false to stop. -func (m *StrStrMap) Iterator(f func(k string, v string) bool) { - m.mu.RLock() - defer m.mu.RUnlock() - for k, v := range m.data { - if !f(k, v) { - break - } - } -} - -// Clone returns a new hash map with copy of current map data. -func (m *StrStrMap) Clone() *StrStrMap { - return NewStrStrMapFrom(m.MapCopy(), m.mu.IsSafe()) -} - -// Map returns the underlying data map. -// Note that, if it's in concurrent-safe usage, it returns a copy of underlying data, -// or else a pointer to the underlying data. -func (m *StrStrMap) Map() map[string]string { - m.mu.RLock() - defer m.mu.RUnlock() - if !m.mu.IsSafe() { - return m.data - } - data := make(map[string]string, len(m.data)) - for k, v := range m.data { - data[k] = v - } - return data -} - -// MapStrAny returns a copy of the underlying data of the map as map[string]interface{}. -func (m *StrStrMap) MapStrAny() map[string]interface{} { - m.mu.RLock() - data := make(map[string]interface{}, len(m.data)) - for k, v := range m.data { - data[k] = v - } - m.mu.RUnlock() - return data -} - -// MapCopy returns a copy of the underlying data of the hash map. 
-func (m *StrStrMap) MapCopy() map[string]string { - m.mu.RLock() - defer m.mu.RUnlock() - data := make(map[string]string, len(m.data)) - for k, v := range m.data { - data[k] = v - } - return data -} - -// FilterEmpty deletes all key-value pair of which the value is empty. -// Values like: 0, nil, false, "", len(slice/map/chan) == 0 are considered empty. -func (m *StrStrMap) FilterEmpty() { - m.mu.Lock() - for k, v := range m.data { - if empty.IsEmpty(v) { - delete(m.data, k) - } - } - m.mu.Unlock() -} - -// Set sets key-value to the hash map. -func (m *StrStrMap) Set(key string, val string) { - m.mu.Lock() - if m.data == nil { - m.data = make(map[string]string) - } - m.data[key] = val - m.mu.Unlock() -} - -// Sets batch sets key-values to the hash map. -func (m *StrStrMap) Sets(data map[string]string) { - m.mu.Lock() - if m.data == nil { - m.data = data - } else { - for k, v := range data { - m.data[k] = v - } - } - m.mu.Unlock() -} - -// Search searches the map with given `key`. -// Second return parameter `found` is true if key was found, otherwise false. -func (m *StrStrMap) Search(key string) (value string, found bool) { - m.mu.RLock() - if m.data != nil { - value, found = m.data[key] - } - m.mu.RUnlock() - return -} - -// Get returns the value by given `key`. -func (m *StrStrMap) Get(key string) (value string) { - m.mu.RLock() - if m.data != nil { - value = m.data[key] - } - m.mu.RUnlock() - return -} - -// Pop retrieves and deletes an item from the map. -func (m *StrStrMap) Pop() (key, value string) { - m.mu.Lock() - defer m.mu.Unlock() - for key, value = range m.data { - delete(m.data, key) - return - } - return -} - -// Pops retrieves and deletes `size` items from the map. -// It returns all items if size == -1. 
-func (m *StrStrMap) Pops(size int) map[string]string { - m.mu.Lock() - defer m.mu.Unlock() - if size > len(m.data) || size == -1 { - size = len(m.data) - } - if size == 0 { - return nil - } - var ( - index = 0 - newMap = make(map[string]string, size) - ) - for k, v := range m.data { - delete(m.data, k) - newMap[k] = v - index++ - if index == size { - break - } - } - return newMap -} - -// doSetWithLockCheck checks whether value of the key exists with mutex.Lock, -// if not exists, set value to the map with given `key`, -// or else just return the existing value. -// -// It returns value with given `key`. -func (m *StrStrMap) doSetWithLockCheck(key string, value string) string { - m.mu.Lock() - defer m.mu.Unlock() - if m.data == nil { - m.data = make(map[string]string) - } - if v, ok := m.data[key]; ok { - return v - } - m.data[key] = value - return value -} - -// GetOrSet returns the value by key, -// or sets value with given `value` if it does not exist and then returns this value. -func (m *StrStrMap) GetOrSet(key string, value string) string { - if v, ok := m.Search(key); !ok { - return m.doSetWithLockCheck(key, value) - } else { - return v - } -} - -// GetOrSetFunc returns the value by key, -// or sets value with returned value of callback function `f` if it does not exist -// and then returns this value. -func (m *StrStrMap) GetOrSetFunc(key string, f func() string) string { - if v, ok := m.Search(key); !ok { - return m.doSetWithLockCheck(key, f()) - } else { - return v - } -} - -// GetOrSetFuncLock returns the value by key, -// or sets value with returned value of callback function `f` if it does not exist -// and then returns this value. -// -// GetOrSetFuncLock differs with GetOrSetFunc function is that it executes function `f` -// with mutex.Lock of the hash map. 
-func (m *StrStrMap) GetOrSetFuncLock(key string, f func() string) string { - if v, ok := m.Search(key); !ok { - m.mu.Lock() - defer m.mu.Unlock() - if m.data == nil { - m.data = make(map[string]string) - } - if v, ok = m.data[key]; ok { - return v - } - v = f() - m.data[key] = v - return v - } else { - return v - } -} - -// SetIfNotExist sets `value` to the map if the `key` does not exist, and then returns true. -// It returns false if `key` exists, and `value` would be ignored. -func (m *StrStrMap) SetIfNotExist(key string, value string) bool { - if !m.Contains(key) { - m.doSetWithLockCheck(key, value) - return true - } - return false -} - -// SetIfNotExistFunc sets value with return value of callback function `f`, and then returns true. -// It returns false if `key` exists, and `value` would be ignored. -func (m *StrStrMap) SetIfNotExistFunc(key string, f func() string) bool { - if !m.Contains(key) { - m.doSetWithLockCheck(key, f()) - return true - } - return false -} - -// SetIfNotExistFuncLock sets value with return value of callback function `f`, and then returns true. -// It returns false if `key` exists, and `value` would be ignored. -// -// SetIfNotExistFuncLock differs with SetIfNotExistFunc function is that -// it executes function `f` with mutex.Lock of the hash map. -func (m *StrStrMap) SetIfNotExistFuncLock(key string, f func() string) bool { - if !m.Contains(key) { - m.mu.Lock() - defer m.mu.Unlock() - if m.data == nil { - m.data = make(map[string]string) - } - if _, ok := m.data[key]; !ok { - m.data[key] = f() - } - return true - } - return false -} - -// Removes batch deletes values of the map by keys. -func (m *StrStrMap) Removes(keys []string) { - m.mu.Lock() - if m.data != nil { - for _, key := range keys { - delete(m.data, key) - } - } - m.mu.Unlock() -} - -// Remove deletes value from map by given `key`, and return this deleted value. 
-func (m *StrStrMap) Remove(key string) (value string) { - m.mu.Lock() - if m.data != nil { - var ok bool - if value, ok = m.data[key]; ok { - delete(m.data, key) - } - } - m.mu.Unlock() - return -} - -// Keys returns all keys of the map as a slice. -func (m *StrStrMap) Keys() []string { - m.mu.RLock() - var ( - keys = make([]string, len(m.data)) - index = 0 - ) - for key := range m.data { - keys[index] = key - index++ - } - m.mu.RUnlock() - return keys -} - -// Values returns all values of the map as a slice. -func (m *StrStrMap) Values() []string { - m.mu.RLock() - var ( - values = make([]string, len(m.data)) - index = 0 - ) - for _, value := range m.data { - values[index] = value - index++ - } - m.mu.RUnlock() - return values -} - -// Contains checks whether a key exists. -// It returns true if the `key` exists, or else false. -func (m *StrStrMap) Contains(key string) bool { - var ok bool - m.mu.RLock() - if m.data != nil { - _, ok = m.data[key] - } - m.mu.RUnlock() - return ok -} - -// Size returns the size of the map. -func (m *StrStrMap) Size() int { - m.mu.RLock() - length := len(m.data) - m.mu.RUnlock() - return length -} - -// IsEmpty checks whether the map is empty. -// It returns true if map is empty, or else false. -func (m *StrStrMap) IsEmpty() bool { - return m.Size() == 0 -} - -// Clear deletes all data of the map, it will remake a new underlying data map. -func (m *StrStrMap) Clear() { - m.mu.Lock() - m.data = make(map[string]string) - m.mu.Unlock() -} - -// Replace the data of the map with given `data`. -func (m *StrStrMap) Replace(data map[string]string) { - m.mu.Lock() - m.data = data - m.mu.Unlock() -} - -// LockFunc locks writing with given callback function `f` within RWMutex.Lock. -func (m *StrStrMap) LockFunc(f func(m map[string]string)) { - m.mu.Lock() - defer m.mu.Unlock() - f(m.data) -} - -// RLockFunc locks reading with given callback function `f` within RWMutex.RLock. 
-func (m *StrStrMap) RLockFunc(f func(m map[string]string)) { - m.mu.RLock() - defer m.mu.RUnlock() - f(m.data) -} - -// Flip exchanges key-value of the map to value-key. -func (m *StrStrMap) Flip() { - m.mu.Lock() - defer m.mu.Unlock() - n := make(map[string]string, len(m.data)) - for k, v := range m.data { - n[v] = k - } - m.data = n -} - -// Merge merges two hash maps. -// The `other` map will be merged into the map `m`. -func (m *StrStrMap) Merge(other *StrStrMap) { - m.mu.Lock() - defer m.mu.Unlock() - if m.data == nil { - m.data = other.MapCopy() - return - } - if other != m { - other.mu.RLock() - defer other.mu.RUnlock() - } - for k, v := range other.data { - m.data[k] = v - } -} - -// String returns the map as a string. -func (m *StrStrMap) String() string { - if m == nil { - return "" - } - b, _ := m.MarshalJSON() - return string(b) -} - -// MarshalJSON implements the interface MarshalJSON for json.Marshal. -func (m StrStrMap) MarshalJSON() ([]byte, error) { - m.mu.RLock() - defer m.mu.RUnlock() - return json.Marshal(m.data) -} - -// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. -func (m *StrStrMap) UnmarshalJSON(b []byte) error { - m.mu.Lock() - defer m.mu.Unlock() - if m.data == nil { - m.data = make(map[string]string) - } - if err := json.UnmarshalUseNumber(b, &m.data); err != nil { - return err - } - return nil -} - -// UnmarshalValue is an interface implement which sets any type of value for map. -func (m *StrStrMap) UnmarshalValue(value interface{}) (err error) { - m.mu.Lock() - defer m.mu.Unlock() - m.data = gconv.MapStrStr(value) - return -} - -// DeepCopy implements interface for deep copy of current type. 
-func (m *StrStrMap) DeepCopy() interface{} { - if m == nil { - return nil - } - m.mu.RLock() - defer m.mu.RUnlock() - data := make(map[string]string, len(m.data)) - for k, v := range m.data { - data[k] = v - } - return NewStrStrMapFrom(data, m.mu.IsSafe()) -} - -// IsSubOf checks whether the current map is a sub-map of `other`. -func (m *StrStrMap) IsSubOf(other *StrStrMap) bool { - if m == other { - return true - } - m.mu.RLock() - defer m.mu.RUnlock() - other.mu.RLock() - defer other.mu.RUnlock() - for key, value := range m.data { - otherValue, ok := other.data[key] - if !ok { - return false - } - if otherValue != value { - return false - } - } - return true -} - -// Diff compares current map `m` with map `other` and returns their different keys. -// The returned `addedKeys` are the keys that are in map `m` but not in map `other`. -// The returned `removedKeys` are the keys that are in map `other` but not in map `m`. -// The returned `updatedKeys` are the keys that are both in map `m` and `other` but their values and not equal (`!=`). -func (m *StrStrMap) Diff(other *StrStrMap) (addedKeys, removedKeys, updatedKeys []string) { - m.mu.RLock() - defer m.mu.RUnlock() - other.mu.RLock() - defer other.mu.RUnlock() - - for key := range m.data { - if _, ok := other.data[key]; !ok { - removedKeys = append(removedKeys, key) - } else if m.data[key] != other.data[key] { - updatedKeys = append(updatedKeys, key) - } - } - for key := range other.data { - if _, ok := m.data[key]; !ok { - addedKeys = append(addedKeys, key) - } - } - return -} diff --git a/vendor/github.com/gogf/gf/v2/container/gmap/gmap_list_map.go b/vendor/github.com/gogf/gf/v2/container/gmap/gmap_list_map.go deleted file mode 100644 index 3197a5ca..00000000 --- a/vendor/github.com/gogf/gf/v2/container/gmap/gmap_list_map.go +++ /dev/null @@ -1,612 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. 
-// If a copy of the MIT was not distributed with gm file, -// You can obtain one at https://github.com/gogf/gf. - -package gmap - -import ( - "bytes" - "fmt" - - "github.com/gogf/gf/v2/container/glist" - "github.com/gogf/gf/v2/container/gvar" - "github.com/gogf/gf/v2/internal/deepcopy" - "github.com/gogf/gf/v2/internal/empty" - "github.com/gogf/gf/v2/internal/json" - "github.com/gogf/gf/v2/internal/rwmutex" - "github.com/gogf/gf/v2/util/gconv" -) - -// ListMap is a map that preserves insertion-order. -// -// It is backed by a hash table to store values and doubly-linked list to store ordering. -// -// Structure is not thread safe. -// -// Reference: http://en.wikipedia.org/wiki/Associative_array -type ListMap struct { - mu rwmutex.RWMutex - data map[interface{}]*glist.Element - list *glist.List -} - -type gListMapNode struct { - key interface{} - value interface{} -} - -// NewListMap returns an empty link map. -// ListMap is backed by a hash table to store values and doubly-linked list to store ordering. -// The parameter `safe` is used to specify whether using map in concurrent-safety, -// which is false in default. -func NewListMap(safe ...bool) *ListMap { - return &ListMap{ - mu: rwmutex.Create(safe...), - data: make(map[interface{}]*glist.Element), - list: glist.New(), - } -} - -// NewListMapFrom returns a link map from given map `data`. -// Note that, the param `data` map will be set as the underlying data map(no deep copy), -// there might be some concurrent-safe issues when changing the map outside. -func NewListMapFrom(data map[interface{}]interface{}, safe ...bool) *ListMap { - m := NewListMap(safe...) - m.Sets(data) - return m -} - -// Iterator is alias of IteratorAsc. -func (m *ListMap) Iterator(f func(key, value interface{}) bool) { - m.IteratorAsc(f) -} - -// IteratorAsc iterates the map readonly in ascending order with given callback function `f`. -// If `f` returns true, then it continues iterating; or false to stop. 
-func (m *ListMap) IteratorAsc(f func(key interface{}, value interface{}) bool) { - m.mu.RLock() - defer m.mu.RUnlock() - if m.list != nil { - var node *gListMapNode - m.list.IteratorAsc(func(e *glist.Element) bool { - node = e.Value.(*gListMapNode) - return f(node.key, node.value) - }) - } -} - -// IteratorDesc iterates the map readonly in descending order with given callback function `f`. -// If `f` returns true, then it continues iterating; or false to stop. -func (m *ListMap) IteratorDesc(f func(key interface{}, value interface{}) bool) { - m.mu.RLock() - defer m.mu.RUnlock() - if m.list != nil { - var node *gListMapNode - m.list.IteratorDesc(func(e *glist.Element) bool { - node = e.Value.(*gListMapNode) - return f(node.key, node.value) - }) - } -} - -// Clone returns a new link map with copy of current map data. -func (m *ListMap) Clone(safe ...bool) *ListMap { - return NewListMapFrom(m.Map(), safe...) -} - -// Clear deletes all data of the map, it will remake a new underlying data map. -func (m *ListMap) Clear() { - m.mu.Lock() - m.data = make(map[interface{}]*glist.Element) - m.list = glist.New() - m.mu.Unlock() -} - -// Replace the data of the map with given `data`. -func (m *ListMap) Replace(data map[interface{}]interface{}) { - m.mu.Lock() - m.data = make(map[interface{}]*glist.Element) - m.list = glist.New() - for key, value := range data { - if e, ok := m.data[key]; !ok { - m.data[key] = m.list.PushBack(&gListMapNode{key, value}) - } else { - e.Value = &gListMapNode{key, value} - } - } - m.mu.Unlock() -} - -// Map returns a copy of the underlying data of the map. 
-func (m *ListMap) Map() map[interface{}]interface{} { - m.mu.RLock() - var node *gListMapNode - var data map[interface{}]interface{} - if m.list != nil { - data = make(map[interface{}]interface{}, len(m.data)) - m.list.IteratorAsc(func(e *glist.Element) bool { - node = e.Value.(*gListMapNode) - data[node.key] = node.value - return true - }) - } - m.mu.RUnlock() - return data -} - -// MapStrAny returns a copy of the underlying data of the map as map[string]interface{}. -func (m *ListMap) MapStrAny() map[string]interface{} { - m.mu.RLock() - var node *gListMapNode - var data map[string]interface{} - if m.list != nil { - data = make(map[string]interface{}, len(m.data)) - m.list.IteratorAsc(func(e *glist.Element) bool { - node = e.Value.(*gListMapNode) - data[gconv.String(node.key)] = node.value - return true - }) - } - m.mu.RUnlock() - return data -} - -// FilterEmpty deletes all key-value pair of which the value is empty. -func (m *ListMap) FilterEmpty() { - m.mu.Lock() - if m.list != nil { - var ( - keys = make([]interface{}, 0) - node *gListMapNode - ) - m.list.IteratorAsc(func(e *glist.Element) bool { - node = e.Value.(*gListMapNode) - if empty.IsEmpty(node.value) { - keys = append(keys, node.key) - } - return true - }) - if len(keys) > 0 { - for _, key := range keys { - if e, ok := m.data[key]; ok { - delete(m.data, key) - m.list.Remove(e) - } - } - } - } - m.mu.Unlock() -} - -// Set sets key-value to the map. -func (m *ListMap) Set(key interface{}, value interface{}) { - m.mu.Lock() - if m.data == nil { - m.data = make(map[interface{}]*glist.Element) - m.list = glist.New() - } - if e, ok := m.data[key]; !ok { - m.data[key] = m.list.PushBack(&gListMapNode{key, value}) - } else { - e.Value = &gListMapNode{key, value} - } - m.mu.Unlock() -} - -// Sets batch sets key-values to the map. 
-func (m *ListMap) Sets(data map[interface{}]interface{}) { - m.mu.Lock() - if m.data == nil { - m.data = make(map[interface{}]*glist.Element) - m.list = glist.New() - } - for key, value := range data { - if e, ok := m.data[key]; !ok { - m.data[key] = m.list.PushBack(&gListMapNode{key, value}) - } else { - e.Value = &gListMapNode{key, value} - } - } - m.mu.Unlock() -} - -// Search searches the map with given `key`. -// Second return parameter `found` is true if key was found, otherwise false. -func (m *ListMap) Search(key interface{}) (value interface{}, found bool) { - m.mu.RLock() - if m.data != nil { - if e, ok := m.data[key]; ok { - value = e.Value.(*gListMapNode).value - found = ok - } - } - m.mu.RUnlock() - return -} - -// Get returns the value by given `key`. -func (m *ListMap) Get(key interface{}) (value interface{}) { - m.mu.RLock() - if m.data != nil { - if e, ok := m.data[key]; ok { - value = e.Value.(*gListMapNode).value - } - } - m.mu.RUnlock() - return -} - -// Pop retrieves and deletes an item from the map. -func (m *ListMap) Pop() (key, value interface{}) { - m.mu.Lock() - defer m.mu.Unlock() - for k, e := range m.data { - value = e.Value.(*gListMapNode).value - delete(m.data, k) - m.list.Remove(e) - return k, value - } - return -} - -// Pops retrieves and deletes `size` items from the map. -// It returns all items if size == -1. 
-func (m *ListMap) Pops(size int) map[interface{}]interface{} { - m.mu.Lock() - defer m.mu.Unlock() - if size > len(m.data) || size == -1 { - size = len(m.data) - } - if size == 0 { - return nil - } - index := 0 - newMap := make(map[interface{}]interface{}, size) - for k, e := range m.data { - value := e.Value.(*gListMapNode).value - delete(m.data, k) - m.list.Remove(e) - newMap[k] = value - index++ - if index == size { - break - } - } - return newMap -} - -// doSetWithLockCheck checks whether value of the key exists with mutex.Lock, -// if not exists, set value to the map with given `key`, -// or else just return the existing value. -// -// When setting value, if `value` is type of `func() interface {}`, -// it will be executed with mutex.Lock of the map, -// and its return value will be set to the map with `key`. -// -// It returns value with given `key`. -func (m *ListMap) doSetWithLockCheck(key interface{}, value interface{}) interface{} { - m.mu.Lock() - defer m.mu.Unlock() - if m.data == nil { - m.data = make(map[interface{}]*glist.Element) - m.list = glist.New() - } - if e, ok := m.data[key]; ok { - return e.Value.(*gListMapNode).value - } - if f, ok := value.(func() interface{}); ok { - value = f() - } - if value != nil { - m.data[key] = m.list.PushBack(&gListMapNode{key, value}) - } - return value -} - -// GetOrSet returns the value by key, -// or sets value with given `value` if it does not exist and then returns this value. -func (m *ListMap) GetOrSet(key interface{}, value interface{}) interface{} { - if v, ok := m.Search(key); !ok { - return m.doSetWithLockCheck(key, value) - } else { - return v - } -} - -// GetOrSetFunc returns the value by key, -// or sets value with returned value of callback function `f` if it does not exist -// and then returns this value. 
-func (m *ListMap) GetOrSetFunc(key interface{}, f func() interface{}) interface{} { - if v, ok := m.Search(key); !ok { - return m.doSetWithLockCheck(key, f()) - } else { - return v - } -} - -// GetOrSetFuncLock returns the value by key, -// or sets value with returned value of callback function `f` if it does not exist -// and then returns this value. -// -// GetOrSetFuncLock differs with GetOrSetFunc function is that it executes function `f` -// with mutex.Lock of the map. -func (m *ListMap) GetOrSetFuncLock(key interface{}, f func() interface{}) interface{} { - if v, ok := m.Search(key); !ok { - return m.doSetWithLockCheck(key, f) - } else { - return v - } -} - -// GetVar returns a Var with the value by given `key`. -// The returned Var is un-concurrent safe. -func (m *ListMap) GetVar(key interface{}) *gvar.Var { - return gvar.New(m.Get(key)) -} - -// GetVarOrSet returns a Var with result from GetVarOrSet. -// The returned Var is un-concurrent safe. -func (m *ListMap) GetVarOrSet(key interface{}, value interface{}) *gvar.Var { - return gvar.New(m.GetOrSet(key, value)) -} - -// GetVarOrSetFunc returns a Var with result from GetOrSetFunc. -// The returned Var is un-concurrent safe. -func (m *ListMap) GetVarOrSetFunc(key interface{}, f func() interface{}) *gvar.Var { - return gvar.New(m.GetOrSetFunc(key, f)) -} - -// GetVarOrSetFuncLock returns a Var with result from GetOrSetFuncLock. -// The returned Var is un-concurrent safe. -func (m *ListMap) GetVarOrSetFuncLock(key interface{}, f func() interface{}) *gvar.Var { - return gvar.New(m.GetOrSetFuncLock(key, f)) -} - -// SetIfNotExist sets `value` to the map if the `key` does not exist, and then returns true. -// It returns false if `key` exists, and `value` would be ignored. 
-func (m *ListMap) SetIfNotExist(key interface{}, value interface{}) bool { - if !m.Contains(key) { - m.doSetWithLockCheck(key, value) - return true - } - return false -} - -// SetIfNotExistFunc sets value with return value of callback function `f`, and then returns true. -// It returns false if `key` exists, and `value` would be ignored. -func (m *ListMap) SetIfNotExistFunc(key interface{}, f func() interface{}) bool { - if !m.Contains(key) { - m.doSetWithLockCheck(key, f()) - return true - } - return false -} - -// SetIfNotExistFuncLock sets value with return value of callback function `f`, and then returns true. -// It returns false if `key` exists, and `value` would be ignored. -// -// SetIfNotExistFuncLock differs with SetIfNotExistFunc function is that -// it executes function `f` with mutex.Lock of the map. -func (m *ListMap) SetIfNotExistFuncLock(key interface{}, f func() interface{}) bool { - if !m.Contains(key) { - m.doSetWithLockCheck(key, f) - return true - } - return false -} - -// Remove deletes value from map by given `key`, and return this deleted value. -func (m *ListMap) Remove(key interface{}) (value interface{}) { - m.mu.Lock() - if m.data != nil { - if e, ok := m.data[key]; ok { - value = e.Value.(*gListMapNode).value - delete(m.data, key) - m.list.Remove(e) - } - } - m.mu.Unlock() - return -} - -// Removes batch deletes values of the map by keys. -func (m *ListMap) Removes(keys []interface{}) { - m.mu.Lock() - if m.data != nil { - for _, key := range keys { - if e, ok := m.data[key]; ok { - delete(m.data, key) - m.list.Remove(e) - } - } - } - m.mu.Unlock() -} - -// Keys returns all keys of the map as a slice in ascending order. 
-func (m *ListMap) Keys() []interface{} { - m.mu.RLock() - var ( - keys = make([]interface{}, m.list.Len()) - index = 0 - ) - if m.list != nil { - m.list.IteratorAsc(func(e *glist.Element) bool { - keys[index] = e.Value.(*gListMapNode).key - index++ - return true - }) - } - m.mu.RUnlock() - return keys -} - -// Values returns all values of the map as a slice. -func (m *ListMap) Values() []interface{} { - m.mu.RLock() - var ( - values = make([]interface{}, m.list.Len()) - index = 0 - ) - if m.list != nil { - m.list.IteratorAsc(func(e *glist.Element) bool { - values[index] = e.Value.(*gListMapNode).value - index++ - return true - }) - } - m.mu.RUnlock() - return values -} - -// Contains checks whether a key exists. -// It returns true if the `key` exists, or else false. -func (m *ListMap) Contains(key interface{}) (ok bool) { - m.mu.RLock() - if m.data != nil { - _, ok = m.data[key] - } - m.mu.RUnlock() - return -} - -// Size returns the size of the map. -func (m *ListMap) Size() (size int) { - m.mu.RLock() - size = len(m.data) - m.mu.RUnlock() - return -} - -// IsEmpty checks whether the map is empty. -// It returns true if map is empty, or else false. -func (m *ListMap) IsEmpty() bool { - return m.Size() == 0 -} - -// Flip exchanges key-value of the map to value-key. -func (m *ListMap) Flip() { - data := m.Map() - m.Clear() - for key, value := range data { - m.Set(value, key) - } -} - -// Merge merges two link maps. -// The `other` map will be merged into the map `m`. 
-func (m *ListMap) Merge(other *ListMap) { - m.mu.Lock() - defer m.mu.Unlock() - if m.data == nil { - m.data = make(map[interface{}]*glist.Element) - m.list = glist.New() - } - if other != m { - other.mu.RLock() - defer other.mu.RUnlock() - } - var node *gListMapNode - other.list.IteratorAsc(func(e *glist.Element) bool { - node = e.Value.(*gListMapNode) - if e, ok := m.data[node.key]; !ok { - m.data[node.key] = m.list.PushBack(&gListMapNode{node.key, node.value}) - } else { - e.Value = &gListMapNode{node.key, node.value} - } - return true - }) -} - -// String returns the map as a string. -func (m *ListMap) String() string { - if m == nil { - return "" - } - b, _ := m.MarshalJSON() - return string(b) -} - -// MarshalJSON implements the interface MarshalJSON for json.Marshal. -func (m ListMap) MarshalJSON() (jsonBytes []byte, err error) { - if m.data == nil { - return []byte("null"), nil - } - buffer := bytes.NewBuffer(nil) - buffer.WriteByte('{') - m.Iterator(func(key, value interface{}) bool { - valueBytes, valueJsonErr := json.Marshal(value) - if valueJsonErr != nil { - err = valueJsonErr - return false - } - if buffer.Len() > 1 { - buffer.WriteByte(',') - } - buffer.WriteString(fmt.Sprintf(`"%v":%s`, key, valueBytes)) - return true - }) - buffer.WriteByte('}') - return buffer.Bytes(), nil -} - -// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. -func (m *ListMap) UnmarshalJSON(b []byte) error { - m.mu.Lock() - defer m.mu.Unlock() - if m.data == nil { - m.data = make(map[interface{}]*glist.Element) - m.list = glist.New() - } - var data map[string]interface{} - if err := json.UnmarshalUseNumber(b, &data); err != nil { - return err - } - for key, value := range data { - if e, ok := m.data[key]; !ok { - m.data[key] = m.list.PushBack(&gListMapNode{key, value}) - } else { - e.Value = &gListMapNode{key, value} - } - } - return nil -} - -// UnmarshalValue is an interface implement which sets any type of value for map. 
-func (m *ListMap) UnmarshalValue(value interface{}) (err error) { - m.mu.Lock() - defer m.mu.Unlock() - if m.data == nil { - m.data = make(map[interface{}]*glist.Element) - m.list = glist.New() - } - for k, v := range gconv.Map(value) { - if e, ok := m.data[k]; !ok { - m.data[k] = m.list.PushBack(&gListMapNode{k, v}) - } else { - e.Value = &gListMapNode{k, v} - } - } - return -} - -// DeepCopy implements interface for deep copy of current type. -func (m *ListMap) DeepCopy() interface{} { - if m == nil { - return nil - } - m.mu.RLock() - defer m.mu.RUnlock() - data := make(map[interface{}]interface{}, len(m.data)) - if m.list != nil { - var node *gListMapNode - m.list.IteratorAsc(func(e *glist.Element) bool { - node = e.Value.(*gListMapNode) - data[node.key] = deepcopy.Copy(node.value) - return true - }) - } - return NewListMapFrom(data, m.mu.IsSafe()) -} diff --git a/vendor/github.com/gogf/gf/v2/container/gmap/gmap_tree_map.go b/vendor/github.com/gogf/gf/v2/container/gmap/gmap_tree_map.go deleted file mode 100644 index c81caa48..00000000 --- a/vendor/github.com/gogf/gf/v2/container/gmap/gmap_tree_map.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with gm file, -// You can obtain one at https://github.com/gogf/gf. - -package gmap - -import ( - "github.com/gogf/gf/v2/container/gtree" -) - -// TreeMap based on red-black tree, alias of RedBlackTree. -type TreeMap = gtree.RedBlackTree - -// NewTreeMap instantiates a tree map with the custom comparator. -// The parameter `safe` is used to specify whether using tree in concurrent-safety, -// which is false in default. -func NewTreeMap(comparator func(v1, v2 interface{}) int, safe ...bool) *TreeMap { - return gtree.NewRedBlackTree(comparator, safe...) -} - -// NewTreeMapFrom instantiates a tree map with the custom comparator and `data` map. 
-// Note that, the param `data` map will be set as the underlying data map(no deep copy), -// there might be some concurrent-safe issues when changing the map outside. -// The parameter `safe` is used to specify whether using tree in concurrent-safety, -// which is false in default. -func NewTreeMapFrom(comparator func(v1, v2 interface{}) int, data map[interface{}]interface{}, safe ...bool) *TreeMap { - return gtree.NewRedBlackTreeFrom(comparator, data, safe...) -} diff --git a/vendor/github.com/gogf/gf/v2/container/gpool/gpool.go b/vendor/github.com/gogf/gf/v2/container/gpool/gpool.go deleted file mode 100644 index d58fb781..00000000 --- a/vendor/github.com/gogf/gf/v2/container/gpool/gpool.go +++ /dev/null @@ -1,188 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -// Package gpool provides object-reusable concurrent-safe pool. -package gpool - -import ( - "context" - "time" - - "github.com/gogf/gf/v2/container/glist" - "github.com/gogf/gf/v2/container/gtype" - "github.com/gogf/gf/v2/errors/gcode" - "github.com/gogf/gf/v2/errors/gerror" - "github.com/gogf/gf/v2/os/gtime" - "github.com/gogf/gf/v2/os/gtimer" -) - -// Pool is an Object-Reusable Pool. -type Pool struct { - list *glist.List // Available/idle items list. - closed *gtype.Bool // Whether the pool is closed. - TTL time.Duration // Time To Live for pool items. - NewFunc func() (interface{}, error) // Callback function to create pool item. - // ExpireFunc is the for expired items destruction. - // This function needs to be defined when the pool items - // need to perform additional destruction operations. - // Eg: net.Conn, os.File, etc. - ExpireFunc func(interface{}) -} - -// Pool item. -type poolItem struct { - value interface{} // Item value. 
- expireAt int64 // Expire timestamp in milliseconds. -} - -// NewFunc Creation function for object. -type NewFunc func() (interface{}, error) - -// ExpireFunc Destruction function for object. -type ExpireFunc func(interface{}) - -// New creates and returns a new object pool. -// To ensure execution efficiency, the expiration time cannot be modified once it is set. -// -// Note the expiration logic: -// ttl = 0 : not expired; -// ttl < 0 : immediate expired after use; -// ttl > 0 : timeout expired; -func New(ttl time.Duration, newFunc NewFunc, expireFunc ...ExpireFunc) *Pool { - r := &Pool{ - list: glist.New(true), - closed: gtype.NewBool(), - TTL: ttl, - NewFunc: newFunc, - } - if len(expireFunc) > 0 { - r.ExpireFunc = expireFunc[0] - } - gtimer.AddSingleton(context.Background(), time.Second, r.checkExpireItems) - return r -} - -// Put puts an item to pool. -func (p *Pool) Put(value interface{}) error { - if p.closed.Val() { - return gerror.NewCode(gcode.CodeInvalidOperation, "pool is closed") - } - item := &poolItem{ - value: value, - } - if p.TTL == 0 { - item.expireAt = 0 - } else { - // As for Golang version < 1.13, there's no method Milliseconds for time.Duration. - // So we need calculate the milliseconds using its nanoseconds value. - item.expireAt = gtime.TimestampMilli() + p.TTL.Nanoseconds()/1000000 - } - p.list.PushBack(item) - return nil -} - -// MustPut puts an item to pool, it panics if any error occurs. -func (p *Pool) MustPut(value interface{}) { - if err := p.Put(value); err != nil { - panic(err) - } -} - -// Clear clears pool, which means it will remove all items from pool. -func (p *Pool) Clear() { - if p.ExpireFunc != nil { - for { - if r := p.list.PopFront(); r != nil { - p.ExpireFunc(r.(*poolItem).value) - } else { - break - } - } - } else { - p.list.RemoveAll() - } -} - -// Get picks and returns an item from pool. If the pool is empty and NewFunc is defined, -// it creates and returns one from NewFunc. 
-func (p *Pool) Get() (interface{}, error) { - for !p.closed.Val() { - if r := p.list.PopFront(); r != nil { - f := r.(*poolItem) - if f.expireAt == 0 || f.expireAt > gtime.TimestampMilli() { - return f.value, nil - } else if p.ExpireFunc != nil { - // TODO: move expire function calling asynchronously out from `Get` operation. - p.ExpireFunc(f.value) - } - } else { - break - } - } - if p.NewFunc != nil { - return p.NewFunc() - } - return nil, gerror.NewCode(gcode.CodeInvalidOperation, "pool is empty") -} - -// Size returns the count of available items of pool. -func (p *Pool) Size() int { - return p.list.Len() -} - -// Close closes the pool. If `p` has ExpireFunc, -// then it automatically closes all items using this function before it's closed. -// Commonly you do not need to call this function manually. -func (p *Pool) Close() { - p.closed.Set(true) -} - -// checkExpire removes expired items from pool in every second. -func (p *Pool) checkExpireItems(ctx context.Context) { - if p.closed.Val() { - // If p has ExpireFunc, - // then it must close all items using this function. - if p.ExpireFunc != nil { - for { - if r := p.list.PopFront(); r != nil { - p.ExpireFunc(r.(*poolItem).value) - } else { - break - } - } - } - gtimer.Exit() - } - // All items do not expire. - if p.TTL == 0 { - return - } - // The latest item expire timestamp in milliseconds. - var latestExpire int64 = -1 - // Retrieve the current timestamp in milliseconds, it expires the items - // by comparing with this timestamp. It is not accurate comparison for - // every item expired, but high performance. - var timestampMilli = gtime.TimestampMilli() - for { - if latestExpire > timestampMilli { - break - } - if r := p.list.PopFront(); r != nil { - item := r.(*poolItem) - latestExpire = item.expireAt - // TODO improve the auto-expiration mechanism of the pool. 
- if item.expireAt > timestampMilli { - p.list.PushFront(item) - break - } - if p.ExpireFunc != nil { - p.ExpireFunc(item.value) - } - } else { - break - } - } -} diff --git a/vendor/github.com/gogf/gf/v2/container/gqueue/gqueue.go b/vendor/github.com/gogf/gf/v2/container/gqueue/gqueue.go deleted file mode 100644 index 5923db1b..00000000 --- a/vendor/github.com/gogf/gf/v2/container/gqueue/gqueue.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -// Package gqueue provides dynamic/static concurrent-safe queue. -// -// Features: -// -// 1. FIFO queue(data -> list -> chan); -// -// 2. Fast creation and initialization; -// -// 3. Support dynamic queue size(unlimited queue size); -// -// 4. Blocking when reading data from queue; -package gqueue - -import ( - "math" - - "github.com/gogf/gf/v2/container/glist" - "github.com/gogf/gf/v2/container/gtype" -) - -// Queue is a concurrent-safe queue built on doubly linked list and channel. -type Queue struct { - limit int // Limit for queue size. - list *glist.List // Underlying list structure for data maintaining. - closed *gtype.Bool // Whether queue is closed. - events chan struct{} // Events for data writing. - C chan interface{} // Underlying channel for data reading. -} - -const ( - defaultQueueSize = 10000 // Size for queue buffer. - defaultBatchSize = 10 // Max batch size per-fetching from list. -) - -// New returns an empty queue object. -// Optional parameter `limit` is used to limit the size of the queue, which is unlimited in default. -// When `limit` is given, the queue will be static and high performance which is comparable with stdlib channel. 
-func New(limit ...int) *Queue { - q := &Queue{ - closed: gtype.NewBool(), - } - if len(limit) > 0 && limit[0] > 0 { - q.limit = limit[0] - q.C = make(chan interface{}, limit[0]) - } else { - q.list = glist.New(true) - q.events = make(chan struct{}, math.MaxInt32) - q.C = make(chan interface{}, defaultQueueSize) - go q.asyncLoopFromListToChannel() - } - return q -} - -// Push pushes the data `v` into the queue. -// Note that it would panic if Push is called after the queue is closed. -func (q *Queue) Push(v interface{}) { - if q.limit > 0 { - q.C <- v - } else { - q.list.PushBack(v) - if len(q.events) < defaultQueueSize { - q.events <- struct{}{} - } - } -} - -// Pop pops an item from the queue in FIFO way. -// Note that it would return nil immediately if Pop is called after the queue is closed. -func (q *Queue) Pop() interface{} { - return <-q.C -} - -// Close closes the queue. -// Notice: It would notify all goroutines return immediately, -// which are being blocked reading using Pop method. -func (q *Queue) Close() { - if !q.closed.Cas(false, true) { - return - } - if q.events != nil { - close(q.events) - } - if q.limit > 0 { - close(q.C) - } else { - for i := 0; i < defaultBatchSize; i++ { - q.Pop() - } - } -} - -// Len returns the length of the queue. -// Note that the result might not be accurate if using unlimited queue size as there's an -// asynchronous channel reading the list constantly. -func (q *Queue) Len() (length int64) { - bufferedSize := int64(len(q.C)) - if q.limit > 0 { - return bufferedSize - } - return int64(q.list.Size()) + bufferedSize -} - -// Size is alias of Len. -// Deprecated: use Len instead. -func (q *Queue) Size() int64 { - return q.Len() -} - -// asyncLoopFromListToChannel starts an asynchronous goroutine, -// which handles the data synchronization from list `q.list` to channel `q.C`. 
-func (q *Queue) asyncLoopFromListToChannel() { - defer func() { - if q.closed.Val() { - _ = recover() - } - }() - for !q.closed.Val() { - <-q.events - for !q.closed.Val() { - if bufferLength := q.list.Len(); bufferLength > 0 { - // When q.C is closed, it will panic here, especially q.C is being blocked for writing. - // If any error occurs here, it will be caught by recover and be ignored. - for i := 0; i < bufferLength; i++ { - q.C <- q.list.PopFront() - } - } else { - break - } - } - // Clear q.events to remain just one event to do the next synchronization check. - for i := 0; i < len(q.events)-1; i++ { - <-q.events - } - } - // It should be here to close `q.C` if `q` is unlimited size. - // It's the sender's responsibility to close channel when it should be closed. - close(q.C) -} diff --git a/vendor/github.com/gogf/gf/v2/container/gset/gset_any_set.go b/vendor/github.com/gogf/gf/v2/container/gset/gset_any_set.go deleted file mode 100644 index 179862ca..00000000 --- a/vendor/github.com/gogf/gf/v2/container/gset/gset_any_set.go +++ /dev/null @@ -1,526 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -// Package gset provides kinds of concurrent-safe/unsafe sets. -package gset - -import ( - "bytes" - - "github.com/gogf/gf/v2/internal/json" - "github.com/gogf/gf/v2/internal/rwmutex" - "github.com/gogf/gf/v2/text/gstr" - "github.com/gogf/gf/v2/util/gconv" -) - -type Set struct { - mu rwmutex.RWMutex - data map[interface{}]struct{} -} - -// New create and returns a new set, which contains un-repeated items. -// The parameter `safe` is used to specify whether using set in concurrent-safety, -// which is false in default. -func New(safe ...bool) *Set { - return NewSet(safe...) 
-} - -// NewSet create and returns a new set, which contains un-repeated items. -// Also see New. -func NewSet(safe ...bool) *Set { - return &Set{ - data: make(map[interface{}]struct{}), - mu: rwmutex.Create(safe...), - } -} - -// NewFrom returns a new set from `items`. -// Parameter `items` can be either a variable of any type, or a slice. -func NewFrom(items interface{}, safe ...bool) *Set { - m := make(map[interface{}]struct{}) - for _, v := range gconv.Interfaces(items) { - m[v] = struct{}{} - } - return &Set{ - data: m, - mu: rwmutex.Create(safe...), - } -} - -// Iterator iterates the set readonly with given callback function `f`, -// if `f` returns true then continue iterating; or false to stop. -func (set *Set) Iterator(f func(v interface{}) bool) { - set.mu.RLock() - defer set.mu.RUnlock() - for k := range set.data { - if !f(k) { - break - } - } -} - -// Add adds one or multiple items to the set. -func (set *Set) Add(items ...interface{}) { - set.mu.Lock() - if set.data == nil { - set.data = make(map[interface{}]struct{}) - } - for _, v := range items { - set.data[v] = struct{}{} - } - set.mu.Unlock() -} - -// AddIfNotExist checks whether item exists in the set, -// it adds the item to set and returns true if it does not exists in the set, -// or else it does nothing and returns false. -// -// Note that, if `item` is nil, it does nothing and returns false. -func (set *Set) AddIfNotExist(item interface{}) bool { - if item == nil { - return false - } - if !set.Contains(item) { - set.mu.Lock() - defer set.mu.Unlock() - if set.data == nil { - set.data = make(map[interface{}]struct{}) - } - if _, ok := set.data[item]; !ok { - set.data[item] = struct{}{} - return true - } - } - return false -} - -// AddIfNotExistFunc checks whether item exists in the set, -// it adds the item to set and returns true if it does not exist in the set and -// function `f` returns true, or else it does nothing and returns false. 
-// -// Note that, if `item` is nil, it does nothing and returns false. The function `f` -// is executed without writing lock. -func (set *Set) AddIfNotExistFunc(item interface{}, f func() bool) bool { - if item == nil { - return false - } - if !set.Contains(item) { - if f() { - set.mu.Lock() - defer set.mu.Unlock() - if set.data == nil { - set.data = make(map[interface{}]struct{}) - } - if _, ok := set.data[item]; !ok { - set.data[item] = struct{}{} - return true - } - } - } - return false -} - -// AddIfNotExistFuncLock checks whether item exists in the set, -// it adds the item to set and returns true if it does not exists in the set and -// function `f` returns true, or else it does nothing and returns false. -// -// Note that, if `item` is nil, it does nothing and returns false. The function `f` -// is executed within writing lock. -func (set *Set) AddIfNotExistFuncLock(item interface{}, f func() bool) bool { - if item == nil { - return false - } - if !set.Contains(item) { - set.mu.Lock() - defer set.mu.Unlock() - if set.data == nil { - set.data = make(map[interface{}]struct{}) - } - if f() { - if _, ok := set.data[item]; !ok { - set.data[item] = struct{}{} - return true - } - } - } - return false -} - -// Contains checks whether the set contains `item`. -func (set *Set) Contains(item interface{}) bool { - var ok bool - set.mu.RLock() - if set.data != nil { - _, ok = set.data[item] - } - set.mu.RUnlock() - return ok -} - -// Remove deletes `item` from set. -func (set *Set) Remove(item interface{}) { - set.mu.Lock() - if set.data != nil { - delete(set.data, item) - } - set.mu.Unlock() -} - -// Size returns the size of the set. -func (set *Set) Size() int { - set.mu.RLock() - l := len(set.data) - set.mu.RUnlock() - return l -} - -// Clear deletes all items of the set. -func (set *Set) Clear() { - set.mu.Lock() - set.data = make(map[interface{}]struct{}) - set.mu.Unlock() -} - -// Slice returns the an of items of the set as slice. 
-func (set *Set) Slice() []interface{} { - set.mu.RLock() - var ( - i = 0 - ret = make([]interface{}, len(set.data)) - ) - for item := range set.data { - ret[i] = item - i++ - } - set.mu.RUnlock() - return ret -} - -// Join joins items with a string `glue`. -func (set *Set) Join(glue string) string { - set.mu.RLock() - defer set.mu.RUnlock() - if len(set.data) == 0 { - return "" - } - var ( - l = len(set.data) - i = 0 - buffer = bytes.NewBuffer(nil) - ) - for k := range set.data { - buffer.WriteString(gconv.String(k)) - if i != l-1 { - buffer.WriteString(glue) - } - i++ - } - return buffer.String() -} - -// String returns items as a string, which implements like json.Marshal does. -func (set *Set) String() string { - if set == nil { - return "" - } - set.mu.RLock() - defer set.mu.RUnlock() - var ( - s string - l = len(set.data) - i = 0 - buffer = bytes.NewBuffer(nil) - ) - buffer.WriteByte('[') - for k := range set.data { - s = gconv.String(k) - if gstr.IsNumeric(s) { - buffer.WriteString(s) - } else { - buffer.WriteString(`"` + gstr.QuoteMeta(s, `"\`) + `"`) - } - if i != l-1 { - buffer.WriteByte(',') - } - i++ - } - buffer.WriteByte(']') - return buffer.String() -} - -// LockFunc locks writing with callback function `f`. -func (set *Set) LockFunc(f func(m map[interface{}]struct{})) { - set.mu.Lock() - defer set.mu.Unlock() - f(set.data) -} - -// RLockFunc locks reading with callback function `f`. -func (set *Set) RLockFunc(f func(m map[interface{}]struct{})) { - set.mu.RLock() - defer set.mu.RUnlock() - f(set.data) -} - -// Equal checks whether the two sets equal. -func (set *Set) Equal(other *Set) bool { - if set == other { - return true - } - set.mu.RLock() - defer set.mu.RUnlock() - other.mu.RLock() - defer other.mu.RUnlock() - if len(set.data) != len(other.data) { - return false - } - for key := range set.data { - if _, ok := other.data[key]; !ok { - return false - } - } - return true -} - -// IsSubsetOf checks whether the current set is a sub-set of `other`. 
-func (set *Set) IsSubsetOf(other *Set) bool { - if set == other { - return true - } - set.mu.RLock() - defer set.mu.RUnlock() - other.mu.RLock() - defer other.mu.RUnlock() - for key := range set.data { - if _, ok := other.data[key]; !ok { - return false - } - } - return true -} - -// Union returns a new set which is the union of `set` and `others`. -// Which means, all the items in `newSet` are in `set` or in `others`. -func (set *Set) Union(others ...*Set) (newSet *Set) { - newSet = NewSet() - set.mu.RLock() - defer set.mu.RUnlock() - for _, other := range others { - if set != other { - other.mu.RLock() - } - for k, v := range set.data { - newSet.data[k] = v - } - if set != other { - for k, v := range other.data { - newSet.data[k] = v - } - } - if set != other { - other.mu.RUnlock() - } - } - - return -} - -// Diff returns a new set which is the difference set from `set` to `others`. -// Which means, all the items in `newSet` are in `set` but not in `others`. -func (set *Set) Diff(others ...*Set) (newSet *Set) { - newSet = NewSet() - set.mu.RLock() - defer set.mu.RUnlock() - for _, other := range others { - if set == other { - continue - } - other.mu.RLock() - for k, v := range set.data { - if _, ok := other.data[k]; !ok { - newSet.data[k] = v - } - } - other.mu.RUnlock() - } - return -} - -// Intersect returns a new set which is the intersection from `set` to `others`. -// Which means, all the items in `newSet` are in `set` and also in `others`. -func (set *Set) Intersect(others ...*Set) (newSet *Set) { - newSet = NewSet() - set.mu.RLock() - defer set.mu.RUnlock() - for _, other := range others { - if set != other { - other.mu.RLock() - } - for k, v := range set.data { - if _, ok := other.data[k]; ok { - newSet.data[k] = v - } - } - if set != other { - other.mu.RUnlock() - } - } - return -} - -// Complement returns a new set which is the complement from `set` to `full`. -// Which means, all the items in `newSet` are in `full` and not in `set`. 
-// -// It returns the difference between `full` and `set` -// if the given set `full` is not the full set of `set`. -func (set *Set) Complement(full *Set) (newSet *Set) { - newSet = NewSet() - set.mu.RLock() - defer set.mu.RUnlock() - if set != full { - full.mu.RLock() - defer full.mu.RUnlock() - } - for k, v := range full.data { - if _, ok := set.data[k]; !ok { - newSet.data[k] = v - } - } - return -} - -// Merge adds items from `others` sets into `set`. -func (set *Set) Merge(others ...*Set) *Set { - set.mu.Lock() - defer set.mu.Unlock() - for _, other := range others { - if set != other { - other.mu.RLock() - } - for k, v := range other.data { - set.data[k] = v - } - if set != other { - other.mu.RUnlock() - } - } - return set -} - -// Sum sums items. -// Note: The items should be converted to int type, -// or you'd get a result that you unexpected. -func (set *Set) Sum() (sum int) { - set.mu.RLock() - defer set.mu.RUnlock() - for k := range set.data { - sum += gconv.Int(k) - } - return -} - -// Pop randomly pops an item from set. -func (set *Set) Pop() interface{} { - set.mu.Lock() - defer set.mu.Unlock() - for k := range set.data { - delete(set.data, k) - return k - } - return nil -} - -// Pops randomly pops `size` items from set. -// It returns all items if size == -1. -func (set *Set) Pops(size int) []interface{} { - set.mu.Lock() - defer set.mu.Unlock() - if size > len(set.data) || size == -1 { - size = len(set.data) - } - if size <= 0 { - return nil - } - index := 0 - array := make([]interface{}, size) - for k := range set.data { - delete(set.data, k) - array[index] = k - index++ - if index == size { - break - } - } - return array -} - -// Walk applies a user supplied function `f` to every item of set. 
-func (set *Set) Walk(f func(item interface{}) interface{}) *Set { - set.mu.Lock() - defer set.mu.Unlock() - m := make(map[interface{}]struct{}, len(set.data)) - for k, v := range set.data { - m[f(k)] = v - } - set.data = m - return set -} - -// MarshalJSON implements the interface MarshalJSON for json.Marshal. -func (set Set) MarshalJSON() ([]byte, error) { - return json.Marshal(set.Slice()) -} - -// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. -func (set *Set) UnmarshalJSON(b []byte) error { - set.mu.Lock() - defer set.mu.Unlock() - if set.data == nil { - set.data = make(map[interface{}]struct{}) - } - var array []interface{} - if err := json.UnmarshalUseNumber(b, &array); err != nil { - return err - } - for _, v := range array { - set.data[v] = struct{}{} - } - return nil -} - -// UnmarshalValue is an interface implement which sets any type of value for set. -func (set *Set) UnmarshalValue(value interface{}) (err error) { - set.mu.Lock() - defer set.mu.Unlock() - if set.data == nil { - set.data = make(map[interface{}]struct{}) - } - var array []interface{} - switch value.(type) { - case string, []byte: - err = json.UnmarshalUseNumber(gconv.Bytes(value), &array) - default: - array = gconv.SliceAny(value) - } - for _, v := range array { - set.data[v] = struct{}{} - } - return -} - -// DeepCopy implements interface for deep copy of current type. -func (set *Set) DeepCopy() interface{} { - if set == nil { - return nil - } - set.mu.RLock() - defer set.mu.RUnlock() - data := make([]interface{}, 0) - for k := range set.data { - data = append(data, k) - } - return NewFrom(data, set.mu.IsSafe()) -} diff --git a/vendor/github.com/gogf/gf/v2/container/gset/gset_int_set.go b/vendor/github.com/gogf/gf/v2/container/gset/gset_int_set.go deleted file mode 100644 index b29ebfd0..00000000 --- a/vendor/github.com/gogf/gf/v2/container/gset/gset_int_set.go +++ /dev/null @@ -1,489 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). 
All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. -// - -package gset - -import ( - "bytes" - - "github.com/gogf/gf/v2/internal/json" - "github.com/gogf/gf/v2/internal/rwmutex" - "github.com/gogf/gf/v2/util/gconv" -) - -type IntSet struct { - mu rwmutex.RWMutex - data map[int]struct{} -} - -// NewIntSet create and returns a new set, which contains un-repeated items. -// The parameter `safe` is used to specify whether using set in concurrent-safety, -// which is false in default. -func NewIntSet(safe ...bool) *IntSet { - return &IntSet{ - mu: rwmutex.Create(safe...), - data: make(map[int]struct{}), - } -} - -// NewIntSetFrom returns a new set from `items`. -func NewIntSetFrom(items []int, safe ...bool) *IntSet { - m := make(map[int]struct{}) - for _, v := range items { - m[v] = struct{}{} - } - return &IntSet{ - mu: rwmutex.Create(safe...), - data: m, - } -} - -// Iterator iterates the set readonly with given callback function `f`, -// if `f` returns true then continue iterating; or false to stop. -func (set *IntSet) Iterator(f func(v int) bool) { - set.mu.RLock() - defer set.mu.RUnlock() - for k := range set.data { - if !f(k) { - break - } - } -} - -// Add adds one or multiple items to the set. -func (set *IntSet) Add(item ...int) { - set.mu.Lock() - if set.data == nil { - set.data = make(map[int]struct{}) - } - for _, v := range item { - set.data[v] = struct{}{} - } - set.mu.Unlock() -} - -// AddIfNotExist checks whether item exists in the set, -// it adds the item to set and returns true if it does not exists in the set, -// or else it does nothing and returns false. -// -// Note that, if `item` is nil, it does nothing and returns false. 
-func (set *IntSet) AddIfNotExist(item int) bool { - if !set.Contains(item) { - set.mu.Lock() - defer set.mu.Unlock() - if set.data == nil { - set.data = make(map[int]struct{}) - } - if _, ok := set.data[item]; !ok { - set.data[item] = struct{}{} - return true - } - } - return false -} - -// AddIfNotExistFunc checks whether item exists in the set, -// it adds the item to set and returns true if it does not exists in the set and -// function `f` returns true, or else it does nothing and returns false. -// -// Note that, the function `f` is executed without writing lock. -func (set *IntSet) AddIfNotExistFunc(item int, f func() bool) bool { - if !set.Contains(item) { - if f() { - set.mu.Lock() - defer set.mu.Unlock() - if set.data == nil { - set.data = make(map[int]struct{}) - } - if _, ok := set.data[item]; !ok { - set.data[item] = struct{}{} - return true - } - } - } - return false -} - -// AddIfNotExistFuncLock checks whether item exists in the set, -// it adds the item to set and returns true if it does not exists in the set and -// function `f` returns true, or else it does nothing and returns false. -// -// Note that, the function `f` is executed without writing lock. -func (set *IntSet) AddIfNotExistFuncLock(item int, f func() bool) bool { - if !set.Contains(item) { - set.mu.Lock() - defer set.mu.Unlock() - if set.data == nil { - set.data = make(map[int]struct{}) - } - if f() { - if _, ok := set.data[item]; !ok { - set.data[item] = struct{}{} - return true - } - } - } - return false -} - -// Contains checks whether the set contains `item`. -func (set *IntSet) Contains(item int) bool { - var ok bool - set.mu.RLock() - if set.data != nil { - _, ok = set.data[item] - } - set.mu.RUnlock() - return ok -} - -// Remove deletes `item` from set. -func (set *IntSet) Remove(item int) { - set.mu.Lock() - if set.data != nil { - delete(set.data, item) - } - set.mu.Unlock() -} - -// Size returns the size of the set. 
-func (set *IntSet) Size() int { - set.mu.RLock() - l := len(set.data) - set.mu.RUnlock() - return l -} - -// Clear deletes all items of the set. -func (set *IntSet) Clear() { - set.mu.Lock() - set.data = make(map[int]struct{}) - set.mu.Unlock() -} - -// Slice returns the an of items of the set as slice. -func (set *IntSet) Slice() []int { - set.mu.RLock() - var ( - i = 0 - ret = make([]int, len(set.data)) - ) - for k := range set.data { - ret[i] = k - i++ - } - set.mu.RUnlock() - return ret -} - -// Join joins items with a string `glue`. -func (set *IntSet) Join(glue string) string { - set.mu.RLock() - defer set.mu.RUnlock() - if len(set.data) == 0 { - return "" - } - var ( - l = len(set.data) - i = 0 - buffer = bytes.NewBuffer(nil) - ) - for k := range set.data { - buffer.WriteString(gconv.String(k)) - if i != l-1 { - buffer.WriteString(glue) - } - i++ - } - return buffer.String() -} - -// String returns items as a string, which implements like json.Marshal does. -func (set *IntSet) String() string { - if set == nil { - return "" - } - return "[" + set.Join(",") + "]" -} - -// LockFunc locks writing with callback function `f`. -func (set *IntSet) LockFunc(f func(m map[int]struct{})) { - set.mu.Lock() - defer set.mu.Unlock() - f(set.data) -} - -// RLockFunc locks reading with callback function `f`. -func (set *IntSet) RLockFunc(f func(m map[int]struct{})) { - set.mu.RLock() - defer set.mu.RUnlock() - f(set.data) -} - -// Equal checks whether the two sets equal. -func (set *IntSet) Equal(other *IntSet) bool { - if set == other { - return true - } - set.mu.RLock() - defer set.mu.RUnlock() - other.mu.RLock() - defer other.mu.RUnlock() - if len(set.data) != len(other.data) { - return false - } - for key := range set.data { - if _, ok := other.data[key]; !ok { - return false - } - } - return true -} - -// IsSubsetOf checks whether the current set is a sub-set of `other`. 
-func (set *IntSet) IsSubsetOf(other *IntSet) bool { - if set == other { - return true - } - set.mu.RLock() - defer set.mu.RUnlock() - other.mu.RLock() - defer other.mu.RUnlock() - for key := range set.data { - if _, ok := other.data[key]; !ok { - return false - } - } - return true -} - -// Union returns a new set which is the union of `set` and `other`. -// Which means, all the items in `newSet` are in `set` or in `other`. -func (set *IntSet) Union(others ...*IntSet) (newSet *IntSet) { - newSet = NewIntSet() - set.mu.RLock() - defer set.mu.RUnlock() - for _, other := range others { - if set != other { - other.mu.RLock() - } - for k, v := range set.data { - newSet.data[k] = v - } - if set != other { - for k, v := range other.data { - newSet.data[k] = v - } - } - if set != other { - other.mu.RUnlock() - } - } - - return -} - -// Diff returns a new set which is the difference set from `set` to `other`. -// Which means, all the items in `newSet` are in `set` but not in `other`. -func (set *IntSet) Diff(others ...*IntSet) (newSet *IntSet) { - newSet = NewIntSet() - set.mu.RLock() - defer set.mu.RUnlock() - for _, other := range others { - if set == other { - continue - } - other.mu.RLock() - for k, v := range set.data { - if _, ok := other.data[k]; !ok { - newSet.data[k] = v - } - } - other.mu.RUnlock() - } - return -} - -// Intersect returns a new set which is the intersection from `set` to `other`. -// Which means, all the items in `newSet` are in `set` and also in `other`. -func (set *IntSet) Intersect(others ...*IntSet) (newSet *IntSet) { - newSet = NewIntSet() - set.mu.RLock() - defer set.mu.RUnlock() - for _, other := range others { - if set != other { - other.mu.RLock() - } - for k, v := range set.data { - if _, ok := other.data[k]; ok { - newSet.data[k] = v - } - } - if set != other { - other.mu.RUnlock() - } - } - return -} - -// Complement returns a new set which is the complement from `set` to `full`. 
-// Which means, all the items in `newSet` are in `full` and not in `set`. -// -// It returns the difference between `full` and `set` -// if the given set `full` is not the full set of `set`. -func (set *IntSet) Complement(full *IntSet) (newSet *IntSet) { - newSet = NewIntSet() - set.mu.RLock() - defer set.mu.RUnlock() - if set != full { - full.mu.RLock() - defer full.mu.RUnlock() - } - for k, v := range full.data { - if _, ok := set.data[k]; !ok { - newSet.data[k] = v - } - } - return -} - -// Merge adds items from `others` sets into `set`. -func (set *IntSet) Merge(others ...*IntSet) *IntSet { - set.mu.Lock() - defer set.mu.Unlock() - for _, other := range others { - if set != other { - other.mu.RLock() - } - for k, v := range other.data { - set.data[k] = v - } - if set != other { - other.mu.RUnlock() - } - } - return set -} - -// Sum sums items. -// Note: The items should be converted to int type, -// or you'd get a result that you unexpected. -func (set *IntSet) Sum() (sum int) { - set.mu.RLock() - defer set.mu.RUnlock() - for k := range set.data { - sum += k - } - return -} - -// Pop randomly pops an item from set. -func (set *IntSet) Pop() int { - set.mu.Lock() - defer set.mu.Unlock() - for k := range set.data { - delete(set.data, k) - return k - } - return 0 -} - -// Pops randomly pops `size` items from set. -// It returns all items if size == -1. -func (set *IntSet) Pops(size int) []int { - set.mu.Lock() - defer set.mu.Unlock() - if size > len(set.data) || size == -1 { - size = len(set.data) - } - if size <= 0 { - return nil - } - index := 0 - array := make([]int, size) - for k := range set.data { - delete(set.data, k) - array[index] = k - index++ - if index == size { - break - } - } - return array -} - -// Walk applies a user supplied function `f` to every item of set. 
-func (set *IntSet) Walk(f func(item int) int) *IntSet { - set.mu.Lock() - defer set.mu.Unlock() - m := make(map[int]struct{}, len(set.data)) - for k, v := range set.data { - m[f(k)] = v - } - set.data = m - return set -} - -// MarshalJSON implements the interface MarshalJSON for json.Marshal. -func (set IntSet) MarshalJSON() ([]byte, error) { - return json.Marshal(set.Slice()) -} - -// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. -func (set *IntSet) UnmarshalJSON(b []byte) error { - set.mu.Lock() - defer set.mu.Unlock() - if set.data == nil { - set.data = make(map[int]struct{}) - } - var array []int - if err := json.UnmarshalUseNumber(b, &array); err != nil { - return err - } - for _, v := range array { - set.data[v] = struct{}{} - } - return nil -} - -// UnmarshalValue is an interface implement which sets any type of value for set. -func (set *IntSet) UnmarshalValue(value interface{}) (err error) { - set.mu.Lock() - defer set.mu.Unlock() - if set.data == nil { - set.data = make(map[int]struct{}) - } - var array []int - switch value.(type) { - case string, []byte: - err = json.UnmarshalUseNumber(gconv.Bytes(value), &array) - default: - array = gconv.SliceInt(value) - } - for _, v := range array { - set.data[v] = struct{}{} - } - return -} - -// DeepCopy implements interface for deep copy of current type. -func (set *IntSet) DeepCopy() interface{} { - if set == nil { - return nil - } - set.mu.RLock() - defer set.mu.RUnlock() - var ( - slice = make([]int, len(set.data)) - index = 0 - ) - for k := range set.data { - slice[index] = k - index++ - } - return NewIntSetFrom(slice, set.mu.IsSafe()) -} diff --git a/vendor/github.com/gogf/gf/v2/container/gset/gset_str_set.go b/vendor/github.com/gogf/gf/v2/container/gset/gset_str_set.go deleted file mode 100644 index 386ba6b2..00000000 --- a/vendor/github.com/gogf/gf/v2/container/gset/gset_str_set.go +++ /dev/null @@ -1,519 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). 
All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. -// - -package gset - -import ( - "bytes" - "strings" - - "github.com/gogf/gf/v2/internal/json" - "github.com/gogf/gf/v2/internal/rwmutex" - "github.com/gogf/gf/v2/text/gstr" - "github.com/gogf/gf/v2/util/gconv" -) - -type StrSet struct { - mu rwmutex.RWMutex - data map[string]struct{} -} - -// NewStrSet create and returns a new set, which contains un-repeated items. -// The parameter `safe` is used to specify whether using set in concurrent-safety, -// which is false in default. -func NewStrSet(safe ...bool) *StrSet { - return &StrSet{ - mu: rwmutex.Create(safe...), - data: make(map[string]struct{}), - } -} - -// NewStrSetFrom returns a new set from `items`. -func NewStrSetFrom(items []string, safe ...bool) *StrSet { - m := make(map[string]struct{}) - for _, v := range items { - m[v] = struct{}{} - } - return &StrSet{ - mu: rwmutex.Create(safe...), - data: m, - } -} - -// Iterator iterates the set readonly with given callback function `f`, -// if `f` returns true then continue iterating; or false to stop. -func (set *StrSet) Iterator(f func(v string) bool) { - set.mu.RLock() - defer set.mu.RUnlock() - for k := range set.data { - if !f(k) { - break - } - } -} - -// Add adds one or multiple items to the set. -func (set *StrSet) Add(item ...string) { - set.mu.Lock() - if set.data == nil { - set.data = make(map[string]struct{}) - } - for _, v := range item { - set.data[v] = struct{}{} - } - set.mu.Unlock() -} - -// AddIfNotExist checks whether item exists in the set, -// it adds the item to set and returns true if it does not exist in the set, -// or else it does nothing and returns false. 
-func (set *StrSet) AddIfNotExist(item string) bool { - if !set.Contains(item) { - set.mu.Lock() - defer set.mu.Unlock() - if set.data == nil { - set.data = make(map[string]struct{}) - } - if _, ok := set.data[item]; !ok { - set.data[item] = struct{}{} - return true - } - } - return false -} - -// AddIfNotExistFunc checks whether item exists in the set, -// it adds the item to set and returns true if it does not exists in the set and -// function `f` returns true, or else it does nothing and returns false. -// -// Note that, the function `f` is executed without writing lock. -func (set *StrSet) AddIfNotExistFunc(item string, f func() bool) bool { - if !set.Contains(item) { - if f() { - set.mu.Lock() - defer set.mu.Unlock() - if set.data == nil { - set.data = make(map[string]struct{}) - } - if _, ok := set.data[item]; !ok { - set.data[item] = struct{}{} - return true - } - } - } - return false -} - -// AddIfNotExistFuncLock checks whether item exists in the set, -// it adds the item to set and returns true if it does not exists in the set and -// function `f` returns true, or else it does nothing and returns false. -// -// Note that, the function `f` is executed without writing lock. -func (set *StrSet) AddIfNotExistFuncLock(item string, f func() bool) bool { - if !set.Contains(item) { - set.mu.Lock() - defer set.mu.Unlock() - if set.data == nil { - set.data = make(map[string]struct{}) - } - if f() { - if _, ok := set.data[item]; !ok { - set.data[item] = struct{}{} - return true - } - } - } - return false -} - -// Contains checks whether the set contains `item`. -func (set *StrSet) Contains(item string) bool { - var ok bool - set.mu.RLock() - if set.data != nil { - _, ok = set.data[item] - } - set.mu.RUnlock() - return ok -} - -// ContainsI checks whether a value exists in the set with case-insensitively. -// Note that it internally iterates the whole set to do the comparison with case-insensitively. 
-func (set *StrSet) ContainsI(item string) bool { - set.mu.RLock() - defer set.mu.RUnlock() - for k := range set.data { - if strings.EqualFold(k, item) { - return true - } - } - return false -} - -// Remove deletes `item` from set. -func (set *StrSet) Remove(item string) { - set.mu.Lock() - if set.data != nil { - delete(set.data, item) - } - set.mu.Unlock() -} - -// Size returns the size of the set. -func (set *StrSet) Size() int { - set.mu.RLock() - l := len(set.data) - set.mu.RUnlock() - return l -} - -// Clear deletes all items of the set. -func (set *StrSet) Clear() { - set.mu.Lock() - set.data = make(map[string]struct{}) - set.mu.Unlock() -} - -// Slice returns the an of items of the set as slice. -func (set *StrSet) Slice() []string { - set.mu.RLock() - var ( - i = 0 - ret = make([]string, len(set.data)) - ) - for item := range set.data { - ret[i] = item - i++ - } - - set.mu.RUnlock() - return ret -} - -// Join joins items with a string `glue`. -func (set *StrSet) Join(glue string) string { - set.mu.RLock() - defer set.mu.RUnlock() - if len(set.data) == 0 { - return "" - } - var ( - l = len(set.data) - i = 0 - buffer = bytes.NewBuffer(nil) - ) - for k := range set.data { - buffer.WriteString(k) - if i != l-1 { - buffer.WriteString(glue) - } - i++ - } - return buffer.String() -} - -// String returns items as a string, which implements like json.Marshal does. -func (set *StrSet) String() string { - if set == nil { - return "" - } - set.mu.RLock() - defer set.mu.RUnlock() - var ( - l = len(set.data) - i = 0 - buffer = bytes.NewBuffer(nil) - ) - buffer.WriteByte('[') - for k := range set.data { - buffer.WriteString(`"` + gstr.QuoteMeta(k, `"\`) + `"`) - if i != l-1 { - buffer.WriteByte(',') - } - i++ - } - buffer.WriteByte(']') - return buffer.String() -} - -// LockFunc locks writing with callback function `f`. 
-func (set *StrSet) LockFunc(f func(m map[string]struct{})) { - set.mu.Lock() - defer set.mu.Unlock() - f(set.data) -} - -// RLockFunc locks reading with callback function `f`. -func (set *StrSet) RLockFunc(f func(m map[string]struct{})) { - set.mu.RLock() - defer set.mu.RUnlock() - f(set.data) -} - -// Equal checks whether the two sets equal. -func (set *StrSet) Equal(other *StrSet) bool { - if set == other { - return true - } - set.mu.RLock() - defer set.mu.RUnlock() - other.mu.RLock() - defer other.mu.RUnlock() - if len(set.data) != len(other.data) { - return false - } - for key := range set.data { - if _, ok := other.data[key]; !ok { - return false - } - } - return true -} - -// IsSubsetOf checks whether the current set is a sub-set of `other`. -func (set *StrSet) IsSubsetOf(other *StrSet) bool { - if set == other { - return true - } - set.mu.RLock() - defer set.mu.RUnlock() - other.mu.RLock() - defer other.mu.RUnlock() - for key := range set.data { - if _, ok := other.data[key]; !ok { - return false - } - } - return true -} - -// Union returns a new set which is the union of `set` and `other`. -// Which means, all the items in `newSet` are in `set` or in `other`. -func (set *StrSet) Union(others ...*StrSet) (newSet *StrSet) { - newSet = NewStrSet() - set.mu.RLock() - defer set.mu.RUnlock() - for _, other := range others { - if set != other { - other.mu.RLock() - } - for k, v := range set.data { - newSet.data[k] = v - } - if set != other { - for k, v := range other.data { - newSet.data[k] = v - } - } - if set != other { - other.mu.RUnlock() - } - } - - return -} - -// Diff returns a new set which is the difference set from `set` to `other`. -// Which means, all the items in `newSet` are in `set` but not in `other`. 
-func (set *StrSet) Diff(others ...*StrSet) (newSet *StrSet) { - newSet = NewStrSet() - set.mu.RLock() - defer set.mu.RUnlock() - for _, other := range others { - if set == other { - continue - } - other.mu.RLock() - for k, v := range set.data { - if _, ok := other.data[k]; !ok { - newSet.data[k] = v - } - } - other.mu.RUnlock() - } - return -} - -// Intersect returns a new set which is the intersection from `set` to `other`. -// Which means, all the items in `newSet` are in `set` and also in `other`. -func (set *StrSet) Intersect(others ...*StrSet) (newSet *StrSet) { - newSet = NewStrSet() - set.mu.RLock() - defer set.mu.RUnlock() - for _, other := range others { - if set != other { - other.mu.RLock() - } - for k, v := range set.data { - if _, ok := other.data[k]; ok { - newSet.data[k] = v - } - } - if set != other { - other.mu.RUnlock() - } - } - return -} - -// Complement returns a new set which is the complement from `set` to `full`. -// Which means, all the items in `newSet` are in `full` and not in `set`. -// -// It returns the difference between `full` and `set` -// if the given set `full` is not the full set of `set`. -func (set *StrSet) Complement(full *StrSet) (newSet *StrSet) { - newSet = NewStrSet() - set.mu.RLock() - defer set.mu.RUnlock() - if set != full { - full.mu.RLock() - defer full.mu.RUnlock() - } - for k, v := range full.data { - if _, ok := set.data[k]; !ok { - newSet.data[k] = v - } - } - return -} - -// Merge adds items from `others` sets into `set`. -func (set *StrSet) Merge(others ...*StrSet) *StrSet { - set.mu.Lock() - defer set.mu.Unlock() - for _, other := range others { - if set != other { - other.mu.RLock() - } - for k, v := range other.data { - set.data[k] = v - } - if set != other { - other.mu.RUnlock() - } - } - return set -} - -// Sum sums items. -// Note: The items should be converted to int type, -// or you'd get a result that you unexpected. 
-func (set *StrSet) Sum() (sum int) { - set.mu.RLock() - defer set.mu.RUnlock() - for k := range set.data { - sum += gconv.Int(k) - } - return -} - -// Pop randomly pops an item from set. -func (set *StrSet) Pop() string { - set.mu.Lock() - defer set.mu.Unlock() - for k := range set.data { - delete(set.data, k) - return k - } - return "" -} - -// Pops randomly pops `size` items from set. -// It returns all items if size == -1. -func (set *StrSet) Pops(size int) []string { - set.mu.Lock() - defer set.mu.Unlock() - if size > len(set.data) || size == -1 { - size = len(set.data) - } - if size <= 0 { - return nil - } - index := 0 - array := make([]string, size) - for k := range set.data { - delete(set.data, k) - array[index] = k - index++ - if index == size { - break - } - } - return array -} - -// Walk applies a user supplied function `f` to every item of set. -func (set *StrSet) Walk(f func(item string) string) *StrSet { - set.mu.Lock() - defer set.mu.Unlock() - m := make(map[string]struct{}, len(set.data)) - for k, v := range set.data { - m[f(k)] = v - } - set.data = m - return set -} - -// MarshalJSON implements the interface MarshalJSON for json.Marshal. -func (set StrSet) MarshalJSON() ([]byte, error) { - return json.Marshal(set.Slice()) -} - -// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. -func (set *StrSet) UnmarshalJSON(b []byte) error { - set.mu.Lock() - defer set.mu.Unlock() - if set.data == nil { - set.data = make(map[string]struct{}) - } - var array []string - if err := json.UnmarshalUseNumber(b, &array); err != nil { - return err - } - for _, v := range array { - set.data[v] = struct{}{} - } - return nil -} - -// UnmarshalValue is an interface implement which sets any type of value for set. 
-func (set *StrSet) UnmarshalValue(value interface{}) (err error) { - set.mu.Lock() - defer set.mu.Unlock() - if set.data == nil { - set.data = make(map[string]struct{}) - } - var array []string - switch value.(type) { - case string, []byte: - err = json.UnmarshalUseNumber(gconv.Bytes(value), &array) - default: - array = gconv.SliceStr(value) - } - for _, v := range array { - set.data[v] = struct{}{} - } - return -} - -// DeepCopy implements interface for deep copy of current type. -func (set *StrSet) DeepCopy() interface{} { - if set == nil { - return nil - } - set.mu.RLock() - defer set.mu.RUnlock() - var ( - slice = make([]string, len(set.data)) - index = 0 - ) - for k := range set.data { - slice[index] = k - index++ - } - return NewStrSetFrom(slice, set.mu.IsSafe()) -} diff --git a/vendor/github.com/gogf/gf/v2/container/gtree/gtree.go b/vendor/github.com/gogf/gf/v2/container/gtree/gtree.go deleted file mode 100644 index 2cb7b25e..00000000 --- a/vendor/github.com/gogf/gf/v2/container/gtree/gtree.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -// Package gtree provides concurrent-safe/unsafe tree containers. -// -// Some implements are from: https://github.com/emirpasic/gods -package gtree diff --git a/vendor/github.com/gogf/gf/v2/container/gtree/gtree_avltree.go b/vendor/github.com/gogf/gf/v2/container/gtree/gtree_avltree.go deleted file mode 100644 index 005be372..00000000 --- a/vendor/github.com/gogf/gf/v2/container/gtree/gtree_avltree.go +++ /dev/null @@ -1,816 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. 
-// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gtree - -import ( - "bytes" - "fmt" - - "github.com/gogf/gf/v2/container/gvar" - "github.com/gogf/gf/v2/internal/json" - "github.com/gogf/gf/v2/internal/rwmutex" - "github.com/gogf/gf/v2/util/gconv" -) - -// AVLTree holds elements of the AVL tree. -type AVLTree struct { - mu rwmutex.RWMutex - root *AVLTreeNode - comparator func(v1, v2 interface{}) int - size int -} - -// AVLTreeNode is a single element within the tree. -type AVLTreeNode struct { - Key interface{} - Value interface{} - parent *AVLTreeNode - children [2]*AVLTreeNode - b int8 -} - -// NewAVLTree instantiates an AVL tree with the custom key comparator. -// The parameter `safe` is used to specify whether using tree in concurrent-safety, -// which is false in default. -func NewAVLTree(comparator func(v1, v2 interface{}) int, safe ...bool) *AVLTree { - return &AVLTree{ - mu: rwmutex.Create(safe...), - comparator: comparator, - } -} - -// NewAVLTreeFrom instantiates an AVL tree with the custom key comparator and data map. -// The parameter `safe` is used to specify whether using tree in concurrent-safety, -// which is false in default. -func NewAVLTreeFrom(comparator func(v1, v2 interface{}) int, data map[interface{}]interface{}, safe ...bool) *AVLTree { - tree := NewAVLTree(comparator, safe...) - for k, v := range data { - tree.put(k, v, nil, &tree.root) - } - return tree -} - -// Clone returns a new tree with a copy of current tree. -func (tree *AVLTree) Clone() *AVLTree { - newTree := NewAVLTree(tree.comparator, tree.mu.IsSafe()) - newTree.Sets(tree.Map()) - return newTree -} - -// Set inserts node into the tree. -func (tree *AVLTree) Set(key interface{}, value interface{}) { - tree.mu.Lock() - defer tree.mu.Unlock() - tree.put(key, value, nil, &tree.root) -} - -// Sets batch sets key-values to the tree. 
-func (tree *AVLTree) Sets(data map[interface{}]interface{}) { - tree.mu.Lock() - defer tree.mu.Unlock() - for key, value := range data { - tree.put(key, value, nil, &tree.root) - } -} - -// Search searches the tree with given `key`. -// Second return parameter `found` is true if key was found, otherwise false. -func (tree *AVLTree) Search(key interface{}) (value interface{}, found bool) { - tree.mu.RLock() - defer tree.mu.RUnlock() - if node, found := tree.doSearch(key); found { - return node.Value, true - } - return nil, false -} - -// doSearch searches the tree with given `key`. -// Second return parameter `found` is true if key was found, otherwise false. -func (tree *AVLTree) doSearch(key interface{}) (node *AVLTreeNode, found bool) { - node = tree.root - for node != nil { - cmp := tree.getComparator()(key, node.Key) - switch { - case cmp == 0: - return node, true - case cmp < 0: - node = node.children[0] - case cmp > 0: - node = node.children[1] - } - } - return nil, false -} - -// Get searches the node in the tree by `key` and returns its value or nil if key is not found in tree. -func (tree *AVLTree) Get(key interface{}) (value interface{}) { - value, _ = tree.Search(key) - return -} - -// doSetWithLockCheck checks whether value of the key exists with mutex.Lock, -// if not exists, set value to the map with given `key`, -// or else just return the existing value. -// -// When setting value, if `value` is type of , -// it will be executed with mutex.Lock of the hash map, -// and its return value will be set to the map with `key`. -// -// It returns value with given `key`. 
-func (tree *AVLTree) doSetWithLockCheck(key interface{}, value interface{}) interface{} { - tree.mu.Lock() - defer tree.mu.Unlock() - if node, found := tree.doSearch(key); found { - return node.Value - } - if f, ok := value.(func() interface{}); ok { - value = f() - } - if value != nil { - tree.put(key, value, nil, &tree.root) - } - return value -} - -// GetOrSet returns the value by key, -// or sets value with given `value` if it does not exist and then returns this value. -func (tree *AVLTree) GetOrSet(key interface{}, value interface{}) interface{} { - if v, ok := tree.Search(key); !ok { - return tree.doSetWithLockCheck(key, value) - } else { - return v - } -} - -// GetOrSetFunc returns the value by key, -// or sets value with returned value of callback function `f` if it does not exist -// and then returns this value. -func (tree *AVLTree) GetOrSetFunc(key interface{}, f func() interface{}) interface{} { - if v, ok := tree.Search(key); !ok { - return tree.doSetWithLockCheck(key, f()) - } else { - return v - } -} - -// GetOrSetFuncLock returns the value by key, -// or sets value with returned value of callback function `f` if it does not exist -// and then returns this value. -// -// GetOrSetFuncLock differs with GetOrSetFunc function is that it executes function `f` -// with mutex.Lock of the hash map. -func (tree *AVLTree) GetOrSetFuncLock(key interface{}, f func() interface{}) interface{} { - if v, ok := tree.Search(key); !ok { - return tree.doSetWithLockCheck(key, f) - } else { - return v - } -} - -// GetVar returns a gvar.Var with the value by given `key`. -// The returned gvar.Var is un-concurrent safe. -func (tree *AVLTree) GetVar(key interface{}) *gvar.Var { - return gvar.New(tree.Get(key)) -} - -// GetVarOrSet returns a gvar.Var with result from GetVarOrSet. -// The returned gvar.Var is un-concurrent safe. 
-func (tree *AVLTree) GetVarOrSet(key interface{}, value interface{}) *gvar.Var { - return gvar.New(tree.GetOrSet(key, value)) -} - -// GetVarOrSetFunc returns a gvar.Var with result from GetOrSetFunc. -// The returned gvar.Var is un-concurrent safe. -func (tree *AVLTree) GetVarOrSetFunc(key interface{}, f func() interface{}) *gvar.Var { - return gvar.New(tree.GetOrSetFunc(key, f)) -} - -// GetVarOrSetFuncLock returns a gvar.Var with result from GetOrSetFuncLock. -// The returned gvar.Var is un-concurrent safe. -func (tree *AVLTree) GetVarOrSetFuncLock(key interface{}, f func() interface{}) *gvar.Var { - return gvar.New(tree.GetOrSetFuncLock(key, f)) -} - -// SetIfNotExist sets `value` to the map if the `key` does not exist, and then returns true. -// It returns false if `key` exists, and `value` would be ignored. -func (tree *AVLTree) SetIfNotExist(key interface{}, value interface{}) bool { - if !tree.Contains(key) { - tree.doSetWithLockCheck(key, value) - return true - } - return false -} - -// SetIfNotExistFunc sets value with return value of callback function `f`, and then returns true. -// It returns false if `key` exists, and `value` would be ignored. -func (tree *AVLTree) SetIfNotExistFunc(key interface{}, f func() interface{}) bool { - if !tree.Contains(key) { - tree.doSetWithLockCheck(key, f()) - return true - } - return false -} - -// SetIfNotExistFuncLock sets value with return value of callback function `f`, and then returns true. -// It returns false if `key` exists, and `value` would be ignored. -// -// SetIfNotExistFuncLock differs with SetIfNotExistFunc function is that -// it executes function `f` with mutex.Lock of the hash map. -func (tree *AVLTree) SetIfNotExistFuncLock(key interface{}, f func() interface{}) bool { - if !tree.Contains(key) { - tree.doSetWithLockCheck(key, f) - return true - } - return false -} - -// Contains checks whether `key` exists in the tree. 
-func (tree *AVLTree) Contains(key interface{}) bool { - _, ok := tree.Search(key) - return ok -} - -// Remove removes the node from the tree by key. -// Key should adhere to the comparator's type assertion, otherwise method panics. -func (tree *AVLTree) Remove(key interface{}) (value interface{}) { - tree.mu.Lock() - defer tree.mu.Unlock() - value, _ = tree.remove(key, &tree.root) - return -} - -// Removes batch deletes values of the tree by `keys`. -func (tree *AVLTree) Removes(keys []interface{}) { - tree.mu.Lock() - defer tree.mu.Unlock() - for _, key := range keys { - tree.remove(key, &tree.root) - } -} - -// IsEmpty returns true if tree does not contain any nodes. -func (tree *AVLTree) IsEmpty() bool { - return tree.Size() == 0 -} - -// Size returns number of nodes in the tree. -func (tree *AVLTree) Size() int { - tree.mu.RLock() - defer tree.mu.RUnlock() - return tree.size -} - -// Keys returns all keys in asc order. -func (tree *AVLTree) Keys() []interface{} { - keys := make([]interface{}, tree.Size()) - index := 0 - tree.IteratorAsc(func(key, value interface{}) bool { - keys[index] = key - index++ - return true - }) - return keys -} - -// Values returns all values in asc order based on the key. -func (tree *AVLTree) Values() []interface{} { - values := make([]interface{}, tree.Size()) - index := 0 - tree.IteratorAsc(func(key, value interface{}) bool { - values[index] = value - index++ - return true - }) - return values -} - -// Left returns the minimum element of the AVL tree -// or nil if the tree is empty. -func (tree *AVLTree) Left() *AVLTreeNode { - tree.mu.RLock() - defer tree.mu.RUnlock() - node := tree.bottom(0) - if tree.mu.IsSafe() { - return &AVLTreeNode{ - Key: node.Key, - Value: node.Value, - } - } - return node -} - -// Right returns the maximum element of the AVL tree -// or nil if the tree is empty. 
-func (tree *AVLTree) Right() *AVLTreeNode { - tree.mu.RLock() - defer tree.mu.RUnlock() - node := tree.bottom(1) - if tree.mu.IsSafe() { - return &AVLTreeNode{ - Key: node.Key, - Value: node.Value, - } - } - return node -} - -// Floor Finds floor node of the input key, return the floor node or nil if no floor node is found. -// Second return parameter is true if floor was found, otherwise false. -// -// Floor node is defined as the largest node that is smaller than or equal to the given node. -// A floor node may not be found, either because the tree is empty, or because -// all nodes in the tree is larger than the given node. -// -// Key should adhere to the comparator's type assertion, otherwise method panics. -func (tree *AVLTree) Floor(key interface{}) (floor *AVLTreeNode, found bool) { - tree.mu.RLock() - defer tree.mu.RUnlock() - n := tree.root - for n != nil { - c := tree.getComparator()(key, n.Key) - switch { - case c == 0: - return n, true - case c < 0: - n = n.children[0] - case c > 0: - floor, found = n, true - n = n.children[1] - } - } - if found { - return - } - return nil, false -} - -// Ceiling finds ceiling node of the input key, return the ceiling node or nil if no ceiling node is found. -// Second return parameter is true if ceiling was found, otherwise false. -// -// Ceiling node is defined as the smallest node that is larger than or equal to the given node. -// A ceiling node may not be found, either because the tree is empty, or because -// all nodes in the tree is smaller than the given node. -// -// Key should adhere to the comparator's type assertion, otherwise method panics. 
-func (tree *AVLTree) Ceiling(key interface{}) (ceiling *AVLTreeNode, found bool) { - tree.mu.RLock() - defer tree.mu.RUnlock() - n := tree.root - for n != nil { - c := tree.getComparator()(key, n.Key) - switch { - case c == 0: - return n, true - case c > 0: - n = n.children[1] - case c < 0: - ceiling, found = n, true - n = n.children[0] - } - } - if found { - return - } - return nil, false -} - -// Clear removes all nodes from the tree. -func (tree *AVLTree) Clear() { - tree.mu.Lock() - defer tree.mu.Unlock() - tree.root = nil - tree.size = 0 -} - -// Replace the data of the tree with given `data`. -func (tree *AVLTree) Replace(data map[interface{}]interface{}) { - tree.mu.Lock() - defer tree.mu.Unlock() - tree.root = nil - tree.size = 0 - for key, value := range data { - tree.put(key, value, nil, &tree.root) - } -} - -// String returns a string representation of container -func (tree *AVLTree) String() string { - if tree == nil { - return "" - } - tree.mu.RLock() - defer tree.mu.RUnlock() - str := "" - if tree.size != 0 { - output(tree.root, "", true, &str) - } - return str -} - -// Print prints the tree to stdout. -func (tree *AVLTree) Print() { - fmt.Println(tree.String()) -} - -// Map returns all key-value items as map. -func (tree *AVLTree) Map() map[interface{}]interface{} { - m := make(map[interface{}]interface{}, tree.Size()) - tree.IteratorAsc(func(key, value interface{}) bool { - m[key] = value - return true - }) - return m -} - -// MapStrAny returns all key-value items as map[string]interface{}. -func (tree *AVLTree) MapStrAny() map[string]interface{} { - m := make(map[string]interface{}, tree.Size()) - tree.IteratorAsc(func(key, value interface{}) bool { - m[gconv.String(key)] = value - return true - }) - return m -} - -// Flip exchanges key-value of the tree to value-key. -// Note that you should guarantee the value is the same type as key, -// or else the comparator would panic. 
-// -// If the type of value is different with key, you pass the new `comparator`. -func (tree *AVLTree) Flip(comparator ...func(v1, v2 interface{}) int) { - t := (*AVLTree)(nil) - if len(comparator) > 0 { - t = NewAVLTree(comparator[0], tree.mu.IsSafe()) - } else { - t = NewAVLTree(tree.comparator, tree.mu.IsSafe()) - } - tree.IteratorAsc(func(key, value interface{}) bool { - t.put(value, key, nil, &t.root) - return true - }) - tree.mu.Lock() - tree.root = t.root - tree.size = t.size - tree.mu.Unlock() -} - -// Iterator is alias of IteratorAsc. -func (tree *AVLTree) Iterator(f func(key, value interface{}) bool) { - tree.IteratorAsc(f) -} - -// IteratorFrom is alias of IteratorAscFrom. -func (tree *AVLTree) IteratorFrom(key interface{}, match bool, f func(key, value interface{}) bool) { - tree.IteratorAscFrom(key, match, f) -} - -// IteratorAsc iterates the tree readonly in ascending order with given callback function `f`. -// If `f` returns true, then it continues iterating; or false to stop. -func (tree *AVLTree) IteratorAsc(f func(key, value interface{}) bool) { - tree.mu.RLock() - defer tree.mu.RUnlock() - tree.doIteratorAsc(tree.bottom(0), f) -} - -// IteratorAscFrom iterates the tree readonly in ascending order with given callback function `f`. -// The parameter `key` specifies the start entry for iterating. The `match` specifies whether -// starting iterating if the `key` is fully matched, or else using index searching iterating. -// If `f` returns true, then it continues iterating; or false to stop. 
-func (tree *AVLTree) IteratorAscFrom(key interface{}, match bool, f func(key, value interface{}) bool) { - tree.mu.RLock() - defer tree.mu.RUnlock() - node, found := tree.doSearch(key) - if match { - if found { - tree.doIteratorAsc(node, f) - } - } else { - tree.doIteratorAsc(node, f) - } -} - -func (tree *AVLTree) doIteratorAsc(node *AVLTreeNode, f func(key, value interface{}) bool) { - for node != nil { - if !f(node.Key, node.Value) { - return - } - node = node.Next() - } -} - -// IteratorDesc iterates the tree readonly in descending order with given callback function `f`. -// If `f` returns true, then it continues iterating; or false to stop. -func (tree *AVLTree) IteratorDesc(f func(key, value interface{}) bool) { - tree.mu.RLock() - defer tree.mu.RUnlock() - tree.doIteratorDesc(tree.bottom(1), f) -} - -// IteratorDescFrom iterates the tree readonly in descending order with given callback function `f`. -// The parameter `key` specifies the start entry for iterating. The `match` specifies whether -// starting iterating if the `key` is fully matched, or else using index searching iterating. -// If `f` returns true, then it continues iterating; or false to stop. 
-func (tree *AVLTree) IteratorDescFrom(key interface{}, match bool, f func(key, value interface{}) bool) { - tree.mu.RLock() - defer tree.mu.RUnlock() - node, found := tree.doSearch(key) - if match { - if found { - tree.doIteratorDesc(node, f) - } - } else { - tree.doIteratorDesc(node, f) - } -} - -func (tree *AVLTree) doIteratorDesc(node *AVLTreeNode, f func(key, value interface{}) bool) { - for node != nil { - if !f(node.Key, node.Value) { - return - } - node = node.Prev() - } -} - -func (tree *AVLTree) put(key interface{}, value interface{}, p *AVLTreeNode, qp **AVLTreeNode) bool { - q := *qp - if q == nil { - tree.size++ - *qp = &AVLTreeNode{Key: key, Value: value, parent: p} - return true - } - - c := tree.getComparator()(key, q.Key) - if c == 0 { - q.Key = key - q.Value = value - return false - } - - if c < 0 { - c = -1 - } else { - c = 1 - } - a := (c + 1) / 2 - if tree.put(key, value, q, &q.children[a]) { - return putFix(int8(c), qp) - } - return false -} - -func (tree *AVLTree) remove(key interface{}, qp **AVLTreeNode) (value interface{}, fix bool) { - q := *qp - if q == nil { - return nil, false - } - - c := tree.getComparator()(key, q.Key) - if c == 0 { - tree.size-- - value = q.Value - fix = true - if q.children[1] == nil { - if q.children[0] != nil { - q.children[0].parent = q.parent - } - *qp = q.children[0] - return - } - if removeMin(&q.children[1], &q.Key, &q.Value) { - return value, removeFix(-1, qp) - } - return - } - - if c < 0 { - c = -1 - } else { - c = 1 - } - a := (c + 1) / 2 - value, fix = tree.remove(key, &q.children[a]) - if fix { - return value, removeFix(int8(-c), qp) - } - return value, false -} - -func removeMin(qp **AVLTreeNode, minKey *interface{}, minVal *interface{}) bool { - q := *qp - if q.children[0] == nil { - *minKey = q.Key - *minVal = q.Value - if q.children[1] != nil { - q.children[1].parent = q.parent - } - *qp = q.children[1] - return true - } - fix := removeMin(&q.children[0], minKey, minVal) - if fix { - return 
removeFix(1, qp) - } - return false -} - -func putFix(c int8, t **AVLTreeNode) bool { - s := *t - if s.b == 0 { - s.b = c - return true - } - - if s.b == -c { - s.b = 0 - return false - } - - if s.children[(c+1)/2].b == c { - s = singleRotate(c, s) - } else { - s = doubleRotate(c, s) - } - *t = s - return false -} - -func removeFix(c int8, t **AVLTreeNode) bool { - s := *t - if s.b == 0 { - s.b = c - return false - } - - if s.b == -c { - s.b = 0 - return true - } - - a := (c + 1) / 2 - if s.children[a].b == 0 { - s = rotate(c, s) - s.b = -c - *t = s - return false - } - - if s.children[a].b == c { - s = singleRotate(c, s) - } else { - s = doubleRotate(c, s) - } - *t = s - return true -} - -func singleRotate(c int8, s *AVLTreeNode) *AVLTreeNode { - s.b = 0 - s = rotate(c, s) - s.b = 0 - return s -} - -func doubleRotate(c int8, s *AVLTreeNode) *AVLTreeNode { - a := (c + 1) / 2 - r := s.children[a] - s.children[a] = rotate(-c, s.children[a]) - p := rotate(c, s) - - switch { - default: - s.b = 0 - r.b = 0 - case p.b == c: - s.b = -c - r.b = 0 - case p.b == -c: - s.b = 0 - r.b = c - } - - p.b = 0 - return p -} - -func rotate(c int8, s *AVLTreeNode) *AVLTreeNode { - a := (c + 1) / 2 - r := s.children[a] - s.children[a] = r.children[a^1] - if s.children[a] != nil { - s.children[a].parent = s - } - r.children[a^1] = s - r.parent = s.parent - s.parent = r - return r -} - -func (tree *AVLTree) bottom(d int) *AVLTreeNode { - n := tree.root - if n == nil { - return nil - } - - for c := n.children[d]; c != nil; c = n.children[d] { - n = c - } - return n -} - -// Prev returns the previous element in an inorder -// walk of the AVL tree. -func (node *AVLTreeNode) Prev() *AVLTreeNode { - return node.walk1(0) -} - -// Next returns the next element in an inorder -// walk of the AVL tree. 
-func (node *AVLTreeNode) Next() *AVLTreeNode { - return node.walk1(1) -} - -func (node *AVLTreeNode) walk1(a int) *AVLTreeNode { - if node == nil { - return nil - } - n := node - if n.children[a] != nil { - n = n.children[a] - for n.children[a^1] != nil { - n = n.children[a^1] - } - return n - } - - p := n.parent - for p != nil && p.children[a] == n { - n = p - p = p.parent - } - return p -} - -func output(node *AVLTreeNode, prefix string, isTail bool, str *string) { - if node.children[1] != nil { - newPrefix := prefix - if isTail { - newPrefix += "│ " - } else { - newPrefix += " " - } - output(node.children[1], newPrefix, false, str) - } - *str += prefix - if isTail { - *str += "└── " - } else { - *str += "┌── " - } - *str += fmt.Sprintf("%v\n", node.Key) - if node.children[0] != nil { - newPrefix := prefix - if isTail { - newPrefix += " " - } else { - newPrefix += "│ " - } - output(node.children[0], newPrefix, true, str) - } -} - -// MarshalJSON implements the interface MarshalJSON for json.Marshal. -func (tree AVLTree) MarshalJSON() (jsonBytes []byte, err error) { - if tree.root == nil { - return []byte("null"), nil - } - buffer := bytes.NewBuffer(nil) - buffer.WriteByte('{') - tree.Iterator(func(key, value interface{}) bool { - valueBytes, valueJsonErr := json.Marshal(value) - if valueJsonErr != nil { - err = valueJsonErr - return false - } - if buffer.Len() > 1 { - buffer.WriteByte(',') - } - buffer.WriteString(fmt.Sprintf(`"%v":%s`, key, valueBytes)) - return true - }) - buffer.WriteByte('}') - return buffer.Bytes(), nil -} - -// getComparator returns the comparator if it's previously set, -// or else it panics. 
-func (tree *AVLTree) getComparator() func(a, b interface{}) int { - if tree.comparator == nil { - panic("comparator is missing for tree") - } - return tree.comparator -} diff --git a/vendor/github.com/gogf/gf/v2/container/gtree/gtree_btree.go b/vendor/github.com/gogf/gf/v2/container/gtree/gtree_btree.go deleted file mode 100644 index fd6c06ce..00000000 --- a/vendor/github.com/gogf/gf/v2/container/gtree/gtree_btree.go +++ /dev/null @@ -1,979 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gtree - -import ( - "bytes" - "context" - "fmt" - "strings" - - "github.com/gogf/gf/v2/container/gvar" - "github.com/gogf/gf/v2/internal/intlog" - "github.com/gogf/gf/v2/internal/json" - "github.com/gogf/gf/v2/internal/rwmutex" - "github.com/gogf/gf/v2/util/gconv" -) - -// BTree holds elements of the B-tree. -type BTree struct { - mu rwmutex.RWMutex - root *BTreeNode - comparator func(v1, v2 interface{}) int - size int // Total number of keys in the tree - m int // order (maximum number of children) -} - -// BTreeNode is a single element within the tree. -type BTreeNode struct { - Parent *BTreeNode - Entries []*BTreeEntry // Contained keys in node - Children []*BTreeNode // Children nodes -} - -// BTreeEntry represents the key-value pair contained within nodes. -type BTreeEntry struct { - Key interface{} - Value interface{} -} - -// NewBTree instantiates a B-tree with `m` (maximum number of children) and a custom key comparator. -// The parameter `safe` is used to specify whether using tree in concurrent-safety, -// which is false in default. -// Note that the `m` must be greater or equal than 3, or else it panics. 
-func NewBTree(m int, comparator func(v1, v2 interface{}) int, safe ...bool) *BTree { - if m < 3 { - panic("Invalid order, should be at least 3") - } - return &BTree{ - comparator: comparator, - mu: rwmutex.Create(safe...), - m: m, - } -} - -// NewBTreeFrom instantiates a B-tree with `m` (maximum number of children), a custom key comparator and data map. -// The parameter `safe` is used to specify whether using tree in concurrent-safety, -// which is false in default. -func NewBTreeFrom(m int, comparator func(v1, v2 interface{}) int, data map[interface{}]interface{}, safe ...bool) *BTree { - tree := NewBTree(m, comparator, safe...) - for k, v := range data { - tree.doSet(k, v) - } - return tree -} - -// Clone returns a new tree with a copy of current tree. -func (tree *BTree) Clone() *BTree { - newTree := NewBTree(tree.m, tree.comparator, tree.mu.IsSafe()) - newTree.Sets(tree.Map()) - return newTree -} - -// Set inserts key-value item into the tree. -func (tree *BTree) Set(key interface{}, value interface{}) { - tree.mu.Lock() - defer tree.mu.Unlock() - tree.doSet(key, value) -} - -// doSet inserts key-value pair node into the tree. -// If key already exists, then its value is updated with the new value. -func (tree *BTree) doSet(key interface{}, value interface{}) { - entry := &BTreeEntry{Key: key, Value: value} - if tree.root == nil { - tree.root = &BTreeNode{Entries: []*BTreeEntry{entry}, Children: []*BTreeNode{}} - tree.size++ - return - } - - if tree.insert(tree.root, entry) { - tree.size++ - } -} - -// Sets batch sets key-values to the tree. -func (tree *BTree) Sets(data map[interface{}]interface{}) { - tree.mu.Lock() - defer tree.mu.Unlock() - for k, v := range data { - tree.doSet(k, v) - } -} - -// Get searches the node in the tree by `key` and returns its value or nil if key is not found in tree. 
-func (tree *BTree) Get(key interface{}) (value interface{}) { - value, _ = tree.Search(key) - return -} - -// doSetWithLockCheck checks whether value of the key exists with mutex.Lock, -// if not exists, set value to the map with given `key`, -// or else just return the existing value. -// -// When setting value, if `value` is type of , -// it will be executed with mutex.Lock of the hash map, -// and its return value will be set to the map with `key`. -// -// It returns value with given `key`. -func (tree *BTree) doSetWithLockCheck(key interface{}, value interface{}) interface{} { - tree.mu.Lock() - defer tree.mu.Unlock() - if entry := tree.doSearch(key); entry != nil { - return entry.Value - } - if f, ok := value.(func() interface{}); ok { - value = f() - } - if value != nil { - tree.doSet(key, value) - } - return value -} - -// GetOrSet returns the value by key, -// or sets value with given `value` if it does not exist and then returns this value. -func (tree *BTree) GetOrSet(key interface{}, value interface{}) interface{} { - if v, ok := tree.Search(key); !ok { - return tree.doSetWithLockCheck(key, value) - } else { - return v - } -} - -// GetOrSetFunc returns the value by key, -// or sets value with returned value of callback function `f` if it does not exist -// and then returns this value. -func (tree *BTree) GetOrSetFunc(key interface{}, f func() interface{}) interface{} { - if v, ok := tree.Search(key); !ok { - return tree.doSetWithLockCheck(key, f()) - } else { - return v - } -} - -// GetOrSetFuncLock returns the value by key, -// or sets value with returned value of callback function `f` if it does not exist -// and then returns this value. -// -// GetOrSetFuncLock differs with GetOrSetFunc function is that it executes function `f` -// with mutex.Lock of the hash map. 
-func (tree *BTree) GetOrSetFuncLock(key interface{}, f func() interface{}) interface{} { - if v, ok := tree.Search(key); !ok { - return tree.doSetWithLockCheck(key, f) - } else { - return v - } -} - -// GetVar returns a gvar.Var with the value by given `key`. -// The returned gvar.Var is un-concurrent safe. -func (tree *BTree) GetVar(key interface{}) *gvar.Var { - return gvar.New(tree.Get(key)) -} - -// GetVarOrSet returns a gvar.Var with result from GetVarOrSet. -// The returned gvar.Var is un-concurrent safe. -func (tree *BTree) GetVarOrSet(key interface{}, value interface{}) *gvar.Var { - return gvar.New(tree.GetOrSet(key, value)) -} - -// GetVarOrSetFunc returns a gvar.Var with result from GetOrSetFunc. -// The returned gvar.Var is un-concurrent safe. -func (tree *BTree) GetVarOrSetFunc(key interface{}, f func() interface{}) *gvar.Var { - return gvar.New(tree.GetOrSetFunc(key, f)) -} - -// GetVarOrSetFuncLock returns a gvar.Var with result from GetOrSetFuncLock. -// The returned gvar.Var is un-concurrent safe. -func (tree *BTree) GetVarOrSetFuncLock(key interface{}, f func() interface{}) *gvar.Var { - return gvar.New(tree.GetOrSetFuncLock(key, f)) -} - -// SetIfNotExist sets `value` to the map if the `key` does not exist, and then returns true. -// It returns false if `key` exists, and `value` would be ignored. -func (tree *BTree) SetIfNotExist(key interface{}, value interface{}) bool { - if !tree.Contains(key) { - tree.doSetWithLockCheck(key, value) - return true - } - return false -} - -// SetIfNotExistFunc sets value with return value of callback function `f`, and then returns true. -// It returns false if `key` exists, and `value` would be ignored. -func (tree *BTree) SetIfNotExistFunc(key interface{}, f func() interface{}) bool { - if !tree.Contains(key) { - tree.doSetWithLockCheck(key, f()) - return true - } - return false -} - -// SetIfNotExistFuncLock sets value with return value of callback function `f`, and then returns true. 
-// It returns false if `key` exists, and `value` would be ignored. -// -// SetIfNotExistFuncLock differs with SetIfNotExistFunc function is that -// it executes function `f` with mutex.Lock of the hash map. -func (tree *BTree) SetIfNotExistFuncLock(key interface{}, f func() interface{}) bool { - if !tree.Contains(key) { - tree.doSetWithLockCheck(key, f) - return true - } - return false -} - -// Contains checks whether `key` exists in the tree. -func (tree *BTree) Contains(key interface{}) bool { - _, ok := tree.Search(key) - return ok -} - -// doRemove removes the node from the tree by key. -// Key should adhere to the comparator's type assertion, otherwise method panics. -func (tree *BTree) doRemove(key interface{}) (value interface{}) { - node, index, found := tree.searchRecursively(tree.root, key) - if found { - value = node.Entries[index].Value - tree.delete(node, index) - tree.size-- - } - return -} - -// Remove removes the node from the tree by `key`. -func (tree *BTree) Remove(key interface{}) (value interface{}) { - tree.mu.Lock() - defer tree.mu.Unlock() - return tree.doRemove(key) -} - -// Removes batch deletes values of the tree by `keys`. -func (tree *BTree) Removes(keys []interface{}) { - tree.mu.Lock() - defer tree.mu.Unlock() - for _, key := range keys { - tree.doRemove(key) - } -} - -// IsEmpty returns true if tree does not contain any nodes -func (tree *BTree) IsEmpty() bool { - return tree.Size() == 0 -} - -// Size returns number of nodes in the tree. -func (tree *BTree) Size() int { - tree.mu.RLock() - defer tree.mu.RUnlock() - return tree.size -} - -// Keys returns all keys in asc order. -func (tree *BTree) Keys() []interface{} { - keys := make([]interface{}, tree.Size()) - index := 0 - tree.IteratorAsc(func(key, value interface{}) bool { - keys[index] = key - index++ - return true - }) - return keys -} - -// Values returns all values in asc order based on the key. 
-func (tree *BTree) Values() []interface{} { - values := make([]interface{}, tree.Size()) - index := 0 - tree.IteratorAsc(func(key, value interface{}) bool { - values[index] = value - index++ - return true - }) - return values -} - -// Map returns all key-value items as map. -func (tree *BTree) Map() map[interface{}]interface{} { - m := make(map[interface{}]interface{}, tree.Size()) - tree.IteratorAsc(func(key, value interface{}) bool { - m[key] = value - return true - }) - return m -} - -// MapStrAny returns all key-value items as map[string]interface{}. -func (tree *BTree) MapStrAny() map[string]interface{} { - m := make(map[string]interface{}, tree.Size()) - tree.IteratorAsc(func(key, value interface{}) bool { - m[gconv.String(key)] = value - return true - }) - return m -} - -// Clear removes all nodes from the tree. -func (tree *BTree) Clear() { - tree.mu.Lock() - defer tree.mu.Unlock() - tree.root = nil - tree.size = 0 -} - -// Replace the data of the tree with given `data`. -func (tree *BTree) Replace(data map[interface{}]interface{}) { - tree.mu.Lock() - defer tree.mu.Unlock() - tree.root = nil - tree.size = 0 - for k, v := range data { - tree.doSet(k, v) - } -} - -// Height returns the height of the tree. -func (tree *BTree) Height() int { - tree.mu.RLock() - defer tree.mu.RUnlock() - return tree.root.height() -} - -// Left returns the left-most (min) entry or nil if tree is empty. -func (tree *BTree) Left() *BTreeEntry { - tree.mu.RLock() - defer tree.mu.RUnlock() - node := tree.left(tree.root) - if node != nil { - return node.Entries[0] - } - return nil -} - -// Right returns the right-most (max) entry or nil if tree is empty. 
-func (tree *BTree) Right() *BTreeEntry { - tree.mu.RLock() - defer tree.mu.RUnlock() - node := tree.right(tree.root) - if node != nil { - return node.Entries[len(node.Entries)-1] - } - return nil -} - -// String returns a string representation of container (for debugging purposes) -func (tree *BTree) String() string { - if tree == nil { - return "" - } - tree.mu.RLock() - defer tree.mu.RUnlock() - var buffer bytes.Buffer - if tree.size != 0 { - tree.output(&buffer, tree.root, 0, true) - } - return buffer.String() -} - -// Search searches the tree with given `key`. -// Second return parameter `found` is true if key was found, otherwise false. -func (tree *BTree) Search(key interface{}) (value interface{}, found bool) { - tree.mu.RLock() - defer tree.mu.RUnlock() - node, index, found := tree.searchRecursively(tree.root, key) - if found { - return node.Entries[index].Value, true - } - return nil, false -} - -// Search searches the tree with given `key` without mutex. -// It returns the entry if found or otherwise nil. -func (tree *BTree) doSearch(key interface{}) *BTreeEntry { - node, index, found := tree.searchRecursively(tree.root, key) - if found { - return node.Entries[index] - } - return nil -} - -// Print prints the tree to stdout. -func (tree *BTree) Print() { - fmt.Println(tree.String()) -} - -// Iterator is alias of IteratorAsc. -func (tree *BTree) Iterator(f func(key, value interface{}) bool) { - tree.IteratorAsc(f) -} - -// IteratorFrom is alias of IteratorAscFrom. -func (tree *BTree) IteratorFrom(key interface{}, match bool, f func(key, value interface{}) bool) { - tree.IteratorAscFrom(key, match, f) -} - -// IteratorAsc iterates the tree readonly in ascending order with given callback function `f`. -// If `f` returns true, then it continues iterating; or false to stop. 
-func (tree *BTree) IteratorAsc(f func(key, value interface{}) bool) { - tree.mu.RLock() - defer tree.mu.RUnlock() - node := tree.left(tree.root) - if node == nil { - return - } - tree.doIteratorAsc(node, node.Entries[0], 0, f) -} - -// IteratorAscFrom iterates the tree readonly in ascending order with given callback function `f`. -// The parameter `key` specifies the start entry for iterating. The `match` specifies whether -// starting iterating if the `key` is fully matched, or else using index searching iterating. -// If `f` returns true, then it continues iterating; or false to stop. -func (tree *BTree) IteratorAscFrom(key interface{}, match bool, f func(key, value interface{}) bool) { - tree.mu.RLock() - defer tree.mu.RUnlock() - node, index, found := tree.searchRecursively(tree.root, key) - if match { - if found { - tree.doIteratorAsc(node, node.Entries[index], index, f) - } - } else { - if index >= 0 && index < len(node.Entries) { - tree.doIteratorAsc(node, node.Entries[index], index, f) - } - } -} - -func (tree *BTree) doIteratorAsc(node *BTreeNode, entry *BTreeEntry, index int, f func(key, value interface{}) bool) { - first := true -loop: - if entry == nil { - return - } - if !f(entry.Key, entry.Value) { - return - } - // Find current entry position in current node - if !first { - index, _ = tree.search(node, entry.Key) - } else { - first = false - } - // Try to go down to the child right of the current entry - if index+1 < len(node.Children) { - node = node.Children[index+1] - // Try to go down to the child left of the current node - for len(node.Children) > 0 { - node = node.Children[0] - } - // Return the left-most entry - entry = node.Entries[0] - goto loop - } - // Above assures that we have reached a leaf node, so return the next entry in current node (if any) - if index+1 < len(node.Entries) { - entry = node.Entries[index+1] - goto loop - } - // Reached leaf node and there are no entries to the right of the current entry, so go up to the parent - 
for node.Parent != nil { - node = node.Parent - // Find next entry position in current node (note: search returns the first equal or bigger than entry) - index, _ = tree.search(node, entry.Key) - // Check that there is a next entry position in current node - if index < len(node.Entries) { - entry = node.Entries[index] - goto loop - } - } -} - -// IteratorDesc iterates the tree readonly in descending order with given callback function `f`. -// If `f` returns true, then it continues iterating; or false to stop. -func (tree *BTree) IteratorDesc(f func(key, value interface{}) bool) { - tree.mu.RLock() - defer tree.mu.RUnlock() - node := tree.right(tree.root) - if node == nil { - return - } - index := len(node.Entries) - 1 - entry := node.Entries[index] - tree.doIteratorDesc(node, entry, index, f) -} - -// IteratorDescFrom iterates the tree readonly in descending order with given callback function `f`. -// The parameter `key` specifies the start entry for iterating. The `match` specifies whether -// starting iterating if the `key` is fully matched, or else using index searching iterating. -// If `f` returns true, then it continues iterating; or false to stop. -func (tree *BTree) IteratorDescFrom(key interface{}, match bool, f func(key, value interface{}) bool) { - tree.mu.RLock() - defer tree.mu.RUnlock() - node, index, found := tree.searchRecursively(tree.root, key) - if match { - if found { - tree.doIteratorDesc(node, node.Entries[index], index, f) - } - } else { - if index >= 0 && index < len(node.Entries) { - tree.doIteratorDesc(node, node.Entries[index], index, f) - } - } -} - -// IteratorDesc iterates the tree readonly in descending order with given callback function `f`. -// If `f` returns true, then it continues iterating; or false to stop. 
-func (tree *BTree) doIteratorDesc(node *BTreeNode, entry *BTreeEntry, index int, f func(key, value interface{}) bool) { - first := true -loop: - if entry == nil { - return - } - if !f(entry.Key, entry.Value) { - return - } - // Find current entry position in current node - if !first { - index, _ = tree.search(node, entry.Key) - } else { - first = false - } - // Try to go down to the child left of the current entry - if index < len(node.Children) { - node = node.Children[index] - // Try to go down to the child right of the current node - for len(node.Children) > 0 { - node = node.Children[len(node.Children)-1] - } - // Return the right-most entry - entry = node.Entries[len(node.Entries)-1] - goto loop - } - // Above assures that we have reached a leaf node, so return the previous entry in current node (if any) - if index-1 >= 0 { - entry = node.Entries[index-1] - goto loop - } - - // Reached leaf node and there are no entries to the left of the current entry, so go up to the parent - for node.Parent != nil { - node = node.Parent - // Find previous entry position in current node (note: search returns the first equal or bigger than entry) - index, _ = tree.search(node, entry.Key) - // Check that there is a previous entry position in current node - if index-1 >= 0 { - entry = node.Entries[index-1] - goto loop - } - } -} - -func (tree *BTree) output(buffer *bytes.Buffer, node *BTreeNode, level int, isTail bool) { - for e := 0; e < len(node.Entries)+1; e++ { - if e < len(node.Children) { - tree.output(buffer, node.Children[e], level+1, true) - } - if e < len(node.Entries) { - if _, err := buffer.WriteString(strings.Repeat(" ", level)); err != nil { - intlog.Errorf(context.TODO(), `%+v`, err) - } - if _, err := buffer.WriteString(fmt.Sprintf("%v", node.Entries[e].Key) + "\n"); err != nil { - intlog.Errorf(context.TODO(), `%+v`, err) - } - } - } -} - -func (node *BTreeNode) height() int { - h := 0 - n := node - for ; n != nil; n = n.Children[0] { - h++ - if 
len(n.Children) == 0 { - break - } - } - return h -} - -func (tree *BTree) isLeaf(node *BTreeNode) bool { - return len(node.Children) == 0 -} - -// func (tree *BTree) isFull(node *BTreeNode) bool { -// return len(node.Entries) == tree.maxEntries() -// } - -func (tree *BTree) shouldSplit(node *BTreeNode) bool { - return len(node.Entries) > tree.maxEntries() -} - -func (tree *BTree) maxChildren() int { - return tree.m -} - -func (tree *BTree) minChildren() int { - return (tree.m + 1) / 2 // ceil(m/2) -} - -func (tree *BTree) maxEntries() int { - return tree.maxChildren() - 1 -} - -func (tree *BTree) minEntries() int { - return tree.minChildren() - 1 -} - -func (tree *BTree) middle() int { - // "-1" to favor right nodes to have more keys when splitting - return (tree.m - 1) / 2 -} - -// search does search only within the single node among its entries -func (tree *BTree) search(node *BTreeNode, key interface{}) (index int, found bool) { - low, mid, high := 0, 0, len(node.Entries)-1 - for low <= high { - mid = low + (high-low)/2 - compare := tree.getComparator()(key, node.Entries[mid].Key) - switch { - case compare > 0: - low = mid + 1 - case compare < 0: - high = mid - 1 - case compare == 0: - return mid, true - } - } - return low, false -} - -// searchRecursively searches recursively down the tree starting at the startNode -func (tree *BTree) searchRecursively(startNode *BTreeNode, key interface{}) (node *BTreeNode, index int, found bool) { - if tree.size == 0 { - return nil, -1, false - } - node = startNode - for { - index, found = tree.search(node, key) - if found { - return node, index, true - } - if tree.isLeaf(node) { - return node, index, false - } - node = node.Children[index] - } -} - -func (tree *BTree) insert(node *BTreeNode, entry *BTreeEntry) (inserted bool) { - if tree.isLeaf(node) { - return tree.insertIntoLeaf(node, entry) - } - return tree.insertIntoInternal(node, entry) -} - -func (tree *BTree) insertIntoLeaf(node *BTreeNode, entry *BTreeEntry) 
(inserted bool) { - insertPosition, found := tree.search(node, entry.Key) - if found { - node.Entries[insertPosition] = entry - return false - } - // Insert entry's key in the middle of the node - node.Entries = append(node.Entries, nil) - copy(node.Entries[insertPosition+1:], node.Entries[insertPosition:]) - node.Entries[insertPosition] = entry - tree.split(node) - return true -} - -func (tree *BTree) insertIntoInternal(node *BTreeNode, entry *BTreeEntry) (inserted bool) { - insertPosition, found := tree.search(node, entry.Key) - if found { - node.Entries[insertPosition] = entry - return false - } - return tree.insert(node.Children[insertPosition], entry) -} - -func (tree *BTree) split(node *BTreeNode) { - if !tree.shouldSplit(node) { - return - } - - if node == tree.root { - tree.splitRoot() - return - } - - tree.splitNonRoot(node) -} - -func (tree *BTree) splitNonRoot(node *BTreeNode) { - middle := tree.middle() - parent := node.Parent - - left := &BTreeNode{Entries: append([]*BTreeEntry(nil), node.Entries[:middle]...), Parent: parent} - right := &BTreeNode{Entries: append([]*BTreeEntry(nil), node.Entries[middle+1:]...), Parent: parent} - - // Move children from the node to be split into left and right nodes - if !tree.isLeaf(node) { - left.Children = append([]*BTreeNode(nil), node.Children[:middle+1]...) - right.Children = append([]*BTreeNode(nil), node.Children[middle+1:]...) 
- setParent(left.Children, left) - setParent(right.Children, right) - } - - insertPosition, _ := tree.search(parent, node.Entries[middle].Key) - - // Insert middle key into parent - parent.Entries = append(parent.Entries, nil) - copy(parent.Entries[insertPosition+1:], parent.Entries[insertPosition:]) - parent.Entries[insertPosition] = node.Entries[middle] - - // Set child left of inserted key in parent to the created left node - parent.Children[insertPosition] = left - - // Set child right of inserted key in parent to the created right node - parent.Children = append(parent.Children, nil) - copy(parent.Children[insertPosition+2:], parent.Children[insertPosition+1:]) - parent.Children[insertPosition+1] = right - - tree.split(parent) -} - -func (tree *BTree) splitRoot() { - middle := tree.middle() - left := &BTreeNode{Entries: append([]*BTreeEntry(nil), tree.root.Entries[:middle]...)} - right := &BTreeNode{Entries: append([]*BTreeEntry(nil), tree.root.Entries[middle+1:]...)} - - // Move children from the node to be split into left and right nodes - if !tree.isLeaf(tree.root) { - left.Children = append([]*BTreeNode(nil), tree.root.Children[:middle+1]...) - right.Children = append([]*BTreeNode(nil), tree.root.Children[middle+1:]...) 
- setParent(left.Children, left) - setParent(right.Children, right) - } - - // Root is a node with one entry and two children (left and right) - newRoot := &BTreeNode{ - Entries: []*BTreeEntry{tree.root.Entries[middle]}, - Children: []*BTreeNode{left, right}, - } - - left.Parent = newRoot - right.Parent = newRoot - tree.root = newRoot -} - -func setParent(nodes []*BTreeNode, parent *BTreeNode) { - for _, node := range nodes { - node.Parent = parent - } -} - -func (tree *BTree) left(node *BTreeNode) *BTreeNode { - if tree.size == 0 { - return nil - } - current := node - for { - if tree.isLeaf(current) { - return current - } - current = current.Children[0] - } -} - -func (tree *BTree) right(node *BTreeNode) *BTreeNode { - if tree.size == 0 { - return nil - } - current := node - for { - if tree.isLeaf(current) { - return current - } - current = current.Children[len(current.Children)-1] - } -} - -// leftSibling returns the node's left sibling and child index (in parent) if it exists, otherwise (nil,-1) -// key is any of keys in node (could even be deleted). -func (tree *BTree) leftSibling(node *BTreeNode, key interface{}) (*BTreeNode, int) { - if node.Parent != nil { - index, _ := tree.search(node.Parent, key) - index-- - if index >= 0 && index < len(node.Parent.Children) { - return node.Parent.Children[index], index - } - } - return nil, -1 -} - -// rightSibling returns the node's right sibling and child index (in parent) if it exists, otherwise (nil,-1) -// key is any of keys in node (could even be deleted). 
-func (tree *BTree) rightSibling(node *BTreeNode, key interface{}) (*BTreeNode, int) { - if node.Parent != nil { - index, _ := tree.search(node.Parent, key) - index++ - if index < len(node.Parent.Children) { - return node.Parent.Children[index], index - } - } - return nil, -1 -} - -// delete deletes an entry in node at entries' index -// ref.: https://en.wikipedia.org/wiki/B-tree#Deletion -func (tree *BTree) delete(node *BTreeNode, index int) { - // deleting from a leaf node - if tree.isLeaf(node) { - deletedKey := node.Entries[index].Key - tree.deleteEntry(node, index) - tree.reBalance(node, deletedKey) - if len(tree.root.Entries) == 0 { - tree.root = nil - } - return - } - - // deleting from an internal node - leftLargestNode := tree.right(node.Children[index]) // largest node in the left sub-tree (assumed to exist) - leftLargestEntryIndex := len(leftLargestNode.Entries) - 1 - node.Entries[index] = leftLargestNode.Entries[leftLargestEntryIndex] - deletedKey := leftLargestNode.Entries[leftLargestEntryIndex].Key - tree.deleteEntry(leftLargestNode, leftLargestEntryIndex) - tree.reBalance(leftLargestNode, deletedKey) -} - -// reBalance reBalances the tree after deletion if necessary and returns true, otherwise false. -// Note that we first delete the entry and then call reBalance, thus the passed deleted key as reference. -func (tree *BTree) reBalance(node *BTreeNode, deletedKey interface{}) { - // check if re-balancing is needed - if node == nil || len(node.Entries) >= tree.minEntries() { - return - } - - // try to borrow from left sibling - leftSibling, leftSiblingIndex := tree.leftSibling(node, deletedKey) - if leftSibling != nil && len(leftSibling.Entries) > tree.minEntries() { - // rotate right - node.Entries = append([]*BTreeEntry{node.Parent.Entries[leftSiblingIndex]}, node.Entries...) 
// prepend parent's separator entry to node's entries - node.Parent.Entries[leftSiblingIndex] = leftSibling.Entries[len(leftSibling.Entries)-1] - tree.deleteEntry(leftSibling, len(leftSibling.Entries)-1) - if !tree.isLeaf(leftSibling) { - leftSiblingRightMostChild := leftSibling.Children[len(leftSibling.Children)-1] - leftSiblingRightMostChild.Parent = node - node.Children = append([]*BTreeNode{leftSiblingRightMostChild}, node.Children...) - tree.deleteChild(leftSibling, len(leftSibling.Children)-1) - } - return - } - - // try to borrow from right sibling - rightSibling, rightSiblingIndex := tree.rightSibling(node, deletedKey) - if rightSibling != nil && len(rightSibling.Entries) > tree.minEntries() { - // rotate left - node.Entries = append(node.Entries, node.Parent.Entries[rightSiblingIndex-1]) // append parent's separator entry to node's entries - node.Parent.Entries[rightSiblingIndex-1] = rightSibling.Entries[0] - tree.deleteEntry(rightSibling, 0) - if !tree.isLeaf(rightSibling) { - rightSiblingLeftMostChild := rightSibling.Children[0] - rightSiblingLeftMostChild.Parent = node - node.Children = append(node.Children, rightSiblingLeftMostChild) - tree.deleteChild(rightSibling, 0) - } - return - } - - // merge with siblings - if rightSibling != nil { - // merge with right sibling - node.Entries = append(node.Entries, node.Parent.Entries[rightSiblingIndex-1]) - node.Entries = append(node.Entries, rightSibling.Entries...) - deletedKey = node.Parent.Entries[rightSiblingIndex-1].Key - tree.deleteEntry(node.Parent, rightSiblingIndex-1) - tree.appendChildren(node.Parent.Children[rightSiblingIndex], node) - tree.deleteChild(node.Parent, rightSiblingIndex) - } else if leftSibling != nil { - // merge with left sibling - entries := append([]*BTreeEntry(nil), leftSibling.Entries...) - entries = append(entries, node.Parent.Entries[leftSiblingIndex]) - node.Entries = append(entries, node.Entries...) 
- deletedKey = node.Parent.Entries[leftSiblingIndex].Key - tree.deleteEntry(node.Parent, leftSiblingIndex) - tree.prependChildren(node.Parent.Children[leftSiblingIndex], node) - tree.deleteChild(node.Parent, leftSiblingIndex) - } - - // make the merged node the root if its parent was the root and the root is empty - if node.Parent == tree.root && len(tree.root.Entries) == 0 { - tree.root = node - node.Parent = nil - return - } - - // parent might be underflow, so try to reBalance if necessary - tree.reBalance(node.Parent, deletedKey) -} - -func (tree *BTree) prependChildren(fromNode *BTreeNode, toNode *BTreeNode) { - children := append([]*BTreeNode(nil), fromNode.Children...) - toNode.Children = append(children, toNode.Children...) - setParent(fromNode.Children, toNode) -} - -func (tree *BTree) appendChildren(fromNode *BTreeNode, toNode *BTreeNode) { - toNode.Children = append(toNode.Children, fromNode.Children...) - setParent(fromNode.Children, toNode) -} - -func (tree *BTree) deleteEntry(node *BTreeNode, index int) { - copy(node.Entries[index:], node.Entries[index+1:]) - node.Entries[len(node.Entries)-1] = nil - node.Entries = node.Entries[:len(node.Entries)-1] -} - -func (tree *BTree) deleteChild(node *BTreeNode, index int) { - if index >= len(node.Children) { - return - } - copy(node.Children[index:], node.Children[index+1:]) - node.Children[len(node.Children)-1] = nil - node.Children = node.Children[:len(node.Children)-1] -} - -// MarshalJSON implements the interface MarshalJSON for json.Marshal. 
-func (tree BTree) MarshalJSON() (jsonBytes []byte, err error) { - if tree.root == nil { - return []byte("null"), nil - } - buffer := bytes.NewBuffer(nil) - buffer.WriteByte('{') - tree.Iterator(func(key, value interface{}) bool { - valueBytes, valueJsonErr := json.Marshal(value) - if valueJsonErr != nil { - err = valueJsonErr - return false - } - if buffer.Len() > 1 { - buffer.WriteByte(',') - } - buffer.WriteString(fmt.Sprintf(`"%v":%s`, key, valueBytes)) - return true - }) - buffer.WriteByte('}') - return buffer.Bytes(), nil -} - -// getComparator returns the comparator if it's previously set, -// or else it panics. -func (tree *BTree) getComparator() func(a, b interface{}) int { - if tree.comparator == nil { - panic("comparator is missing for tree") - } - return tree.comparator -} diff --git a/vendor/github.com/gogf/gf/v2/container/gtree/gtree_redblacktree.go b/vendor/github.com/gogf/gf/v2/container/gtree/gtree_redblacktree.go deleted file mode 100644 index e9da6a07..00000000 --- a/vendor/github.com/gogf/gf/v2/container/gtree/gtree_redblacktree.go +++ /dev/null @@ -1,991 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gtree - -import ( - "bytes" - "fmt" - - "github.com/gogf/gf/v2/container/gvar" - "github.com/gogf/gf/v2/internal/json" - "github.com/gogf/gf/v2/internal/rwmutex" - "github.com/gogf/gf/v2/util/gconv" - "github.com/gogf/gf/v2/util/gutil" -) - -type color bool - -const ( - black, red color = true, false -) - -// RedBlackTree holds elements of the red-black tree. -type RedBlackTree struct { - mu rwmutex.RWMutex - root *RedBlackTreeNode - size int - comparator func(v1, v2 interface{}) int -} - -// RedBlackTreeNode is a single element within the tree. 
-type RedBlackTreeNode struct { - Key interface{} - Value interface{} - color color - left *RedBlackTreeNode - right *RedBlackTreeNode - parent *RedBlackTreeNode -} - -// NewRedBlackTree instantiates a red-black tree with the custom key comparator. -// The parameter `safe` is used to specify whether using tree in concurrent-safety, -// which is false in default. -func NewRedBlackTree(comparator func(v1, v2 interface{}) int, safe ...bool) *RedBlackTree { - return &RedBlackTree{ - mu: rwmutex.Create(safe...), - comparator: comparator, - } -} - -// NewRedBlackTreeFrom instantiates a red-black tree with the custom key comparator and `data` map. -// The parameter `safe` is used to specify whether using tree in concurrent-safety, -// which is false in default. -func NewRedBlackTreeFrom(comparator func(v1, v2 interface{}) int, data map[interface{}]interface{}, safe ...bool) *RedBlackTree { - tree := NewRedBlackTree(comparator, safe...) - for k, v := range data { - tree.doSet(k, v) - } - return tree -} - -// SetComparator sets/changes the comparator for sorting. -func (tree *RedBlackTree) SetComparator(comparator func(a, b interface{}) int) { - tree.mu.Lock() - defer tree.mu.Unlock() - tree.comparator = comparator - if tree.size > 0 { - data := make(map[interface{}]interface{}, tree.size) - tree.doIteratorAsc(tree.leftNode(), func(key, value interface{}) bool { - data[key] = value - return true - }) - // Resort the tree if comparator is changed. - tree.root = nil - tree.size = 0 - for k, v := range data { - tree.doSet(k, v) - } - } -} - -// Clone returns a new tree with a copy of current tree. -func (tree *RedBlackTree) Clone() *RedBlackTree { - newTree := NewRedBlackTree(tree.comparator, tree.mu.IsSafe()) - newTree.Sets(tree.Map()) - return newTree -} - -// Set inserts key-value item into the tree. 
-func (tree *RedBlackTree) Set(key interface{}, value interface{}) { - tree.mu.Lock() - defer tree.mu.Unlock() - tree.doSet(key, value) -} - -// Sets batch sets key-values to the tree. -func (tree *RedBlackTree) Sets(data map[interface{}]interface{}) { - tree.mu.Lock() - defer tree.mu.Unlock() - for k, v := range data { - tree.doSet(k, v) - } -} - -// doSet inserts key-value item into the tree without mutex. -func (tree *RedBlackTree) doSet(key interface{}, value interface{}) { - insertedNode := (*RedBlackTreeNode)(nil) - if tree.root == nil { - // Assert key is of comparator's type for initial tree - tree.getComparator()(key, key) - tree.root = &RedBlackTreeNode{Key: key, Value: value, color: red} - insertedNode = tree.root - } else { - node := tree.root - loop := true - for loop { - compare := tree.getComparator()(key, node.Key) - switch { - case compare == 0: - // node.Key = key - node.Value = value - return - case compare < 0: - if node.left == nil { - node.left = &RedBlackTreeNode{Key: key, Value: value, color: red} - insertedNode = node.left - loop = false - } else { - node = node.left - } - case compare > 0: - if node.right == nil { - node.right = &RedBlackTreeNode{Key: key, Value: value, color: red} - insertedNode = node.right - loop = false - } else { - node = node.right - } - } - } - insertedNode.parent = node - } - tree.insertCase1(insertedNode) - tree.size++ -} - -// Get searches the node in the tree by `key` and returns its value or nil if key is not found in tree. -func (tree *RedBlackTree) Get(key interface{}) (value interface{}) { - value, _ = tree.Search(key) - return -} - -// doSetWithLockCheck checks whether value of the key exists with mutex.Lock, -// if not exists, set value to the map with given `key`, -// or else just return the existing value. -// -// When setting value, if `value` is type of , -// it will be executed with mutex.Lock of the hash map, -// and its return value will be set to the map with `key`. 
-// -// It returns value with given `key`. -func (tree *RedBlackTree) doSetWithLockCheck(key interface{}, value interface{}) interface{} { - tree.mu.Lock() - defer tree.mu.Unlock() - if node, found := tree.doSearch(key); found { - return node.Value - } - if f, ok := value.(func() interface{}); ok { - value = f() - } - if value != nil { - tree.doSet(key, value) - } - return value -} - -// GetOrSet returns the value by key, -// or sets value with given `value` if it does not exist and then returns this value. -func (tree *RedBlackTree) GetOrSet(key interface{}, value interface{}) interface{} { - if v, ok := tree.Search(key); !ok { - return tree.doSetWithLockCheck(key, value) - } else { - return v - } -} - -// GetOrSetFunc returns the value by key, -// or sets value with returned value of callback function `f` if it does not exist -// and then returns this value. -func (tree *RedBlackTree) GetOrSetFunc(key interface{}, f func() interface{}) interface{} { - if v, ok := tree.Search(key); !ok { - return tree.doSetWithLockCheck(key, f()) - } else { - return v - } -} - -// GetOrSetFuncLock returns the value by key, -// or sets value with returned value of callback function `f` if it does not exist -// and then returns this value. -// -// GetOrSetFuncLock differs with GetOrSetFunc function is that it executes function `f` -// with mutex.Lock of the hash map. -func (tree *RedBlackTree) GetOrSetFuncLock(key interface{}, f func() interface{}) interface{} { - if v, ok := tree.Search(key); !ok { - return tree.doSetWithLockCheck(key, f) - } else { - return v - } -} - -// GetVar returns a gvar.Var with the value by given `key`. -// The returned gvar.Var is un-concurrent safe. -func (tree *RedBlackTree) GetVar(key interface{}) *gvar.Var { - return gvar.New(tree.Get(key)) -} - -// GetVarOrSet returns a gvar.Var with result from GetVarOrSet. -// The returned gvar.Var is un-concurrent safe. 
-func (tree *RedBlackTree) GetVarOrSet(key interface{}, value interface{}) *gvar.Var { - return gvar.New(tree.GetOrSet(key, value)) -} - -// GetVarOrSetFunc returns a gvar.Var with result from GetOrSetFunc. -// The returned gvar.Var is un-concurrent safe. -func (tree *RedBlackTree) GetVarOrSetFunc(key interface{}, f func() interface{}) *gvar.Var { - return gvar.New(tree.GetOrSetFunc(key, f)) -} - -// GetVarOrSetFuncLock returns a gvar.Var with result from GetOrSetFuncLock. -// The returned gvar.Var is un-concurrent safe. -func (tree *RedBlackTree) GetVarOrSetFuncLock(key interface{}, f func() interface{}) *gvar.Var { - return gvar.New(tree.GetOrSetFuncLock(key, f)) -} - -// SetIfNotExist sets `value` to the map if the `key` does not exist, and then returns true. -// It returns false if `key` exists, and `value` would be ignored. -func (tree *RedBlackTree) SetIfNotExist(key interface{}, value interface{}) bool { - if !tree.Contains(key) { - tree.doSetWithLockCheck(key, value) - return true - } - return false -} - -// SetIfNotExistFunc sets value with return value of callback function `f`, and then returns true. -// It returns false if `key` exists, and `value` would be ignored. -func (tree *RedBlackTree) SetIfNotExistFunc(key interface{}, f func() interface{}) bool { - if !tree.Contains(key) { - tree.doSetWithLockCheck(key, f()) - return true - } - return false -} - -// SetIfNotExistFuncLock sets value with return value of callback function `f`, and then returns true. -// It returns false if `key` exists, and `value` would be ignored. -// -// SetIfNotExistFuncLock differs with SetIfNotExistFunc function is that -// it executes function `f` with mutex.Lock of the hash map. -func (tree *RedBlackTree) SetIfNotExistFuncLock(key interface{}, f func() interface{}) bool { - if !tree.Contains(key) { - tree.doSetWithLockCheck(key, f) - return true - } - return false -} - -// Contains checks whether `key` exists in the tree. 
-func (tree *RedBlackTree) Contains(key interface{}) bool { - _, ok := tree.Search(key) - return ok -} - -// doRemove removes the node from the tree by `key` without mutex. -func (tree *RedBlackTree) doRemove(key interface{}) (value interface{}) { - child := (*RedBlackTreeNode)(nil) - node, found := tree.doSearch(key) - if !found { - return - } - value = node.Value - if node.left != nil && node.right != nil { - p := node.left.maximumNode() - node.Key = p.Key - node.Value = p.Value - node = p - } - if node.left == nil || node.right == nil { - if node.right == nil { - child = node.left - } else { - child = node.right - } - if node.color == black { - node.color = tree.nodeColor(child) - tree.deleteCase1(node) - } - tree.replaceNode(node, child) - if node.parent == nil && child != nil { - child.color = black - } - } - tree.size-- - return -} - -// Remove removes the node from the tree by `key`. -func (tree *RedBlackTree) Remove(key interface{}) (value interface{}) { - tree.mu.Lock() - defer tree.mu.Unlock() - return tree.doRemove(key) -} - -// Removes batch deletes values of the tree by `keys`. -func (tree *RedBlackTree) Removes(keys []interface{}) { - tree.mu.Lock() - defer tree.mu.Unlock() - for _, key := range keys { - tree.doRemove(key) - } -} - -// IsEmpty returns true if tree does not contain any nodes. -func (tree *RedBlackTree) IsEmpty() bool { - return tree.Size() == 0 -} - -// Size returns number of nodes in the tree. -func (tree *RedBlackTree) Size() int { - tree.mu.RLock() - defer tree.mu.RUnlock() - return tree.size -} - -// Keys returns all keys in asc order. -func (tree *RedBlackTree) Keys() []interface{} { - var ( - keys = make([]interface{}, tree.Size()) - index = 0 - ) - tree.IteratorAsc(func(key, value interface{}) bool { - keys[index] = key - index++ - return true - }) - return keys -} - -// Values returns all values in asc order based on the key. 
-func (tree *RedBlackTree) Values() []interface{} { - var ( - values = make([]interface{}, tree.Size()) - index = 0 - ) - tree.IteratorAsc(func(key, value interface{}) bool { - values[index] = value - index++ - return true - }) - return values -} - -// Map returns all key-value items as map. -func (tree *RedBlackTree) Map() map[interface{}]interface{} { - m := make(map[interface{}]interface{}, tree.Size()) - tree.IteratorAsc(func(key, value interface{}) bool { - m[key] = value - return true - }) - return m -} - -// MapStrAny returns all key-value items as map[string]interface{}. -func (tree *RedBlackTree) MapStrAny() map[string]interface{} { - m := make(map[string]interface{}, tree.Size()) - tree.IteratorAsc(func(key, value interface{}) bool { - m[gconv.String(key)] = value - return true - }) - return m -} - -// Left returns the left-most (min) node or nil if tree is empty. -func (tree *RedBlackTree) Left() *RedBlackTreeNode { - tree.mu.RLock() - defer tree.mu.RUnlock() - node := tree.leftNode() - if tree.mu.IsSafe() { - return &RedBlackTreeNode{ - Key: node.Key, - Value: node.Value, - } - } - return node -} - -// Right returns the right-most (max) node or nil if tree is empty. -func (tree *RedBlackTree) Right() *RedBlackTreeNode { - tree.mu.RLock() - defer tree.mu.RUnlock() - node := tree.rightNode() - if tree.mu.IsSafe() { - return &RedBlackTreeNode{ - Key: node.Key, - Value: node.Value, - } - } - return node -} - -// leftNode returns the left-most (min) node or nil if tree is empty. -func (tree *RedBlackTree) leftNode() *RedBlackTreeNode { - p := (*RedBlackTreeNode)(nil) - n := tree.root - for n != nil { - p = n - n = n.left - } - return p -} - -// rightNode returns the right-most (max) node or nil if tree is empty. 
-func (tree *RedBlackTree) rightNode() *RedBlackTreeNode { - p := (*RedBlackTreeNode)(nil) - n := tree.root - for n != nil { - p = n - n = n.right - } - return p -} - -// Floor Finds floor node of the input key, return the floor node or nil if no floor node is found. -// Second return parameter is true if floor was found, otherwise false. -// -// Floor node is defined as the largest node that its key is smaller than or equal to the given `key`. -// A floor node may not be found, either because the tree is empty, or because -// all nodes in the tree are larger than the given node. -func (tree *RedBlackTree) Floor(key interface{}) (floor *RedBlackTreeNode, found bool) { - tree.mu.RLock() - defer tree.mu.RUnlock() - n := tree.root - for n != nil { - compare := tree.getComparator()(key, n.Key) - switch { - case compare == 0: - return n, true - case compare < 0: - n = n.left - case compare > 0: - floor, found = n, true - n = n.right - } - } - if found { - return - } - return nil, false -} - -// Ceiling finds ceiling node of the input key, return the ceiling node or nil if no ceiling node is found. -// Second return parameter is true if ceiling was found, otherwise false. -// -// Ceiling node is defined as the smallest node that its key is larger than or equal to the given `key`. -// A ceiling node may not be found, either because the tree is empty, or because -// all nodes in the tree are smaller than the given node. -func (tree *RedBlackTree) Ceiling(key interface{}) (ceiling *RedBlackTreeNode, found bool) { - tree.mu.RLock() - defer tree.mu.RUnlock() - n := tree.root - for n != nil { - compare := tree.getComparator()(key, n.Key) - switch { - case compare == 0: - return n, true - case compare > 0: - n = n.right - case compare < 0: - ceiling, found = n, true - n = n.left - } - } - if found { - return - } - return nil, false -} - -// Iterator is alias of IteratorAsc. 
-func (tree *RedBlackTree) Iterator(f func(key, value interface{}) bool) { - tree.IteratorAsc(f) -} - -// IteratorFrom is alias of IteratorAscFrom. -func (tree *RedBlackTree) IteratorFrom(key interface{}, match bool, f func(key, value interface{}) bool) { - tree.IteratorAscFrom(key, match, f) -} - -// IteratorAsc iterates the tree readonly in ascending order with given callback function `f`. -// If `f` returns true, then it continues iterating; or false to stop. -func (tree *RedBlackTree) IteratorAsc(f func(key, value interface{}) bool) { - tree.mu.RLock() - defer tree.mu.RUnlock() - tree.doIteratorAsc(tree.leftNode(), f) -} - -// IteratorAscFrom iterates the tree readonly in ascending order with given callback function `f`. -// The parameter `key` specifies the start entry for iterating. The `match` specifies whether -// starting iterating if the `key` is fully matched, or else using index searching iterating. -// If `f` returns true, then it continues iterating; or false to stop. -func (tree *RedBlackTree) IteratorAscFrom(key interface{}, match bool, f func(key, value interface{}) bool) { - tree.mu.RLock() - defer tree.mu.RUnlock() - node, found := tree.doSearch(key) - if match { - if found { - tree.doIteratorAsc(node, f) - } - } else { - tree.doIteratorAsc(node, f) - } -} - -func (tree *RedBlackTree) doIteratorAsc(node *RedBlackTreeNode, f func(key, value interface{}) bool) { -loop: - if node == nil { - return - } - if !f(node.Key, node.Value) { - return - } - if node.right != nil { - node = node.right - for node.left != nil { - node = node.left - } - goto loop - } - if node.parent != nil { - old := node - for node.parent != nil { - node = node.parent - if tree.getComparator()(old.Key, node.Key) <= 0 { - goto loop - } - } - } -} - -// IteratorDesc iterates the tree readonly in descending order with given callback function `f`. -// If `f` returns true, then it continues iterating; or false to stop. 
-func (tree *RedBlackTree) IteratorDesc(f func(key, value interface{}) bool) { - tree.mu.RLock() - defer tree.mu.RUnlock() - tree.doIteratorDesc(tree.rightNode(), f) -} - -// IteratorDescFrom iterates the tree readonly in descending order with given callback function `f`. -// The parameter `key` specifies the start entry for iterating. The `match` specifies whether -// starting iterating if the `key` is fully matched, or else using index searching iterating. -// If `f` returns true, then it continues iterating; or false to stop. -func (tree *RedBlackTree) IteratorDescFrom(key interface{}, match bool, f func(key, value interface{}) bool) { - tree.mu.RLock() - defer tree.mu.RUnlock() - node, found := tree.doSearch(key) - if match { - if found { - tree.doIteratorDesc(node, f) - } - } else { - tree.doIteratorDesc(node, f) - } -} - -func (tree *RedBlackTree) doIteratorDesc(node *RedBlackTreeNode, f func(key, value interface{}) bool) { -loop: - if node == nil { - return - } - if !f(node.Key, node.Value) { - return - } - if node.left != nil { - node = node.left - for node.right != nil { - node = node.right - } - goto loop - } - if node.parent != nil { - old := node - for node.parent != nil { - node = node.parent - if tree.getComparator()(old.Key, node.Key) >= 0 { - goto loop - } - } - } -} - -// Clear removes all nodes from the tree. -func (tree *RedBlackTree) Clear() { - tree.mu.Lock() - defer tree.mu.Unlock() - tree.root = nil - tree.size = 0 -} - -// Replace the data of the tree with given `data`. -func (tree *RedBlackTree) Replace(data map[interface{}]interface{}) { - tree.mu.Lock() - defer tree.mu.Unlock() - tree.root = nil - tree.size = 0 - for k, v := range data { - tree.doSet(k, v) - } -} - -// String returns a string representation of container. 
-func (tree *RedBlackTree) String() string { - if tree == nil { - return "" - } - tree.mu.RLock() - defer tree.mu.RUnlock() - str := "" - if tree.size != 0 { - tree.output(tree.root, "", true, &str) - } - return str -} - -// Print prints the tree to stdout. -func (tree *RedBlackTree) Print() { - fmt.Println(tree.String()) -} - -// Search searches the tree with given `key`. -// Second return parameter `found` is true if key was found, otherwise false. -func (tree *RedBlackTree) Search(key interface{}) (value interface{}, found bool) { - tree.mu.RLock() - defer tree.mu.RUnlock() - node, found := tree.doSearch(key) - if found { - return node.Value, true - } - return nil, false -} - -// Flip exchanges key-value of the tree to value-key. -// Note that you should guarantee the value is the same type as key, -// or else the comparator would panic. -// -// If the type of value is different with key, you pass the new `comparator`. -func (tree *RedBlackTree) Flip(comparator ...func(v1, v2 interface{}) int) { - t := (*RedBlackTree)(nil) - if len(comparator) > 0 { - t = NewRedBlackTree(comparator[0], tree.mu.IsSafe()) - } else { - t = NewRedBlackTree(tree.comparator, tree.mu.IsSafe()) - } - tree.IteratorAsc(func(key, value interface{}) bool { - t.doSet(value, key) - return true - }) - tree.mu.Lock() - tree.root = t.root - tree.size = t.size - tree.mu.Unlock() -} - -func (tree *RedBlackTree) output(node *RedBlackTreeNode, prefix string, isTail bool, str *string) { - if node.right != nil { - newPrefix := prefix - if isTail { - newPrefix += "│ " - } else { - newPrefix += " " - } - tree.output(node.right, newPrefix, false, str) - } - *str += prefix - if isTail { - *str += "└── " - } else { - *str += "┌── " - } - *str += fmt.Sprintf("%v\n", node.Key) - if node.left != nil { - newPrefix := prefix - if isTail { - newPrefix += " " - } else { - newPrefix += "│ " - } - tree.output(node.left, newPrefix, true, str) - } -} - -// doSearch searches the tree with given `key` without mutex. 
-// It returns the node if found or otherwise nil. -func (tree *RedBlackTree) doSearch(key interface{}) (node *RedBlackTreeNode, found bool) { - node = tree.root - for node != nil { - compare := tree.getComparator()(key, node.Key) - switch { - case compare == 0: - return node, true - case compare < 0: - node = node.left - case compare > 0: - node = node.right - } - } - return node, false -} - -func (node *RedBlackTreeNode) grandparent() *RedBlackTreeNode { - if node != nil && node.parent != nil { - return node.parent.parent - } - return nil -} - -func (node *RedBlackTreeNode) uncle() *RedBlackTreeNode { - if node == nil || node.parent == nil || node.parent.parent == nil { - return nil - } - return node.parent.sibling() -} - -func (node *RedBlackTreeNode) sibling() *RedBlackTreeNode { - if node == nil || node.parent == nil { - return nil - } - if node == node.parent.left { - return node.parent.right - } - return node.parent.left -} - -func (tree *RedBlackTree) rotateLeft(node *RedBlackTreeNode) { - right := node.right - tree.replaceNode(node, right) - node.right = right.left - if right.left != nil { - right.left.parent = node - } - right.left = node - node.parent = right -} - -func (tree *RedBlackTree) rotateRight(node *RedBlackTreeNode) { - left := node.left - tree.replaceNode(node, left) - node.left = left.right - if left.right != nil { - left.right.parent = node - } - left.right = node - node.parent = left -} - -func (tree *RedBlackTree) replaceNode(old *RedBlackTreeNode, new *RedBlackTreeNode) { - if old.parent == nil { - tree.root = new - } else { - if old == old.parent.left { - old.parent.left = new - } else { - old.parent.right = new - } - } - if new != nil { - new.parent = old.parent - } -} - -func (tree *RedBlackTree) insertCase1(node *RedBlackTreeNode) { - if node.parent == nil { - node.color = black - } else { - tree.insertCase2(node) - } -} - -func (tree *RedBlackTree) insertCase2(node *RedBlackTreeNode) { - if tree.nodeColor(node.parent) == black { - 
return - } - tree.insertCase3(node) -} - -func (tree *RedBlackTree) insertCase3(node *RedBlackTreeNode) { - uncle := node.uncle() - if tree.nodeColor(uncle) == red { - node.parent.color = black - uncle.color = black - node.grandparent().color = red - tree.insertCase1(node.grandparent()) - } else { - tree.insertCase4(node) - } -} - -func (tree *RedBlackTree) insertCase4(node *RedBlackTreeNode) { - grandparent := node.grandparent() - if node == node.parent.right && node.parent == grandparent.left { - tree.rotateLeft(node.parent) - node = node.left - } else if node == node.parent.left && node.parent == grandparent.right { - tree.rotateRight(node.parent) - node = node.right - } - tree.insertCase5(node) -} - -func (tree *RedBlackTree) insertCase5(node *RedBlackTreeNode) { - node.parent.color = black - grandparent := node.grandparent() - grandparent.color = red - if node == node.parent.left && node.parent == grandparent.left { - tree.rotateRight(grandparent) - } else if node == node.parent.right && node.parent == grandparent.right { - tree.rotateLeft(grandparent) - } -} - -func (node *RedBlackTreeNode) maximumNode() *RedBlackTreeNode { - if node == nil { - return nil - } - for node.right != nil { - return node.right - } - return node -} - -func (tree *RedBlackTree) deleteCase1(node *RedBlackTreeNode) { - if node.parent == nil { - return - } - tree.deleteCase2(node) -} - -func (tree *RedBlackTree) deleteCase2(node *RedBlackTreeNode) { - sibling := node.sibling() - if tree.nodeColor(sibling) == red { - node.parent.color = red - sibling.color = black - if node == node.parent.left { - tree.rotateLeft(node.parent) - } else { - tree.rotateRight(node.parent) - } - } - tree.deleteCase3(node) -} - -func (tree *RedBlackTree) deleteCase3(node *RedBlackTreeNode) { - sibling := node.sibling() - if tree.nodeColor(node.parent) == black && - tree.nodeColor(sibling) == black && - tree.nodeColor(sibling.left) == black && - tree.nodeColor(sibling.right) == black { - sibling.color = red - 
tree.deleteCase1(node.parent) - } else { - tree.deleteCase4(node) - } -} - -func (tree *RedBlackTree) deleteCase4(node *RedBlackTreeNode) { - sibling := node.sibling() - if tree.nodeColor(node.parent) == red && - tree.nodeColor(sibling) == black && - tree.nodeColor(sibling.left) == black && - tree.nodeColor(sibling.right) == black { - sibling.color = red - node.parent.color = black - } else { - tree.deleteCase5(node) - } -} - -func (tree *RedBlackTree) deleteCase5(node *RedBlackTreeNode) { - sibling := node.sibling() - if node == node.parent.left && - tree.nodeColor(sibling) == black && - tree.nodeColor(sibling.left) == red && - tree.nodeColor(sibling.right) == black { - sibling.color = red - sibling.left.color = black - tree.rotateRight(sibling) - } else if node == node.parent.right && - tree.nodeColor(sibling) == black && - tree.nodeColor(sibling.right) == red && - tree.nodeColor(sibling.left) == black { - sibling.color = red - sibling.right.color = black - tree.rotateLeft(sibling) - } - tree.deleteCase6(node) -} - -func (tree *RedBlackTree) deleteCase6(node *RedBlackTreeNode) { - sibling := node.sibling() - sibling.color = tree.nodeColor(node.parent) - node.parent.color = black - if node == node.parent.left && tree.nodeColor(sibling.right) == red { - sibling.right.color = black - tree.rotateLeft(node.parent) - } else if tree.nodeColor(sibling.left) == red { - sibling.left.color = black - tree.rotateRight(node.parent) - } -} - -func (tree *RedBlackTree) nodeColor(node *RedBlackTreeNode) color { - if node == nil { - return black - } - return node.color -} - -// MarshalJSON implements the interface MarshalJSON for json.Marshal. 
-func (tree RedBlackTree) MarshalJSON() (jsonBytes []byte, err error) { - if tree.root == nil { - return []byte("null"), nil - } - buffer := bytes.NewBuffer(nil) - buffer.WriteByte('{') - tree.Iterator(func(key, value interface{}) bool { - valueBytes, valueJsonErr := json.Marshal(value) - if valueJsonErr != nil { - err = valueJsonErr - return false - } - if buffer.Len() > 1 { - buffer.WriteByte(',') - } - buffer.WriteString(fmt.Sprintf(`"%v":%s`, key, valueBytes)) - return true - }) - buffer.WriteByte('}') - return buffer.Bytes(), nil -} - -// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. -func (tree *RedBlackTree) UnmarshalJSON(b []byte) error { - tree.mu.Lock() - defer tree.mu.Unlock() - if tree.comparator == nil { - tree.comparator = gutil.ComparatorString - } - var data map[string]interface{} - if err := json.UnmarshalUseNumber(b, &data); err != nil { - return err - } - for k, v := range data { - tree.doSet(k, v) - } - return nil -} - -// UnmarshalValue is an interface implement which sets any type of value for map. -func (tree *RedBlackTree) UnmarshalValue(value interface{}) (err error) { - tree.mu.Lock() - defer tree.mu.Unlock() - if tree.comparator == nil { - tree.comparator = gutil.ComparatorString - } - for k, v := range gconv.Map(value) { - tree.doSet(k, v) - } - return -} - -// getComparator returns the comparator if it's previously set, -// or else it panics. -func (tree *RedBlackTree) getComparator() func(a, b interface{}) int { - if tree.comparator == nil { - panic("comparator is missing for tree") - } - return tree.comparator -} diff --git a/vendor/github.com/gogf/gf/v2/container/gtype/gtype.go b/vendor/github.com/gogf/gf/v2/container/gtype/gtype.go deleted file mode 100644 index d6711f2a..00000000 --- a/vendor/github.com/gogf/gf/v2/container/gtype/gtype.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. 
-// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -// Package gtype provides high performance and concurrent-safe basic variable types. -package gtype - -// New is alias of NewInterface. -// See NewInterface. -func New(value ...interface{}) *Interface { - return NewInterface(value...) -} diff --git a/vendor/github.com/gogf/gf/v2/container/gtype/gtype_bool.go b/vendor/github.com/gogf/gf/v2/container/gtype/gtype_bool.go deleted file mode 100644 index b85dfd91..00000000 --- a/vendor/github.com/gogf/gf/v2/container/gtype/gtype_bool.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gtype - -import ( - "bytes" - "sync/atomic" - - "github.com/gogf/gf/v2/util/gconv" -) - -// Bool is a struct for concurrent-safe operation for type bool. -type Bool struct { - value int32 -} - -var ( - bytesTrue = []byte("true") - bytesFalse = []byte("false") -) - -// NewBool creates and returns a concurrent-safe object for bool type, -// with given initial value `value`. -func NewBool(value ...bool) *Bool { - t := &Bool{} - if len(value) > 0 { - if value[0] { - t.value = 1 - } else { - t.value = 0 - } - } - return t -} - -// Clone clones and returns a new concurrent-safe object for bool type. -func (v *Bool) Clone() *Bool { - return NewBool(v.Val()) -} - -// Set atomically stores `value` into t.value and returns the previous value of t.value. -func (v *Bool) Set(value bool) (old bool) { - if value { - old = atomic.SwapInt32(&v.value, 1) == 1 - } else { - old = atomic.SwapInt32(&v.value, 0) == 1 - } - return -} - -// Val atomically loads and returns t.value. 
-func (v *Bool) Val() bool { - return atomic.LoadInt32(&v.value) > 0 -} - -// Cas executes the compare-and-swap operation for value. -func (v *Bool) Cas(old, new bool) (swapped bool) { - var oldInt32, newInt32 int32 - if old { - oldInt32 = 1 - } - if new { - newInt32 = 1 - } - return atomic.CompareAndSwapInt32(&v.value, oldInt32, newInt32) -} - -// String implements String interface for string printing. -func (v *Bool) String() string { - if v.Val() { - return "true" - } - return "false" -} - -// MarshalJSON implements the interface MarshalJSON for json.Marshal. -func (v Bool) MarshalJSON() ([]byte, error) { - if v.Val() { - return bytesTrue, nil - } - return bytesFalse, nil -} - -// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. -func (v *Bool) UnmarshalJSON(b []byte) error { - v.Set(gconv.Bool(bytes.Trim(b, `"`))) - return nil -} - -// UnmarshalValue is an interface implement which sets any type of value for `v`. -func (v *Bool) UnmarshalValue(value interface{}) error { - v.Set(gconv.Bool(value)) - return nil -} - -// DeepCopy implements interface for deep copy of current type. -func (v *Bool) DeepCopy() interface{} { - if v == nil { - return nil - } - return NewBool(v.Val()) -} diff --git a/vendor/github.com/gogf/gf/v2/container/gtype/gtype_byte.go b/vendor/github.com/gogf/gf/v2/container/gtype/gtype_byte.go deleted file mode 100644 index 836231c5..00000000 --- a/vendor/github.com/gogf/gf/v2/container/gtype/gtype_byte.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gtype - -import ( - "strconv" - "sync/atomic" - - "github.com/gogf/gf/v2/util/gconv" -) - -// Byte is a struct for concurrent-safe operation for type byte. 
-type Byte struct { - value int32 -} - -// NewByte creates and returns a concurrent-safe object for byte type, -// with given initial value `value`. -func NewByte(value ...byte) *Byte { - if len(value) > 0 { - return &Byte{ - value: int32(value[0]), - } - } - return &Byte{} -} - -// Clone clones and returns a new concurrent-safe object for byte type. -func (v *Byte) Clone() *Byte { - return NewByte(v.Val()) -} - -// Set atomically stores `value` into t.value and returns the previous value of t.value. -func (v *Byte) Set(value byte) (old byte) { - return byte(atomic.SwapInt32(&v.value, int32(value))) -} - -// Val atomically loads and returns t.value. -func (v *Byte) Val() byte { - return byte(atomic.LoadInt32(&v.value)) -} - -// Add atomically adds `delta` to t.value and returns the new value. -func (v *Byte) Add(delta byte) (new byte) { - return byte(atomic.AddInt32(&v.value, int32(delta))) -} - -// Cas executes the compare-and-swap operation for value. -func (v *Byte) Cas(old, new byte) (swapped bool) { - return atomic.CompareAndSwapInt32(&v.value, int32(old), int32(new)) -} - -// String implements String interface for string printing. -func (v *Byte) String() string { - return strconv.FormatUint(uint64(v.Val()), 10) -} - -// MarshalJSON implements the interface MarshalJSON for json.Marshal. -func (v Byte) MarshalJSON() ([]byte, error) { - return []byte(strconv.FormatUint(uint64(v.Val()), 10)), nil -} - -// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. -func (v *Byte) UnmarshalJSON(b []byte) error { - v.Set(gconv.Uint8(string(b))) - return nil -} - -// UnmarshalValue is an interface implement which sets any type of value for `v`. -func (v *Byte) UnmarshalValue(value interface{}) error { - v.Set(gconv.Byte(value)) - return nil -} - -// DeepCopy implements interface for deep copy of current type. 
-func (v *Byte) DeepCopy() interface{} { - if v == nil { - return nil - } - return NewByte(v.Val()) -} diff --git a/vendor/github.com/gogf/gf/v2/container/gtype/gtype_bytes.go b/vendor/github.com/gogf/gf/v2/container/gtype/gtype_bytes.go deleted file mode 100644 index e01ee418..00000000 --- a/vendor/github.com/gogf/gf/v2/container/gtype/gtype_bytes.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gtype - -import ( - "bytes" - "encoding/base64" - "sync/atomic" - - "github.com/gogf/gf/v2/errors/gerror" - "github.com/gogf/gf/v2/util/gconv" -) - -// Bytes is a struct for concurrent-safe operation for type []byte. -type Bytes struct { - value atomic.Value -} - -// NewBytes creates and returns a concurrent-safe object for []byte type, -// with given initial value `value`. -func NewBytes(value ...[]byte) *Bytes { - t := &Bytes{} - if len(value) > 0 { - t.value.Store(value[0]) - } - return t -} - -// Clone clones and returns a new shallow copy object for []byte type. -func (v *Bytes) Clone() *Bytes { - return NewBytes(v.Val()) -} - -// Set atomically stores `value` into t.value and returns the previous value of t.value. -// Note: The parameter `value` cannot be nil. -func (v *Bytes) Set(value []byte) (old []byte) { - old = v.Val() - v.value.Store(value) - return -} - -// Val atomically loads and returns t.value. -func (v *Bytes) Val() []byte { - if s := v.value.Load(); s != nil { - return s.([]byte) - } - return nil -} - -// String implements String interface for string printing. -func (v *Bytes) String() string { - return string(v.Val()) -} - -// MarshalJSON implements the interface MarshalJSON for json.Marshal. 
-func (v Bytes) MarshalJSON() ([]byte, error) { - val := v.Val() - dst := make([]byte, base64.StdEncoding.EncodedLen(len(val))) - base64.StdEncoding.Encode(dst, val) - return []byte(`"` + string(dst) + `"`), nil -} - -// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. -func (v *Bytes) UnmarshalJSON(b []byte) error { - var ( - src = make([]byte, base64.StdEncoding.DecodedLen(len(b))) - n, err = base64.StdEncoding.Decode(src, bytes.Trim(b, `"`)) - ) - if err != nil { - err = gerror.Wrap(err, `base64.StdEncoding.Decode failed`) - return err - } - v.Set(src[:n]) - return nil -} - -// UnmarshalValue is an interface implement which sets any type of value for `v`. -func (v *Bytes) UnmarshalValue(value interface{}) error { - v.Set(gconv.Bytes(value)) - return nil -} - -// DeepCopy implements interface for deep copy of current type. -func (v *Bytes) DeepCopy() interface{} { - if v == nil { - return nil - } - oldBytes := v.Val() - newBytes := make([]byte, len(oldBytes)) - copy(newBytes, oldBytes) - return NewBytes(newBytes) -} diff --git a/vendor/github.com/gogf/gf/v2/container/gtype/gtype_float32.go b/vendor/github.com/gogf/gf/v2/container/gtype/gtype_float32.go deleted file mode 100644 index 82289abb..00000000 --- a/vendor/github.com/gogf/gf/v2/container/gtype/gtype_float32.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gtype - -import ( - "math" - "strconv" - "sync/atomic" - - "github.com/gogf/gf/v2/util/gconv" -) - -// Float32 is a struct for concurrent-safe operation for type float32. -type Float32 struct { - value uint32 -} - -// NewFloat32 creates and returns a concurrent-safe object for float32 type, -// with given initial value `value`. 
-func NewFloat32(value ...float32) *Float32 { - if len(value) > 0 { - return &Float32{ - value: math.Float32bits(value[0]), - } - } - return &Float32{} -} - -// Clone clones and returns a new concurrent-safe object for float32 type. -func (v *Float32) Clone() *Float32 { - return NewFloat32(v.Val()) -} - -// Set atomically stores `value` into t.value and returns the previous value of t.value. -func (v *Float32) Set(value float32) (old float32) { - return math.Float32frombits(atomic.SwapUint32(&v.value, math.Float32bits(value))) -} - -// Val atomically loads and returns t.value. -func (v *Float32) Val() float32 { - return math.Float32frombits(atomic.LoadUint32(&v.value)) -} - -// Add atomically adds `delta` to t.value and returns the new value. -func (v *Float32) Add(delta float32) (new float32) { - for { - old := math.Float32frombits(v.value) - new = old + delta - if atomic.CompareAndSwapUint32( - &v.value, - math.Float32bits(old), - math.Float32bits(new), - ) { - break - } - } - return -} - -// Cas executes the compare-and-swap operation for value. -func (v *Float32) Cas(old, new float32) (swapped bool) { - return atomic.CompareAndSwapUint32(&v.value, math.Float32bits(old), math.Float32bits(new)) -} - -// String implements String interface for string printing. -func (v *Float32) String() string { - return strconv.FormatFloat(float64(v.Val()), 'g', -1, 32) -} - -// MarshalJSON implements the interface MarshalJSON for json.Marshal. -func (v Float32) MarshalJSON() ([]byte, error) { - return []byte(strconv.FormatFloat(float64(v.Val()), 'g', -1, 32)), nil -} - -// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. -func (v *Float32) UnmarshalJSON(b []byte) error { - v.Set(gconv.Float32(string(b))) - return nil -} - -// UnmarshalValue is an interface implement which sets any type of value for `v`. 
-func (v *Float32) UnmarshalValue(value interface{}) error { - v.Set(gconv.Float32(value)) - return nil -} - -// DeepCopy implements interface for deep copy of current type. -func (v *Float32) DeepCopy() interface{} { - if v == nil { - return nil - } - return NewFloat32(v.Val()) -} diff --git a/vendor/github.com/gogf/gf/v2/container/gtype/gtype_float64.go b/vendor/github.com/gogf/gf/v2/container/gtype/gtype_float64.go deleted file mode 100644 index ce44abd1..00000000 --- a/vendor/github.com/gogf/gf/v2/container/gtype/gtype_float64.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gtype - -import ( - "math" - "strconv" - "sync/atomic" - - "github.com/gogf/gf/v2/util/gconv" -) - -// Float64 is a struct for concurrent-safe operation for type float64. -type Float64 struct { - value uint64 -} - -// NewFloat64 creates and returns a concurrent-safe object for float64 type, -// with given initial value `value`. -func NewFloat64(value ...float64) *Float64 { - if len(value) > 0 { - return &Float64{ - value: math.Float64bits(value[0]), - } - } - return &Float64{} -} - -// Clone clones and returns a new concurrent-safe object for float64 type. -func (v *Float64) Clone() *Float64 { - return NewFloat64(v.Val()) -} - -// Set atomically stores `value` into t.value and returns the previous value of t.value. -func (v *Float64) Set(value float64) (old float64) { - return math.Float64frombits(atomic.SwapUint64(&v.value, math.Float64bits(value))) -} - -// Val atomically loads and returns t.value. -func (v *Float64) Val() float64 { - return math.Float64frombits(atomic.LoadUint64(&v.value)) -} - -// Add atomically adds `delta` to t.value and returns the new value. 
-func (v *Float64) Add(delta float64) (new float64) { - for { - old := math.Float64frombits(v.value) - new = old + delta - if atomic.CompareAndSwapUint64( - &v.value, - math.Float64bits(old), - math.Float64bits(new), - ) { - break - } - } - return -} - -// Cas executes the compare-and-swap operation for value. -func (v *Float64) Cas(old, new float64) (swapped bool) { - return atomic.CompareAndSwapUint64(&v.value, math.Float64bits(old), math.Float64bits(new)) -} - -// String implements String interface for string printing. -func (v *Float64) String() string { - return strconv.FormatFloat(v.Val(), 'g', -1, 64) -} - -// MarshalJSON implements the interface MarshalJSON for json.Marshal. -func (v Float64) MarshalJSON() ([]byte, error) { - return []byte(strconv.FormatFloat(v.Val(), 'g', -1, 64)), nil -} - -// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. -func (v *Float64) UnmarshalJSON(b []byte) error { - v.Set(gconv.Float64(string(b))) - return nil -} - -// UnmarshalValue is an interface implement which sets any type of value for `v`. -func (v *Float64) UnmarshalValue(value interface{}) error { - v.Set(gconv.Float64(value)) - return nil -} - -// DeepCopy implements interface for deep copy of current type. -func (v *Float64) DeepCopy() interface{} { - if v == nil { - return nil - } - return NewFloat64(v.Val()) -} diff --git a/vendor/github.com/gogf/gf/v2/container/gtype/gtype_int.go b/vendor/github.com/gogf/gf/v2/container/gtype/gtype_int.go deleted file mode 100644 index 32a610fb..00000000 --- a/vendor/github.com/gogf/gf/v2/container/gtype/gtype_int.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. 
- -package gtype - -import ( - "strconv" - "sync/atomic" - - "github.com/gogf/gf/v2/util/gconv" -) - -// Int is a struct for concurrent-safe operation for type int. -type Int struct { - value int64 -} - -// NewInt creates and returns a concurrent-safe object for int type, -// with given initial value `value`. -func NewInt(value ...int) *Int { - if len(value) > 0 { - return &Int{ - value: int64(value[0]), - } - } - return &Int{} -} - -// Clone clones and returns a new concurrent-safe object for int type. -func (v *Int) Clone() *Int { - return NewInt(v.Val()) -} - -// Set atomically stores `value` into t.value and returns the previous value of t.value. -func (v *Int) Set(value int) (old int) { - return int(atomic.SwapInt64(&v.value, int64(value))) -} - -// Val atomically loads and returns t.value. -func (v *Int) Val() int { - return int(atomic.LoadInt64(&v.value)) -} - -// Add atomically adds `delta` to t.value and returns the new value. -func (v *Int) Add(delta int) (new int) { - return int(atomic.AddInt64(&v.value, int64(delta))) -} - -// Cas executes the compare-and-swap operation for value. -func (v *Int) Cas(old, new int) (swapped bool) { - return atomic.CompareAndSwapInt64(&v.value, int64(old), int64(new)) -} - -// String implements String interface for string printing. -func (v *Int) String() string { - return strconv.Itoa(v.Val()) -} - -// MarshalJSON implements the interface MarshalJSON for json.Marshal. -func (v Int) MarshalJSON() ([]byte, error) { - return []byte(strconv.Itoa(v.Val())), nil -} - -// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. -func (v *Int) UnmarshalJSON(b []byte) error { - v.Set(gconv.Int(string(b))) - return nil -} - -// UnmarshalValue is an interface implement which sets any type of value for `v`. -func (v *Int) UnmarshalValue(value interface{}) error { - v.Set(gconv.Int(value)) - return nil -} - -// DeepCopy implements interface for deep copy of current type. 
-func (v *Int) DeepCopy() interface{} { - if v == nil { - return nil - } - return NewInt(v.Val()) -} diff --git a/vendor/github.com/gogf/gf/v2/container/gtype/gtype_int32.go b/vendor/github.com/gogf/gf/v2/container/gtype/gtype_int32.go deleted file mode 100644 index 58ad36a3..00000000 --- a/vendor/github.com/gogf/gf/v2/container/gtype/gtype_int32.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gtype - -import ( - "strconv" - "sync/atomic" - - "github.com/gogf/gf/v2/util/gconv" -) - -// Int32 is a struct for concurrent-safe operation for type int32. -type Int32 struct { - value int32 -} - -// NewInt32 creates and returns a concurrent-safe object for int32 type, -// with given initial value `value`. -func NewInt32(value ...int32) *Int32 { - if len(value) > 0 { - return &Int32{ - value: value[0], - } - } - return &Int32{} -} - -// Clone clones and returns a new concurrent-safe object for int32 type. -func (v *Int32) Clone() *Int32 { - return NewInt32(v.Val()) -} - -// Set atomically stores `value` into t.value and returns the previous value of t.value. -func (v *Int32) Set(value int32) (old int32) { - return atomic.SwapInt32(&v.value, value) -} - -// Val atomically loads and returns t.value. -func (v *Int32) Val() int32 { - return atomic.LoadInt32(&v.value) -} - -// Add atomically adds `delta` to t.value and returns the new value. -func (v *Int32) Add(delta int32) (new int32) { - return atomic.AddInt32(&v.value, delta) -} - -// Cas executes the compare-and-swap operation for value. -func (v *Int32) Cas(old, new int32) (swapped bool) { - return atomic.CompareAndSwapInt32(&v.value, old, new) -} - -// String implements String interface for string printing. 
-func (v *Int32) String() string { - return strconv.Itoa(int(v.Val())) -} - -// MarshalJSON implements the interface MarshalJSON for json.Marshal. -func (v Int32) MarshalJSON() ([]byte, error) { - return []byte(strconv.Itoa(int(v.Val()))), nil -} - -// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. -func (v *Int32) UnmarshalJSON(b []byte) error { - v.Set(gconv.Int32(string(b))) - return nil -} - -// UnmarshalValue is an interface implement which sets any type of value for `v`. -func (v *Int32) UnmarshalValue(value interface{}) error { - v.Set(gconv.Int32(value)) - return nil -} - -// DeepCopy implements interface for deep copy of current type. -func (v *Int32) DeepCopy() interface{} { - if v == nil { - return nil - } - return NewInt32(v.Val()) -} diff --git a/vendor/github.com/gogf/gf/v2/container/gtype/gtype_int64.go b/vendor/github.com/gogf/gf/v2/container/gtype/gtype_int64.go deleted file mode 100644 index 54e72c12..00000000 --- a/vendor/github.com/gogf/gf/v2/container/gtype/gtype_int64.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gtype - -import ( - "strconv" - "sync/atomic" - - "github.com/gogf/gf/v2/util/gconv" -) - -// Int64 is a struct for concurrent-safe operation for type int64. -type Int64 struct { - value int64 -} - -// NewInt64 creates and returns a concurrent-safe object for int64 type, -// with given initial value `value`. -func NewInt64(value ...int64) *Int64 { - if len(value) > 0 { - return &Int64{ - value: value[0], - } - } - return &Int64{} -} - -// Clone clones and returns a new concurrent-safe object for int64 type. 
-func (v *Int64) Clone() *Int64 { - return NewInt64(v.Val()) -} - -// Set atomically stores `value` into t.value and returns the previous value of t.value. -func (v *Int64) Set(value int64) (old int64) { - return atomic.SwapInt64(&v.value, value) -} - -// Val atomically loads and returns t.value. -func (v *Int64) Val() int64 { - return atomic.LoadInt64(&v.value) -} - -// Add atomically adds `delta` to t.value and returns the new value. -func (v *Int64) Add(delta int64) (new int64) { - return atomic.AddInt64(&v.value, delta) -} - -// Cas executes the compare-and-swap operation for value. -func (v *Int64) Cas(old, new int64) (swapped bool) { - return atomic.CompareAndSwapInt64(&v.value, old, new) -} - -// String implements String interface for string printing. -func (v *Int64) String() string { - return strconv.FormatInt(v.Val(), 10) -} - -// MarshalJSON implements the interface MarshalJSON for json.Marshal. -func (v Int64) MarshalJSON() ([]byte, error) { - return []byte(strconv.FormatInt(v.Val(), 10)), nil -} - -// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. -func (v *Int64) UnmarshalJSON(b []byte) error { - v.Set(gconv.Int64(string(b))) - return nil -} - -// UnmarshalValue is an interface implement which sets any type of value for `v`. -func (v *Int64) UnmarshalValue(value interface{}) error { - v.Set(gconv.Int64(value)) - return nil -} - -// DeepCopy implements interface for deep copy of current type. -func (v *Int64) DeepCopy() interface{} { - if v == nil { - return nil - } - return NewInt64(v.Val()) -} diff --git a/vendor/github.com/gogf/gf/v2/container/gtype/gtype_interface.go b/vendor/github.com/gogf/gf/v2/container/gtype/gtype_interface.go deleted file mode 100644 index 9e57abb6..00000000 --- a/vendor/github.com/gogf/gf/v2/container/gtype/gtype_interface.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. 
-// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gtype - -import ( - "sync/atomic" - - "github.com/gogf/gf/v2/internal/deepcopy" - "github.com/gogf/gf/v2/internal/json" - "github.com/gogf/gf/v2/util/gconv" -) - -// Interface is a struct for concurrent-safe operation for type interface{}. -type Interface struct { - value atomic.Value -} - -// NewInterface creates and returns a concurrent-safe object for interface{} type, -// with given initial value `value`. -func NewInterface(value ...interface{}) *Interface { - t := &Interface{} - if len(value) > 0 && value[0] != nil { - t.value.Store(value[0]) - } - return t -} - -// Clone clones and returns a new concurrent-safe object for interface{} type. -func (v *Interface) Clone() *Interface { - return NewInterface(v.Val()) -} - -// Set atomically stores `value` into t.value and returns the previous value of t.value. -// Note: The parameter `value` cannot be nil. -func (v *Interface) Set(value interface{}) (old interface{}) { - old = v.Val() - v.value.Store(value) - return -} - -// Val atomically loads and returns t.value. -func (v *Interface) Val() interface{} { - return v.value.Load() -} - -// String implements String interface for string printing. -func (v *Interface) String() string { - return gconv.String(v.Val()) -} - -// MarshalJSON implements the interface MarshalJSON for json.Marshal. -func (v Interface) MarshalJSON() ([]byte, error) { - return json.Marshal(v.Val()) -} - -// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. -func (v *Interface) UnmarshalJSON(b []byte) error { - var i interface{} - if err := json.UnmarshalUseNumber(b, &i); err != nil { - return err - } - v.Set(i) - return nil -} - -// UnmarshalValue is an interface implement which sets any type of value for `v`. 
-func (v *Interface) UnmarshalValue(value interface{}) error { - v.Set(value) - return nil -} - -// DeepCopy implements interface for deep copy of current type. -func (v *Interface) DeepCopy() interface{} { - if v == nil { - return nil - } - return NewInterface(deepcopy.Copy(v.Val())) -} diff --git a/vendor/github.com/gogf/gf/v2/container/gtype/gtype_string.go b/vendor/github.com/gogf/gf/v2/container/gtype/gtype_string.go deleted file mode 100644 index b753e0fa..00000000 --- a/vendor/github.com/gogf/gf/v2/container/gtype/gtype_string.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gtype - -import ( - "bytes" - "sync/atomic" - - "github.com/gogf/gf/v2/util/gconv" -) - -// String is a struct for concurrent-safe operation for type string. -type String struct { - value atomic.Value -} - -// NewString creates and returns a concurrent-safe object for string type, -// with given initial value `value`. -func NewString(value ...string) *String { - t := &String{} - if len(value) > 0 { - t.value.Store(value[0]) - } - return t -} - -// Clone clones and returns a new concurrent-safe object for string type. -func (v *String) Clone() *String { - return NewString(v.Val()) -} - -// Set atomically stores `value` into t.value and returns the previous value of t.value. -func (v *String) Set(value string) (old string) { - old = v.Val() - v.value.Store(value) - return -} - -// Val atomically loads and returns t.value. -func (v *String) Val() string { - s := v.value.Load() - if s != nil { - return s.(string) - } - return "" -} - -// String implements String interface for string printing. -func (v *String) String() string { - return v.Val() -} - -// MarshalJSON implements the interface MarshalJSON for json.Marshal. 
-func (v String) MarshalJSON() ([]byte, error) { - return []byte(`"` + v.Val() + `"`), nil -} - -// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. -func (v *String) UnmarshalJSON(b []byte) error { - v.Set(string(bytes.Trim(b, `"`))) - return nil -} - -// UnmarshalValue is an interface implement which sets any type of value for `v`. -func (v *String) UnmarshalValue(value interface{}) error { - v.Set(gconv.String(value)) - return nil -} - -// DeepCopy implements interface for deep copy of current type. -func (v *String) DeepCopy() interface{} { - if v == nil { - return nil - } - return NewString(v.Val()) -} diff --git a/vendor/github.com/gogf/gf/v2/container/gtype/gtype_uint.go b/vendor/github.com/gogf/gf/v2/container/gtype/gtype_uint.go deleted file mode 100644 index fa00f472..00000000 --- a/vendor/github.com/gogf/gf/v2/container/gtype/gtype_uint.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gtype - -import ( - "strconv" - "sync/atomic" - - "github.com/gogf/gf/v2/util/gconv" -) - -// Uint is a struct for concurrent-safe operation for type uint. -type Uint struct { - value uint64 -} - -// NewUint creates and returns a concurrent-safe object for uint type, -// with given initial value `value`. -func NewUint(value ...uint) *Uint { - if len(value) > 0 { - return &Uint{ - value: uint64(value[0]), - } - } - return &Uint{} -} - -// Clone clones and returns a new concurrent-safe object for uint type. -func (v *Uint) Clone() *Uint { - return NewUint(v.Val()) -} - -// Set atomically stores `value` into t.value and returns the previous value of t.value. 
-func (v *Uint) Set(value uint) (old uint) { - return uint(atomic.SwapUint64(&v.value, uint64(value))) -} - -// Val atomically loads and returns t.value. -func (v *Uint) Val() uint { - return uint(atomic.LoadUint64(&v.value)) -} - -// Add atomically adds `delta` to t.value and returns the new value. -func (v *Uint) Add(delta uint) (new uint) { - return uint(atomic.AddUint64(&v.value, uint64(delta))) -} - -// Cas executes the compare-and-swap operation for value. -func (v *Uint) Cas(old, new uint) (swapped bool) { - return atomic.CompareAndSwapUint64(&v.value, uint64(old), uint64(new)) -} - -// String implements String interface for string printing. -func (v *Uint) String() string { - return strconv.FormatUint(uint64(v.Val()), 10) -} - -// MarshalJSON implements the interface MarshalJSON for json.Marshal. -func (v Uint) MarshalJSON() ([]byte, error) { - return []byte(strconv.FormatUint(uint64(v.Val()), 10)), nil -} - -// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. -func (v *Uint) UnmarshalJSON(b []byte) error { - v.Set(gconv.Uint(string(b))) - return nil -} - -// UnmarshalValue is an interface implement which sets any type of value for `v`. -func (v *Uint) UnmarshalValue(value interface{}) error { - v.Set(gconv.Uint(value)) - return nil -} - -// DeepCopy implements interface for deep copy of current type. -func (v *Uint) DeepCopy() interface{} { - if v == nil { - return nil - } - return NewUint(v.Val()) -} diff --git a/vendor/github.com/gogf/gf/v2/container/gtype/gtype_uint32.go b/vendor/github.com/gogf/gf/v2/container/gtype/gtype_uint32.go deleted file mode 100644 index 58df2e2c..00000000 --- a/vendor/github.com/gogf/gf/v2/container/gtype/gtype_uint32.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. 
-// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gtype - -import ( - "strconv" - "sync/atomic" - - "github.com/gogf/gf/v2/util/gconv" -) - -// Uint32 is a struct for concurrent-safe operation for type uint32. -type Uint32 struct { - value uint32 -} - -// NewUint32 creates and returns a concurrent-safe object for uint32 type, -// with given initial value `value`. -func NewUint32(value ...uint32) *Uint32 { - if len(value) > 0 { - return &Uint32{ - value: value[0], - } - } - return &Uint32{} -} - -// Clone clones and returns a new concurrent-safe object for uint32 type. -func (v *Uint32) Clone() *Uint32 { - return NewUint32(v.Val()) -} - -// Set atomically stores `value` into t.value and returns the previous value of t.value. -func (v *Uint32) Set(value uint32) (old uint32) { - return atomic.SwapUint32(&v.value, value) -} - -// Val atomically loads and returns t.value. -func (v *Uint32) Val() uint32 { - return atomic.LoadUint32(&v.value) -} - -// Add atomically adds `delta` to t.value and returns the new value. -func (v *Uint32) Add(delta uint32) (new uint32) { - return atomic.AddUint32(&v.value, delta) -} - -// Cas executes the compare-and-swap operation for value. -func (v *Uint32) Cas(old, new uint32) (swapped bool) { - return atomic.CompareAndSwapUint32(&v.value, old, new) -} - -// String implements String interface for string printing. -func (v *Uint32) String() string { - return strconv.FormatUint(uint64(v.Val()), 10) -} - -// MarshalJSON implements the interface MarshalJSON for json.Marshal. -func (v Uint32) MarshalJSON() ([]byte, error) { - return []byte(strconv.FormatUint(uint64(v.Val()), 10)), nil -} - -// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. -func (v *Uint32) UnmarshalJSON(b []byte) error { - v.Set(gconv.Uint32(string(b))) - return nil -} - -// UnmarshalValue is an interface implement which sets any type of value for `v`. 
-func (v *Uint32) UnmarshalValue(value interface{}) error { - v.Set(gconv.Uint32(value)) - return nil -} - -// DeepCopy implements interface for deep copy of current type. -func (v *Uint32) DeepCopy() interface{} { - if v == nil { - return nil - } - return NewUint32(v.Val()) -} diff --git a/vendor/github.com/gogf/gf/v2/container/gtype/gtype_uint64.go b/vendor/github.com/gogf/gf/v2/container/gtype/gtype_uint64.go deleted file mode 100644 index 3b54eb63..00000000 --- a/vendor/github.com/gogf/gf/v2/container/gtype/gtype_uint64.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gtype - -import ( - "strconv" - "sync/atomic" - - "github.com/gogf/gf/v2/util/gconv" -) - -// Uint64 is a struct for concurrent-safe operation for type uint64. -type Uint64 struct { - value uint64 -} - -// NewUint64 creates and returns a concurrent-safe object for uint64 type, -// with given initial value `value`. -func NewUint64(value ...uint64) *Uint64 { - if len(value) > 0 { - return &Uint64{ - value: value[0], - } - } - return &Uint64{} -} - -// Clone clones and returns a new concurrent-safe object for uint64 type. -func (v *Uint64) Clone() *Uint64 { - return NewUint64(v.Val()) -} - -// Set atomically stores `value` into t.value and returns the previous value of t.value. -func (v *Uint64) Set(value uint64) (old uint64) { - return atomic.SwapUint64(&v.value, value) -} - -// Val atomically loads and returns t.value. -func (v *Uint64) Val() uint64 { - return atomic.LoadUint64(&v.value) -} - -// Add atomically adds `delta` to t.value and returns the new value. -func (v *Uint64) Add(delta uint64) (new uint64) { - return atomic.AddUint64(&v.value, delta) -} - -// Cas executes the compare-and-swap operation for value. 
-func (v *Uint64) Cas(old, new uint64) (swapped bool) { - return atomic.CompareAndSwapUint64(&v.value, old, new) -} - -// String implements String interface for string printing. -func (v *Uint64) String() string { - return strconv.FormatUint(v.Val(), 10) -} - -// MarshalJSON implements the interface MarshalJSON for json.Marshal. -func (v Uint64) MarshalJSON() ([]byte, error) { - return []byte(strconv.FormatUint(v.Val(), 10)), nil -} - -// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. -func (v *Uint64) UnmarshalJSON(b []byte) error { - v.Set(gconv.Uint64(string(b))) - return nil -} - -// UnmarshalValue is an interface implement which sets any type of value for `v`. -func (v *Uint64) UnmarshalValue(value interface{}) error { - v.Set(gconv.Uint64(value)) - return nil -} - -// DeepCopy implements interface for deep copy of current type. -func (v *Uint64) DeepCopy() interface{} { - if v == nil { - return nil - } - return NewUint64(v.Val()) -} diff --git a/vendor/github.com/gogf/gf/v2/container/gvar/gvar.go b/vendor/github.com/gogf/gf/v2/container/gvar/gvar.go deleted file mode 100644 index 3b77e78a..00000000 --- a/vendor/github.com/gogf/gf/v2/container/gvar/gvar.go +++ /dev/null @@ -1,205 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -// Package gvar provides an universal variable type, like generics. -package gvar - -import ( - "time" - - "github.com/gogf/gf/v2/container/gtype" - "github.com/gogf/gf/v2/internal/deepcopy" - "github.com/gogf/gf/v2/internal/json" - "github.com/gogf/gf/v2/os/gtime" - "github.com/gogf/gf/v2/util/gconv" - "github.com/gogf/gf/v2/util/gutil" -) - -// Var is an universal variable type implementer. -type Var struct { - value interface{} // Underlying value. - safe bool // Concurrent safe or not. 
-} - -// New creates and returns a new Var with given `value`. -// The optional parameter `safe` specifies whether Var is used in concurrent-safety, -// which is false in default. -func New(value interface{}, safe ...bool) *Var { - if len(safe) > 0 && safe[0] { - return &Var{ - value: gtype.NewInterface(value), - safe: true, - } - } - return &Var{ - value: value, - } -} - -// Copy does a deep copy of current Var and returns a pointer to this Var. -func (v *Var) Copy() *Var { - return New(gutil.Copy(v.Val()), v.safe) -} - -// Clone does a shallow copy of current Var and returns a pointer to this Var. -func (v *Var) Clone() *Var { - return New(v.Val(), v.safe) -} - -// Set sets `value` to `v`, and returns the old value. -func (v *Var) Set(value interface{}) (old interface{}) { - if v.safe { - if t, ok := v.value.(*gtype.Interface); ok { - old = t.Set(value) - return - } - } - old = v.value - v.value = value - return -} - -// Val returns the current value of `v`. -func (v *Var) Val() interface{} { - if v == nil { - return nil - } - if v.safe { - if t, ok := v.value.(*gtype.Interface); ok { - return t.Val() - } - } - return v.value -} - -// Interface is alias of Val. -func (v *Var) Interface() interface{} { - return v.Val() -} - -// Bytes converts and returns `v` as []byte. -func (v *Var) Bytes() []byte { - return gconv.Bytes(v.Val()) -} - -// String converts and returns `v` as string. -func (v *Var) String() string { - return gconv.String(v.Val()) -} - -// Bool converts and returns `v` as bool. -func (v *Var) Bool() bool { - return gconv.Bool(v.Val()) -} - -// Int converts and returns `v` as int. -func (v *Var) Int() int { - return gconv.Int(v.Val()) -} - -// Int8 converts and returns `v` as int8. -func (v *Var) Int8() int8 { - return gconv.Int8(v.Val()) -} - -// Int16 converts and returns `v` as int16. -func (v *Var) Int16() int16 { - return gconv.Int16(v.Val()) -} - -// Int32 converts and returns `v` as int32. 
-func (v *Var) Int32() int32 { - return gconv.Int32(v.Val()) -} - -// Int64 converts and returns `v` as int64. -func (v *Var) Int64() int64 { - return gconv.Int64(v.Val()) -} - -// Uint converts and returns `v` as uint. -func (v *Var) Uint() uint { - return gconv.Uint(v.Val()) -} - -// Uint8 converts and returns `v` as uint8. -func (v *Var) Uint8() uint8 { - return gconv.Uint8(v.Val()) -} - -// Uint16 converts and returns `v` as uint16. -func (v *Var) Uint16() uint16 { - return gconv.Uint16(v.Val()) -} - -// Uint32 converts and returns `v` as uint32. -func (v *Var) Uint32() uint32 { - return gconv.Uint32(v.Val()) -} - -// Uint64 converts and returns `v` as uint64. -func (v *Var) Uint64() uint64 { - return gconv.Uint64(v.Val()) -} - -// Float32 converts and returns `v` as float32. -func (v *Var) Float32() float32 { - return gconv.Float32(v.Val()) -} - -// Float64 converts and returns `v` as float64. -func (v *Var) Float64() float64 { - return gconv.Float64(v.Val()) -} - -// Time converts and returns `v` as time.Time. -// The parameter `format` specifies the format of the time string using gtime, -// eg: Y-m-d H:i:s. -func (v *Var) Time(format ...string) time.Time { - return gconv.Time(v.Val(), format...) -} - -// Duration converts and returns `v` as time.Duration. -// If value of `v` is string, then it uses time.ParseDuration for conversion. -func (v *Var) Duration() time.Duration { - return gconv.Duration(v.Val()) -} - -// GTime converts and returns `v` as *gtime.Time. -// The parameter `format` specifies the format of the time string using gtime, -// eg: Y-m-d H:i:s. -func (v *Var) GTime(format ...string) *gtime.Time { - return gconv.GTime(v.Val(), format...) -} - -// MarshalJSON implements the interface MarshalJSON for json.Marshal. -func (v Var) MarshalJSON() ([]byte, error) { - return json.Marshal(v.Val()) -} - -// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. 
-func (v *Var) UnmarshalJSON(b []byte) error { - var i interface{} - if err := json.UnmarshalUseNumber(b, &i); err != nil { - return err - } - v.Set(i) - return nil -} - -// UnmarshalValue is an interface implement which sets any type of value for Var. -func (v *Var) UnmarshalValue(value interface{}) error { - v.Set(value) - return nil -} - -// DeepCopy implements interface for deep copy of current type. -func (v *Var) DeepCopy() interface{} { - if v == nil { - return nil - } - return New(deepcopy.Copy(v.Val()), v.safe) -} diff --git a/vendor/github.com/gogf/gf/v2/container/gvar/gvar_is.go b/vendor/github.com/gogf/gf/v2/container/gvar/gvar_is.go deleted file mode 100644 index 497996cd..00000000 --- a/vendor/github.com/gogf/gf/v2/container/gvar/gvar_is.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gvar - -import ( - "github.com/gogf/gf/v2/internal/utils" -) - -// IsNil checks whether `v` is nil. -func (v *Var) IsNil() bool { - return utils.IsNil(v.Val()) -} - -// IsEmpty checks whether `v` is empty. -func (v *Var) IsEmpty() bool { - return utils.IsEmpty(v.Val()) -} - -// IsInt checks whether `v` is type of int. -func (v *Var) IsInt() bool { - return utils.IsInt(v.Val()) -} - -// IsUint checks whether `v` is type of uint. -func (v *Var) IsUint() bool { - return utils.IsUint(v.Val()) -} - -// IsFloat checks whether `v` is type of float. -func (v *Var) IsFloat() bool { - return utils.IsFloat(v.Val()) -} - -// IsSlice checks whether `v` is type of slice. -func (v *Var) IsSlice() bool { - return utils.IsSlice(v.Val()) -} - -// IsMap checks whether `v` is type of map. -func (v *Var) IsMap() bool { - return utils.IsMap(v.Val()) -} - -// IsStruct checks whether `v` is type of struct. 
-func (v *Var) IsStruct() bool { - return utils.IsStruct(v.Val()) -} diff --git a/vendor/github.com/gogf/gf/v2/container/gvar/gvar_list.go b/vendor/github.com/gogf/gf/v2/container/gvar/gvar_list.go deleted file mode 100644 index 1f24bca8..00000000 --- a/vendor/github.com/gogf/gf/v2/container/gvar/gvar_list.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gvar - -import ( - "github.com/gogf/gf/v2/util/gutil" -) - -// ListItemValues retrieves and returns the elements of all item struct/map with key `key`. -// Note that the parameter `list` should be type of slice which contains elements of map or struct, -// or else it returns an empty slice. -func (v *Var) ListItemValues(key interface{}) (values []interface{}) { - return gutil.ListItemValues(v.Val(), key) -} - -// ListItemValuesUnique retrieves and returns the unique elements of all struct/map with key `key`. -// Note that the parameter `list` should be type of slice which contains elements of map or struct, -// or else it returns an empty slice. -func (v *Var) ListItemValuesUnique(key string) []interface{} { - return gutil.ListItemValuesUnique(v.Val(), key) -} diff --git a/vendor/github.com/gogf/gf/v2/container/gvar/gvar_map.go b/vendor/github.com/gogf/gf/v2/container/gvar/gvar_map.go deleted file mode 100644 index 268d9f14..00000000 --- a/vendor/github.com/gogf/gf/v2/container/gvar/gvar_map.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. 
- -package gvar - -import "github.com/gogf/gf/v2/util/gconv" - -// Map converts and returns `v` as map[string]interface{}. -func (v *Var) Map(tags ...string) map[string]interface{} { - return gconv.Map(v.Val(), tags...) -} - -// MapStrAny is like function Map, but implements the interface of MapStrAny. -func (v *Var) MapStrAny() map[string]interface{} { - return v.Map() -} - -// MapStrStr converts and returns `v` as map[string]string. -func (v *Var) MapStrStr(tags ...string) map[string]string { - return gconv.MapStrStr(v.Val(), tags...) -} - -// MapStrVar converts and returns `v` as map[string]Var. -func (v *Var) MapStrVar(tags ...string) map[string]*Var { - m := v.Map(tags...) - if len(m) > 0 { - vMap := make(map[string]*Var, len(m)) - for k, v := range m { - vMap[k] = New(v) - } - return vMap - } - return nil -} - -// MapDeep converts and returns `v` as map[string]interface{} recursively. -func (v *Var) MapDeep(tags ...string) map[string]interface{} { - return gconv.MapDeep(v.Val(), tags...) -} - -// MapStrStrDeep converts and returns `v` as map[string]string recursively. -func (v *Var) MapStrStrDeep(tags ...string) map[string]string { - return gconv.MapStrStrDeep(v.Val(), tags...) -} - -// MapStrVarDeep converts and returns `v` as map[string]*Var recursively. -func (v *Var) MapStrVarDeep(tags ...string) map[string]*Var { - m := v.MapDeep(tags...) - if len(m) > 0 { - vMap := make(map[string]*Var, len(m)) - for k, v := range m { - vMap[k] = New(v) - } - return vMap - } - return nil -} - -// Maps converts and returns `v` as map[string]string. -// See gconv.Maps. -func (v *Var) Maps(tags ...string) []map[string]interface{} { - return gconv.Maps(v.Val(), tags...) -} - -// MapsDeep converts `value` to []map[string]interface{} recursively. -// See gconv.MapsDeep. -func (v *Var) MapsDeep(tags ...string) []map[string]interface{} { - return gconv.MapsDeep(v.Val(), tags...) -} - -// MapToMap converts any map type variable `params` to another map type variable `pointer`. 
-// See gconv.MapToMap. -func (v *Var) MapToMap(pointer interface{}, mapping ...map[string]string) (err error) { - return gconv.MapToMap(v.Val(), pointer, mapping...) -} - -// MapToMaps converts any map type variable `params` to another map type variable `pointer`. -// See gconv.MapToMaps. -func (v *Var) MapToMaps(pointer interface{}, mapping ...map[string]string) (err error) { - return gconv.MapToMaps(v.Val(), pointer, mapping...) -} - -// MapToMapsDeep converts any map type variable `params` to another map type variable -// `pointer` recursively. -// See gconv.MapToMapsDeep. -func (v *Var) MapToMapsDeep(pointer interface{}, mapping ...map[string]string) (err error) { - return gconv.MapToMaps(v.Val(), pointer, mapping...) -} diff --git a/vendor/github.com/gogf/gf/v2/container/gvar/gvar_scan.go b/vendor/github.com/gogf/gf/v2/container/gvar/gvar_scan.go deleted file mode 100644 index 469005b5..00000000 --- a/vendor/github.com/gogf/gf/v2/container/gvar/gvar_scan.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gvar - -import ( - "github.com/gogf/gf/v2/util/gconv" -) - -// Scan automatically checks the type of `pointer` and converts `params` to `pointer`. It supports `pointer` -// with type of `*map/*[]map/*[]*map/*struct/**struct/*[]struct/*[]*struct` for converting. -// -// See gconv.Scan. -func (v *Var) Scan(pointer interface{}, mapping ...map[string]string) error { - return gconv.Scan(v.Val(), pointer, mapping...) 
-} diff --git a/vendor/github.com/gogf/gf/v2/container/gvar/gvar_slice.go b/vendor/github.com/gogf/gf/v2/container/gvar/gvar_slice.go deleted file mode 100644 index 02a61aa2..00000000 --- a/vendor/github.com/gogf/gf/v2/container/gvar/gvar_slice.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gvar - -import "github.com/gogf/gf/v2/util/gconv" - -// Ints converts and returns `v` as []int. -func (v *Var) Ints() []int { - return gconv.Ints(v.Val()) -} - -// Int64s converts and returns `v` as []int64. -func (v *Var) Int64s() []int64 { - return gconv.Int64s(v.Val()) -} - -// Uints converts and returns `v` as []uint. -func (v *Var) Uints() []uint { - return gconv.Uints(v.Val()) -} - -// Uint64s converts and returns `v` as []uint64. -func (v *Var) Uint64s() []uint64 { - return gconv.Uint64s(v.Val()) -} - -// Floats is alias of Float64s. -func (v *Var) Floats() []float64 { - return gconv.Floats(v.Val()) -} - -// Float32s converts and returns `v` as []float32. -func (v *Var) Float32s() []float32 { - return gconv.Float32s(v.Val()) -} - -// Float64s converts and returns `v` as []float64. -func (v *Var) Float64s() []float64 { - return gconv.Float64s(v.Val()) -} - -// Strings converts and returns `v` as []string. -func (v *Var) Strings() []string { - return gconv.Strings(v.Val()) -} - -// Interfaces converts and returns `v` as []interfaces{}. -func (v *Var) Interfaces() []interface{} { - return gconv.Interfaces(v.Val()) -} - -// Slice is alias of Interfaces. -func (v *Var) Slice() []interface{} { - return v.Interfaces() -} - -// Array is alias of Interfaces. -func (v *Var) Array() []interface{} { - return v.Interfaces() -} - -// Vars converts and returns `v` as []Var. 
-func (v *Var) Vars() []*Var { - array := gconv.Interfaces(v.Val()) - if len(array) == 0 { - return nil - } - vars := make([]*Var, len(array)) - for k, v := range array { - vars[k] = New(v) - } - return vars -} diff --git a/vendor/github.com/gogf/gf/v2/container/gvar/gvar_struct.go b/vendor/github.com/gogf/gf/v2/container/gvar/gvar_struct.go deleted file mode 100644 index 30ca794b..00000000 --- a/vendor/github.com/gogf/gf/v2/container/gvar/gvar_struct.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gvar - -import ( - "github.com/gogf/gf/v2/util/gconv" -) - -// Struct maps value of `v` to `pointer`. -// The parameter `pointer` should be a pointer to a struct instance. -// The parameter `mapping` is used to specify the key-to-attribute mapping rules. -func (v *Var) Struct(pointer interface{}, mapping ...map[string]string) error { - return gconv.Struct(v.Val(), pointer, mapping...) -} - -// Structs converts and returns `v` as given struct slice. -func (v *Var) Structs(pointer interface{}, mapping ...map[string]string) error { - return gconv.Structs(v.Val(), pointer, mapping...) -} diff --git a/vendor/github.com/gogf/gf/v2/container/gvar/gvar_vars.go b/vendor/github.com/gogf/gf/v2/container/gvar/gvar_vars.go deleted file mode 100644 index f566a782..00000000 --- a/vendor/github.com/gogf/gf/v2/container/gvar/gvar_vars.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gvar - -import ( - "github.com/gogf/gf/v2/util/gconv" -) - -// Vars is a slice of *Var. 
-type Vars []*Var - -// Strings converts and returns `vs` as []string. -func (vs Vars) Strings() (s []string) { - for _, v := range vs { - s = append(s, v.String()) - } - return s -} - -// Interfaces converts and returns `vs` as []interface{}. -func (vs Vars) Interfaces() (s []interface{}) { - for _, v := range vs { - s = append(s, v.Val()) - } - return s -} - -// Float32s converts and returns `vs` as []float32. -func (vs Vars) Float32s() (s []float32) { - for _, v := range vs { - s = append(s, v.Float32()) - } - return s -} - -// Float64s converts and returns `vs` as []float64. -func (vs Vars) Float64s() (s []float64) { - for _, v := range vs { - s = append(s, v.Float64()) - } - return s -} - -// Ints converts and returns `vs` as []Int. -func (vs Vars) Ints() (s []int) { - for _, v := range vs { - s = append(s, v.Int()) - } - return s -} - -// Int8s converts and returns `vs` as []int8. -func (vs Vars) Int8s() (s []int8) { - for _, v := range vs { - s = append(s, v.Int8()) - } - return s -} - -// Int16s converts and returns `vs` as []int16. -func (vs Vars) Int16s() (s []int16) { - for _, v := range vs { - s = append(s, v.Int16()) - } - return s -} - -// Int32s converts and returns `vs` as []int32. -func (vs Vars) Int32s() (s []int32) { - for _, v := range vs { - s = append(s, v.Int32()) - } - return s -} - -// Int64s converts and returns `vs` as []int64. -func (vs Vars) Int64s() (s []int64) { - for _, v := range vs { - s = append(s, v.Int64()) - } - return s -} - -// Uints converts and returns `vs` as []uint. -func (vs Vars) Uints() (s []uint) { - for _, v := range vs { - s = append(s, v.Uint()) - } - return s -} - -// Uint8s converts and returns `vs` as []uint8. -func (vs Vars) Uint8s() (s []uint8) { - for _, v := range vs { - s = append(s, v.Uint8()) - } - return s -} - -// Uint16s converts and returns `vs` as []uint16. 
-func (vs Vars) Uint16s() (s []uint16) { - for _, v := range vs { - s = append(s, v.Uint16()) - } - return s -} - -// Uint32s converts and returns `vs` as []uint32. -func (vs Vars) Uint32s() (s []uint32) { - for _, v := range vs { - s = append(s, v.Uint32()) - } - return s -} - -// Uint64s converts and returns `vs` as []uint64. -func (vs Vars) Uint64s() (s []uint64) { - for _, v := range vs { - s = append(s, v.Uint64()) - } - return s -} - -// Scan converts `vs` to []struct/[]*struct. -func (vs Vars) Scan(pointer interface{}, mapping ...map[string]string) error { - return gconv.Structs(vs.Interfaces(), pointer, mapping...) -} diff --git a/vendor/github.com/gogf/gf/v2/database/gredis/gredis.go b/vendor/github.com/gogf/gf/v2/database/gredis/gredis.go deleted file mode 100644 index 3f8f5dc2..00000000 --- a/vendor/github.com/gogf/gf/v2/database/gredis/gredis.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -// Package gredis provides convenient client for redis server. -// -// Redis Client. -// -// Redis Commands Official: https://redis.io/commands -// -// Redis Chinese Documentation: http://redisdoc.com/ -package gredis - -import ( - "github.com/gogf/gf/v2/errors/gcode" - "github.com/gogf/gf/v2/errors/gerror" -) - -// AdapterFunc is the function creating redis adapter. -type AdapterFunc func(config *Config) Adapter - -var ( - // defaultAdapterFunc is the default adapter function creating redis adapter. - defaultAdapterFunc AdapterFunc = func(config *Config) Adapter { - return nil - } -) - -// New creates and returns a redis client. -// It creates a default redis adapter of go-redis. 
-func New(config ...*Config) (*Redis, error) { - var ( - usedConfig *Config - usedAdapter Adapter - ) - if len(config) > 0 && config[0] != nil { - // Redis client with go redis implements adapter from given configuration. - usedConfig = config[0] - usedAdapter = defaultAdapterFunc(config[0]) - } else if configFromGlobal, ok := GetConfig(); ok { - // Redis client with go redis implements adapter from package configuration. - usedConfig = configFromGlobal - usedAdapter = defaultAdapterFunc(configFromGlobal) - } - if usedConfig == nil { - return nil, gerror.NewCode( - gcode.CodeInvalidConfiguration, - `no configuration found for creating Redis client`, - ) - } - if usedAdapter == nil { - return nil, gerror.NewCode( - gcode.CodeNecessaryPackageNotImport, - errorNilAdapter, - ) - } - redis := &Redis{ - config: usedConfig, - localAdapter: usedAdapter, - } - return redis.initGroup(), nil -} - -// NewWithAdapter creates and returns a redis client with given adapter. -func NewWithAdapter(adapter Adapter) (*Redis, error) { - if adapter == nil { - return nil, gerror.NewCodef(gcode.CodeInvalidParameter, `adapter cannot be nil`) - } - redis := &Redis{localAdapter: adapter} - return redis.initGroup(), nil -} - -// RegisterAdapterFunc registers default function creating redis adapter. -func RegisterAdapterFunc(adapterFunc AdapterFunc) { - defaultAdapterFunc = adapterFunc -} diff --git a/vendor/github.com/gogf/gf/v2/database/gredis/gredis_adapter.go b/vendor/github.com/gogf/gf/v2/database/gredis/gredis_adapter.go deleted file mode 100644 index ad63bdbe..00000000 --- a/vendor/github.com/gogf/gf/v2/database/gredis/gredis_adapter.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. 
- -package gredis - -import ( - "context" - - "github.com/gogf/gf/v2/container/gvar" -) - -// Adapter is an interface for universal redis operations. -type Adapter interface { - AdapterGroup - - // Do send a command to the server and returns the received reply. - // It uses json.Marshal for struct/slice/map type values before committing them to redis. - Do(ctx context.Context, command string, args ...interface{}) (*gvar.Var, error) - - // Conn retrieves and returns a connection object for continuous operations. - // Note that you should call Close function manually if you do not use this connection any further. - Conn(ctx context.Context) (conn Conn, err error) - - // Close closes current redis client, closes its connection pool and releases all its related resources. - Close(ctx context.Context) (err error) -} - -// Conn is an interface of a connection from universal redis client. -type Conn interface { - ConnCommand - - // Do send a command to the server and returns the received reply. - // It uses json.Marshal for struct/slice/map type values before committing them to redis. - Do(ctx context.Context, command string, args ...interface{}) (result *gvar.Var, err error) - - // Close puts the connection back to connection pool. - Close(ctx context.Context) (err error) -} - -// AdapterGroup is an interface managing group operations for redis. -type AdapterGroup interface { - GroupGeneric() IGroupGeneric - GroupHash() IGroupHash - GroupList() IGroupList - GroupPubSub() IGroupPubSub - GroupScript() IGroupScript - GroupSet() IGroupSet - GroupSortedSet() IGroupSortedSet - GroupString() IGroupString -} - -// ConnCommand is an interface managing some operations bound to certain connection. -type ConnCommand interface { - // Subscribe subscribes the client to the specified channels. 
- // https://redis.io/commands/subscribe/ - Subscribe(ctx context.Context, channel string, channels ...string) ([]*Subscription, error) - - // PSubscribe subscribes the client to the given patterns. - // - // Supported glob-style patterns: - // - h?llo subscribes to hello, hallo and hxllo - // - h*llo subscribes to hllo and heeeello - // - h[ae]llo subscribes to hello and hallo, but not hillo - // - // Use \ to escape special characters if you want to match them verbatim. - // - // https://redis.io/commands/psubscribe/ - PSubscribe(ctx context.Context, pattern string, patterns ...string) ([]*Subscription, error) - - // ReceiveMessage receives a single message of subscription from the Redis server. - ReceiveMessage(ctx context.Context) (*Message, error) - - // Receive receives a single reply as gvar.Var from the Redis server. - Receive(ctx context.Context) (result *gvar.Var, err error) -} diff --git a/vendor/github.com/gogf/gf/v2/database/gredis/gredis_config.go b/vendor/github.com/gogf/gf/v2/database/gredis/gredis_config.go deleted file mode 100644 index c743197d..00000000 --- a/vendor/github.com/gogf/gf/v2/database/gredis/gredis_config.go +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gredis - -import ( - "context" - "crypto/tls" - "time" - - "github.com/gogf/gf/v2/container/gmap" - "github.com/gogf/gf/v2/errors/gcode" - "github.com/gogf/gf/v2/errors/gerror" - "github.com/gogf/gf/v2/internal/intlog" - "github.com/gogf/gf/v2/util/gconv" -) - -// Config is redis configuration. -type Config struct { - // Address It supports single and cluster redis server. Multiple addresses joined with char ','. Eg: 192.168.1.1:6379, 192.168.1.2:6379. - Address string `json:"address"` - Db int `json:"db"` // Redis db. 
- User string `json:"user"` // Username for AUTH. - Pass string `json:"pass"` // Password for AUTH. - MinIdle int `json:"minIdle"` // Minimum number of connections allowed to be idle (default is 0) - MaxIdle int `json:"maxIdle"` // Maximum number of connections allowed to be idle (default is 10) - MaxActive int `json:"maxActive"` // Maximum number of connections limit (default is 0 means no limit). - MaxConnLifetime time.Duration `json:"maxConnLifetime"` // Maximum lifetime of the connection (default is 30 seconds, not allowed to be set to 0) - IdleTimeout time.Duration `json:"idleTimeout"` // Maximum idle time for connection (default is 10 seconds, not allowed to be set to 0) - WaitTimeout time.Duration `json:"waitTimeout"` // Timed out duration waiting to get a connection from the connection pool. - DialTimeout time.Duration `json:"dialTimeout"` // Dial connection timeout for TCP. - ReadTimeout time.Duration `json:"readTimeout"` // Read timeout for TCP. DO NOT set it if not necessary. - WriteTimeout time.Duration `json:"writeTimeout"` // Write timeout for TCP. - MasterName string `json:"masterName"` // Used in Redis Sentinel mode. - TLS bool `json:"tls"` // Specifies whether TLS should be used when connecting to the server. - TLSSkipVerify bool `json:"tlsSkipVerify"` // Disables server name verification when connecting over TLS. - TLSConfig *tls.Config `json:"-"` // TLS Config to use. When set TLS will be negotiated. - SlaveOnly bool `json:"slaveOnly"` // Route all commands to slave read-only nodes. -} - -const ( - DefaultGroupName = "default" // Default configuration group name. -) - -var ( - // Configuration groups. - localConfigMap = gmap.NewStrAnyMap(true) -) - -// SetConfig sets the global configuration for specified group. -// If `name` is not passed, it sets configuration for the default group name. 
-func SetConfig(config *Config, name ...string) { - group := DefaultGroupName - if len(name) > 0 { - group = name[0] - } - localConfigMap.Set(group, config) - - intlog.Printf(context.TODO(), `SetConfig for group "%s": %+v`, group, config) -} - -// SetConfigByMap sets the global configuration for specified group with map. -// If `name` is not passed, it sets configuration for the default group name. -func SetConfigByMap(m map[string]interface{}, name ...string) error { - group := DefaultGroupName - if len(name) > 0 { - group = name[0] - } - config, err := ConfigFromMap(m) - if err != nil { - return err - } - localConfigMap.Set(group, config) - return nil -} - -// ConfigFromMap parses and returns config from given map. -func ConfigFromMap(m map[string]interface{}) (config *Config, err error) { - config = &Config{} - if err = gconv.Scan(m, config); err != nil { - err = gerror.NewCodef(gcode.CodeInvalidConfiguration, `invalid redis configuration: %#v`, m) - } - if config.DialTimeout < time.Second { - config.DialTimeout = config.DialTimeout * time.Second - } - if config.WaitTimeout < time.Second { - config.WaitTimeout = config.WaitTimeout * time.Second - } - if config.WriteTimeout < time.Second { - config.WriteTimeout = config.WriteTimeout * time.Second - } - if config.ReadTimeout < time.Second { - config.ReadTimeout = config.ReadTimeout * time.Second - } - if config.IdleTimeout < time.Second { - config.IdleTimeout = config.IdleTimeout * time.Second - } - if config.MaxConnLifetime < time.Second { - config.MaxConnLifetime = config.MaxConnLifetime * time.Second - } - return -} - -// GetConfig returns the global configuration with specified group name. -// If `name` is not passed, it returns configuration of the default group name. 
-func GetConfig(name ...string) (config *Config, ok bool) { - group := DefaultGroupName - if len(name) > 0 { - group = name[0] - } - if v := localConfigMap.Get(group); v != nil { - return v.(*Config), true - } - return &Config{}, false -} - -// RemoveConfig removes the global configuration with specified group. -// If `name` is not passed, it removes configuration of the default group name. -func RemoveConfig(name ...string) { - group := DefaultGroupName - if len(name) > 0 { - group = name[0] - } - localConfigMap.Remove(group) - - intlog.Printf(context.TODO(), `RemoveConfig: %s`, group) -} - -// ClearConfig removes all configurations of redis. -func ClearConfig() { - localConfigMap.Clear() -} diff --git a/vendor/github.com/gogf/gf/v2/database/gredis/gredis_instance.go b/vendor/github.com/gogf/gf/v2/database/gredis/gredis_instance.go deleted file mode 100644 index 2805d557..00000000 --- a/vendor/github.com/gogf/gf/v2/database/gredis/gredis_instance.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gredis - -import ( - "context" - - "github.com/gogf/gf/v2/container/gmap" - "github.com/gogf/gf/v2/internal/intlog" -) - -var ( - // localInstances for instance management of redis client. - localInstances = gmap.NewStrAnyMap(true) -) - -// Instance returns an instance of redis client with specified group. -// The `name` param is unnecessary, if `name` is not passed, -// it returns a redis instance with default configuration group. 
-func Instance(name ...string) *Redis { - group := DefaultGroupName - if len(name) > 0 && name[0] != "" { - group = name[0] - } - v := localInstances.GetOrSetFuncLock(group, func() interface{} { - if config, ok := GetConfig(group); ok { - r, err := New(config) - if err != nil { - intlog.Errorf(context.TODO(), `%+v`, err) - return nil - } - return r - } - return nil - }) - if v != nil { - return v.(*Redis) - } - return nil -} diff --git a/vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis.go b/vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis.go deleted file mode 100644 index 04d72b47..00000000 --- a/vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gredis - -import ( - "context" - - "github.com/gogf/gf/v2/container/gvar" - "github.com/gogf/gf/v2/errors/gcode" - "github.com/gogf/gf/v2/errors/gerror" - "github.com/gogf/gf/v2/text/gstr" -) - -// Redis client. -type Redis struct { - config *Config - localAdapter - localGroup -} - -type ( - localGroup struct { - localGroupGeneric - localGroupHash - localGroupList - localGroupPubSub - localGroupScript - localGroupSet - localGroupSortedSet - localGroupString - } - localAdapter = Adapter - localGroupGeneric = IGroupGeneric - localGroupHash = IGroupHash - localGroupList = IGroupList - localGroupPubSub = IGroupPubSub - localGroupScript = IGroupScript - localGroupSet = IGroupSet - localGroupSortedSet = IGroupSortedSet - localGroupString = IGroupString -) - -const ( - errorNilRedis = `the Redis object is nil` -) - -var ( - errorNilAdapter = gstr.Trim(gstr.Replace(` -redis adapter is not set, missing configuration or adapter register? 
-possible reference: https://github.com/gogf/gf/tree/master/contrib/nosql/redis -`, "\n", "")) -) - -// initGroup initializes the group object of redis. -func (r *Redis) initGroup() *Redis { - r.localGroup = localGroup{ - localGroupGeneric: r.localAdapter.GroupGeneric(), - localGroupHash: r.localAdapter.GroupHash(), - localGroupList: r.localAdapter.GroupList(), - localGroupPubSub: r.localAdapter.GroupPubSub(), - localGroupScript: r.localAdapter.GroupScript(), - localGroupSet: r.localAdapter.GroupSet(), - localGroupSortedSet: r.localAdapter.GroupSortedSet(), - localGroupString: r.localAdapter.GroupString(), - } - return r -} - -// SetAdapter changes the underlying adapter with custom adapter for current redis client. -func (r *Redis) SetAdapter(adapter Adapter) { - if r == nil { - panic(gerror.NewCode(gcode.CodeInvalidParameter, errorNilRedis)) - } - r.localAdapter = adapter -} - -// GetAdapter returns the adapter that is set in current redis client. -func (r *Redis) GetAdapter() Adapter { - if r == nil { - return nil - } - return r.localAdapter -} - -// Conn retrieves and returns a connection object for continuous operations. -// Note that you should call Close function manually if you do not use this connection any further. -func (r *Redis) Conn(ctx context.Context) (Conn, error) { - if r == nil { - return nil, gerror.NewCode(gcode.CodeInvalidParameter, errorNilRedis) - } - if r.localAdapter == nil { - return nil, gerror.NewCode(gcode.CodeNecessaryPackageNotImport, errorNilAdapter) - } - return r.localAdapter.Conn(ctx) -} - -// Do send a command to the server and returns the received reply. -// It uses json.Marshal for struct/slice/map type values before committing them to redis. 
-func (r *Redis) Do(ctx context.Context, command string, args ...interface{}) (*gvar.Var, error) { - if r == nil { - return nil, gerror.NewCode(gcode.CodeInvalidParameter, errorNilRedis) - } - if r.localAdapter == nil { - return nil, gerror.NewCodef(gcode.CodeMissingConfiguration, errorNilAdapter) - } - return r.localAdapter.Do(ctx, command, args...) -} - -// MustConn performs as function Conn, but it panics if any error occurs internally. -func (r *Redis) MustConn(ctx context.Context) Conn { - c, err := r.Conn(ctx) - if err != nil { - panic(err) - } - return c -} - -// MustDo performs as function Do, but it panics if any error occurs internally. -func (r *Redis) MustDo(ctx context.Context, command string, args ...interface{}) *gvar.Var { - v, err := r.Do(ctx, command, args...) - if err != nil { - panic(err) - } - return v -} - -// Close closes current redis client, closes its connection pool and releases all its related resources. -func (r *Redis) Close(ctx context.Context) error { - if r == nil || r.localAdapter == nil { - return nil - } - return r.localAdapter.Close(ctx) -} diff --git a/vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_generic.go b/vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_generic.go deleted file mode 100644 index fc5ace89..00000000 --- a/vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_generic.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gredis - -import ( - "context" - "time" - - "github.com/gogf/gf/v2/container/gvar" -) - -// IGroupGeneric manages generic redis operations. -// Implements see redis.GroupGeneric. 
-type IGroupGeneric interface { - Copy(ctx context.Context, source, destination string, option ...CopyOption) (int64, error) - Exists(ctx context.Context, keys ...string) (int64, error) - Type(ctx context.Context, key string) (string, error) - Unlink(ctx context.Context, keys ...string) (int64, error) - Rename(ctx context.Context, key, newKey string) error - RenameNX(ctx context.Context, key, newKey string) (int64, error) - Move(ctx context.Context, key string, db int) (int64, error) - Del(ctx context.Context, keys ...string) (int64, error) - RandomKey(ctx context.Context) (string, error) - DBSize(ctx context.Context) (int64, error) - Keys(ctx context.Context, pattern string) ([]string, error) - FlushDB(ctx context.Context, option ...FlushOp) error - FlushAll(ctx context.Context, option ...FlushOp) error - Expire(ctx context.Context, key string, seconds int64, option ...ExpireOption) (int64, error) - ExpireAt(ctx context.Context, key string, time time.Time, option ...ExpireOption) (int64, error) - ExpireTime(ctx context.Context, key string) (*gvar.Var, error) - TTL(ctx context.Context, key string) (int64, error) - Persist(ctx context.Context, key string) (int64, error) - PExpire(ctx context.Context, key string, milliseconds int64, option ...ExpireOption) (int64, error) - PExpireAt(ctx context.Context, key string, time time.Time, option ...ExpireOption) (int64, error) - PExpireTime(ctx context.Context, key string) (*gvar.Var, error) - PTTL(ctx context.Context, key string) (int64, error) -} - -// CopyOption provides options for function Copy. -type CopyOption struct { - DB int // DB option allows specifying an alternative logical database index for the destination key. - REPLACE bool // REPLACE option removes the destination key before copying the value to it. 
-} - -type FlushOp string - -const ( - FlushAsync FlushOp = "ASYNC" // ASYNC: flushes the databases asynchronously - FlushSync FlushOp = "SYNC" // SYNC: flushes the databases synchronously -) - -// ExpireOption provides options for function Expire. -type ExpireOption struct { - NX bool // NX -- Set expiry only when the key has no expiry - XX bool // XX -- Set expiry only when the key has an existing expiry - GT bool // GT -- Set expiry only when the new expiry is greater than current one - LT bool // LT -- Set expiry only when the new expiry is less than current one -} diff --git a/vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_hash.go b/vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_hash.go deleted file mode 100644 index ab398447..00000000 --- a/vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_hash.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gredis - -import ( - "context" - - "github.com/gogf/gf/v2/container/gvar" -) - -// IGroupHash manages redis hash operations. -// Implements see redis.GroupHash. 
-type IGroupHash interface { - HSet(ctx context.Context, key string, fields map[string]interface{}) (int64, error) - HSetNX(ctx context.Context, key, field string, value interface{}) (int64, error) - HGet(ctx context.Context, key, field string) (*gvar.Var, error) - HStrLen(ctx context.Context, key, field string) (int64, error) - HExists(ctx context.Context, key, field string) (int64, error) - HDel(ctx context.Context, key string, fields ...string) (int64, error) - HLen(ctx context.Context, key string) (int64, error) - HIncrBy(ctx context.Context, key, field string, increment int64) (int64, error) - HIncrByFloat(ctx context.Context, key, field string, increment float64) (float64, error) - HMSet(ctx context.Context, key string, fields map[string]interface{}) error - HMGet(ctx context.Context, key string, fields ...string) (gvar.Vars, error) - HKeys(ctx context.Context, key string) ([]string, error) - HVals(ctx context.Context, key string) (gvar.Vars, error) - HGetAll(ctx context.Context, key string) (*gvar.Var, error) -} diff --git a/vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_list.go b/vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_list.go deleted file mode 100644 index fa989170..00000000 --- a/vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_list.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gredis - -import ( - "context" - - "github.com/gogf/gf/v2/container/gvar" -) - -// IGroupList manages redis list operations. -// Implements see redis.GroupList. 
-type IGroupList interface { - LPush(ctx context.Context, key string, values ...interface{}) (int64, error) - LPushX(ctx context.Context, key string, element interface{}, elements ...interface{}) (int64, error) - RPush(ctx context.Context, key string, values ...interface{}) (int64, error) - RPushX(ctx context.Context, key string, value interface{}) (int64, error) - LPop(ctx context.Context, key string, count ...int) (*gvar.Var, error) - RPop(ctx context.Context, key string, count ...int) (*gvar.Var, error) - LRem(ctx context.Context, key string, count int64, value interface{}) (int64, error) - LLen(ctx context.Context, key string) (int64, error) - LIndex(ctx context.Context, key string, index int64) (*gvar.Var, error) - LInsert(ctx context.Context, key string, op LInsertOp, pivot, value interface{}) (int64, error) - LSet(ctx context.Context, key string, index int64, value interface{}) (*gvar.Var, error) - LRange(ctx context.Context, key string, start, stop int64) (gvar.Vars, error) - LTrim(ctx context.Context, key string, start, stop int64) error - BLPop(ctx context.Context, timeout int64, keys ...string) (gvar.Vars, error) - BRPop(ctx context.Context, timeout int64, keys ...string) (gvar.Vars, error) - RPopLPush(ctx context.Context, source, destination string) (*gvar.Var, error) - BRPopLPush(ctx context.Context, source, destination string, timeout int64) (*gvar.Var, error) -} - -// LInsertOp defines the operation name for function LInsert. -type LInsertOp string - -const ( - LInsertBefore LInsertOp = "BEFORE" - LInsertAfter LInsertOp = "AFTER" -) diff --git a/vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_pubsub.go b/vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_pubsub.go deleted file mode 100644 index 2e4bd553..00000000 --- a/vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_pubsub.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. 
-// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gredis - -import ( - "context" - "fmt" -) - -// IGroupPubSub manages redis pub/sub operations. -// Implements see redis.GroupPubSub. -type IGroupPubSub interface { - Publish(ctx context.Context, channel string, message interface{}) (int64, error) - Subscribe(ctx context.Context, channel string, channels ...string) (Conn, []*Subscription, error) - PSubscribe(ctx context.Context, pattern string, patterns ...string) (Conn, []*Subscription, error) -} - -// Message received as result of a PUBLISH command issued by another client. -type Message struct { - Channel string - Pattern string - Payload string - PayloadSlice []string -} - -// Subscription received after a successful subscription to channel. -type Subscription struct { - Kind string // Can be "subscribe", "unsubscribe", "psubscribe" or "punsubscribe". - Channel string // Channel name we have subscribed to. - Count int // Number of channels we are currently subscribed to. -} - -// String converts current object to a readable string. -func (m *Subscription) String() string { - return fmt.Sprintf("%s: %s", m.Kind, m.Channel) -} diff --git a/vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_script.go b/vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_script.go deleted file mode 100644 index e2ef1fc4..00000000 --- a/vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_script.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. 
- -package gredis - -import ( - "context" - - "github.com/gogf/gf/v2/container/gvar" -) - -// IGroupScript manages redis script operations. -// Implements see redis.GroupScript. -type IGroupScript interface { - Eval(ctx context.Context, script string, numKeys int64, keys []string, args []interface{}) (*gvar.Var, error) - EvalSha(ctx context.Context, sha1 string, numKeys int64, keys []string, args []interface{}) (*gvar.Var, error) - ScriptLoad(ctx context.Context, script string) (string, error) - ScriptExists(ctx context.Context, sha1 string, sha1s ...string) (map[string]bool, error) - ScriptFlush(ctx context.Context, option ...ScriptFlushOption) error - ScriptKill(ctx context.Context) error -} - -// ScriptFlushOption provides options for function ScriptFlush. -type ScriptFlushOption struct { - SYNC bool // SYNC flushes the cache synchronously. - ASYNC bool // ASYNC flushes the cache asynchronously. -} diff --git a/vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_set.go b/vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_set.go deleted file mode 100644 index 27e04fbf..00000000 --- a/vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_set.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gredis - -import ( - "context" - - "github.com/gogf/gf/v2/container/gvar" -) - -// IGroupSet manages redis set operations. -// Implements see redis.GroupSet. 
-type IGroupSet interface { - SAdd(ctx context.Context, key string, member interface{}, members ...interface{}) (int64, error) - SIsMember(ctx context.Context, key string, member interface{}) (int64, error) - SPop(ctx context.Context, key string, count ...int) (*gvar.Var, error) - SRandMember(ctx context.Context, key string, count ...int) (*gvar.Var, error) - SRem(ctx context.Context, key string, member interface{}, members ...interface{}) (int64, error) - SMove(ctx context.Context, source, destination string, member interface{}) (int64, error) - SCard(ctx context.Context, key string) (int64, error) - SMembers(ctx context.Context, key string) (gvar.Vars, error) - SMIsMember(ctx context.Context, key, member interface{}, members ...interface{}) ([]int, error) - SInter(ctx context.Context, key string, keys ...string) (gvar.Vars, error) - SInterStore(ctx context.Context, destination string, key string, keys ...string) (int64, error) - SUnion(ctx context.Context, key string, keys ...string) (gvar.Vars, error) - SUnionStore(ctx context.Context, destination, key string, keys ...string) (int64, error) - SDiff(ctx context.Context, key string, keys ...string) (gvar.Vars, error) - SDiffStore(ctx context.Context, destination string, key string, keys ...string) (int64, error) -} diff --git a/vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_sorted_set.go b/vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_sorted_set.go deleted file mode 100644 index 83367136..00000000 --- a/vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_sorted_set.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. 
- -package gredis - -import ( - "context" - - "github.com/gogf/gf/v2/container/gvar" -) - -// IGroupSortedSet manages redis sorted set operations. -// Implements see redis.GroupSortedSet. -type IGroupSortedSet interface { - ZAdd(ctx context.Context, key string, option *ZAddOption, member ZAddMember, members ...ZAddMember) (*gvar.Var, error) - ZScore(ctx context.Context, key string, member interface{}) (float64, error) - ZIncrBy(ctx context.Context, key string, increment float64, member interface{}) (float64, error) - ZCard(ctx context.Context, key string) (int64, error) - ZCount(ctx context.Context, key string, min, max string) (int64, error) - ZRange(ctx context.Context, key string, start, stop int64, option ...ZRangeOption) (gvar.Vars, error) - ZRevRange(ctx context.Context, key string, start, stop int64, option ...ZRevRangeOption) (*gvar.Var, error) - ZRank(ctx context.Context, key string, member interface{}) (int64, error) - ZRevRank(ctx context.Context, key string, member interface{}) (int64, error) - ZRem(ctx context.Context, key string, member interface{}, members ...interface{}) (int64, error) - ZRemRangeByRank(ctx context.Context, key string, start, stop int64) (int64, error) - ZRemRangeByScore(ctx context.Context, key string, min, max string) (int64, error) - ZRemRangeByLex(ctx context.Context, key string, min, max string) (int64, error) - ZLexCount(ctx context.Context, key, min, max string) (int64, error) -} - -// ZAddOption provides options for function ZAdd. -type ZAddOption struct { - XX bool // Only update elements that already exist. Don't add new elements. - NX bool // Only add new elements. Don't update already existing elements. - // Only update existing elements if the new score is less than the current score. - // This flag doesn't prevent adding new elements. - LT bool - - // Only update existing elements if the new score is greater than the current score. - // This flag doesn't prevent adding new elements. 
- GT bool - - // Modify the return value from the number of new elements added, to the total number of elements changed (CH is an abbreviation of changed). - // Changed elements are new elements added and elements already existing for which the score was updated. - // So elements specified in the command line having the same score as they had in the past are not counted. - // Note: normally the return value of ZAdd only counts the number of new elements added. - CH bool - - // When this option is specified ZAdd acts like ZIncrBy. Only one score-element pair can be specified in this mode. - INCR bool -} - -// ZAddMember is element struct for set. -type ZAddMember struct { - Score float64 - Member interface{} -} - -// ZRangeOption provides extra option for ZRange function. -type ZRangeOption struct { - ByScore bool - ByLex bool - // The optional REV argument reverses the ordering, so elements are ordered from highest to lowest score, - // and score ties are resolved by reverse lexicographical ordering. - Rev bool - Limit *ZRangeOptionLimit - // The optional WithScores argument supplements the command's reply with the scores of elements returned. - WithScores bool -} - -// ZRangeOptionLimit provides LIMIT argument for ZRange function. -// The optional LIMIT argument can be used to obtain a sub-range from the matching elements -// (similar to SELECT LIMIT offset, count in SQL). A negative `Count` returns all elements from the `Offset`. -type ZRangeOptionLimit struct { - Offset *int - Count *int -} - -// ZRevRangeOption provides options for function ZRevRange. 
-type ZRevRangeOption struct { - WithScores bool -} diff --git a/vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_string.go b/vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_string.go deleted file mode 100644 index 88e82704..00000000 --- a/vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_string.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gredis - -import ( - "context" - - "github.com/gogf/gf/v2/container/gvar" -) - -// IGroupString manages redis string operations. -// Implements see redis.GroupString. -type IGroupString interface { - Set(ctx context.Context, key string, value interface{}, option ...SetOption) (*gvar.Var, error) - SetNX(ctx context.Context, key string, value interface{}) (bool, error) - SetEX(ctx context.Context, key string, value interface{}, ttlInSeconds int64) error - Get(ctx context.Context, key string) (*gvar.Var, error) - GetDel(ctx context.Context, key string) (*gvar.Var, error) - GetEX(ctx context.Context, key string, option ...GetEXOption) (*gvar.Var, error) - GetSet(ctx context.Context, key string, value interface{}) (*gvar.Var, error) - StrLen(ctx context.Context, key string) (int64, error) - Append(ctx context.Context, key string, value string) (int64, error) - SetRange(ctx context.Context, key string, offset int64, value string) (int64, error) - GetRange(ctx context.Context, key string, start, end int64) (string, error) - Incr(ctx context.Context, key string) (int64, error) - IncrBy(ctx context.Context, key string, increment int64) (int64, error) - IncrByFloat(ctx context.Context, key string, increment float64) (float64, error) - Decr(ctx context.Context, key string) (int64, error) - DecrBy(ctx context.Context, key string, decrement 
int64) (int64, error) - MSet(ctx context.Context, keyValueMap map[string]interface{}) error - MSetNX(ctx context.Context, keyValueMap map[string]interface{}) (bool, error) - MGet(ctx context.Context, keys ...string) (map[string]*gvar.Var, error) -} - -// TTLOption provides extra option for TTL related functions. -type TTLOption struct { - EX *int64 // EX seconds -- Set the specified expire time, in seconds. - PX *int64 // PX milliseconds -- Set the specified expire time, in milliseconds. - EXAT *int64 // EXAT timestamp-seconds -- Set the specified Unix time at which the key will expire, in seconds. - PXAT *int64 // PXAT timestamp-milliseconds -- Set the specified Unix time at which the key will expire, in milliseconds. - KeepTTL bool // Retain the time to live associated with the key. -} - -// SetOption provides extra option for Set function. -type SetOption struct { - TTLOption - NX bool // Only set the key if it does not already exist. - XX bool // Only set the key if it already exists. - - // Return the old string stored at key, or nil if key did not exist. - // An error is returned and SET aborted if the value stored at key is not a string. - Get bool -} - -// GetEXOption provides extra option for GetEx function. -type GetEXOption struct { - TTLOption - Persist bool // Persist -- Remove the time to live associated with the key. -} diff --git a/vendor/github.com/gogf/gf/v2/debug/gdebug/gdebug.go b/vendor/github.com/gogf/gf/v2/debug/gdebug/gdebug.go deleted file mode 100644 index 7ff8e3db..00000000 --- a/vendor/github.com/gogf/gf/v2/debug/gdebug/gdebug.go +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -// Package gdebug contains facilities for programs to debug themselves while they are running. 
-package gdebug diff --git a/vendor/github.com/gogf/gf/v2/debug/gdebug/gdebug_caller.go b/vendor/github.com/gogf/gf/v2/debug/gdebug/gdebug_caller.go deleted file mode 100644 index d02ff4ab..00000000 --- a/vendor/github.com/gogf/gf/v2/debug/gdebug/gdebug_caller.go +++ /dev/null @@ -1,196 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gdebug - -import ( - "fmt" - "os" - "os/exec" - "path/filepath" - "reflect" - "runtime" - "strings" -) - -const ( - maxCallerDepth = 1000 - stackFilterKey = "/debug/gdebug/gdebug" -) - -var ( - goRootForFilter = runtime.GOROOT() // goRootForFilter is used for stack filtering purpose. - binaryVersion = "" // The version of current running binary(uint64 hex). - binaryVersionMd5 = "" // The version of current running binary(MD5). - selfPath = "" // Current running binary absolute path. -) - -func init() { - if goRootForFilter != "" { - goRootForFilter = strings.ReplaceAll(goRootForFilter, "\\", "/") - } - // Initialize internal package variable: selfPath. - selfPath, _ = exec.LookPath(os.Args[0]) - if selfPath != "" { - selfPath, _ = filepath.Abs(selfPath) - } - if selfPath == "" { - selfPath, _ = filepath.Abs(os.Args[0]) - } -} - -// Caller returns the function name and the absolute file path along with its line -// number of the caller. -func Caller(skip ...int) (function string, path string, line int) { - return CallerWithFilter(nil, skip...) -} - -// CallerWithFilter returns the function name and the absolute file path along with -// its line number of the caller. -// -// The parameter `filters` is used to filter the path of the caller. 
-func CallerWithFilter(filters []string, skip ...int) (function string, path string, line int) { - var ( - number = 0 - ok = true - ) - if len(skip) > 0 { - number = skip[0] - } - pc, file, line, start := callerFromIndex(filters) - if start != -1 { - for i := start + number; i < maxCallerDepth; i++ { - if i != start { - pc, file, line, ok = runtime.Caller(i) - } - if ok { - if filterFileByFilters(file, filters) { - continue - } - function = "" - if fn := runtime.FuncForPC(pc); fn == nil { - function = "unknown" - } else { - function = fn.Name() - } - return function, file, line - } else { - break - } - } - } - return "", "", -1 -} - -// callerFromIndex returns the caller position and according information exclusive of the -// debug package. -// -// VERY NOTE THAT, the returned index value should be `index - 1` as the caller's start point. -func callerFromIndex(filters []string) (pc uintptr, file string, line int, index int) { - var ok bool - for index = 0; index < maxCallerDepth; index++ { - if pc, file, line, ok = runtime.Caller(index); ok { - if filterFileByFilters(file, filters) { - continue - } - if index > 0 { - index-- - } - return - } - } - return 0, "", -1, -1 -} - -func filterFileByFilters(file string, filters []string) (filtered bool) { - // Filter empty file. - if file == "" { - return true - } - // Filter gdebug package callings. - if strings.Contains(file, stackFilterKey) { - return true - } - for _, filter := range filters { - if filter != "" && strings.Contains(file, filter) { - return true - } - } - // GOROOT filter. - if goRootForFilter != "" && len(file) >= len(goRootForFilter) && file[0:len(goRootForFilter)] == goRootForFilter { - // https://github.com/gogf/gf/issues/2047 - fileSeparator := file[len(goRootForFilter)] - if fileSeparator == filepath.Separator || fileSeparator == '\\' || fileSeparator == '/' { - return true - } - } - return false -} - -// CallerPackage returns the package name of the caller. 
-func CallerPackage() string { - function, _, _ := Caller() - indexSplit := strings.LastIndexByte(function, '/') - if indexSplit == -1 { - return function[:strings.IndexByte(function, '.')] - } else { - leftPart := function[:indexSplit+1] - rightPart := function[indexSplit+1:] - indexDot := strings.IndexByte(function, '.') - rightPart = rightPart[:indexDot-1] - return leftPart + rightPart - } -} - -// CallerFunction returns the function name of the caller. -func CallerFunction() string { - function, _, _ := Caller() - function = function[strings.LastIndexByte(function, '/')+1:] - function = function[strings.IndexByte(function, '.')+1:] - return function -} - -// CallerFilePath returns the file path of the caller. -func CallerFilePath() string { - _, path, _ := Caller() - return path -} - -// CallerDirectory returns the directory of the caller. -func CallerDirectory() string { - _, path, _ := Caller() - return filepath.Dir(path) -} - -// CallerFileLine returns the file path along with the line number of the caller. -func CallerFileLine() string { - _, path, line := Caller() - return fmt.Sprintf(`%s:%d`, path, line) -} - -// CallerFileLineShort returns the file name along with the line number of the caller. -func CallerFileLineShort() string { - _, path, line := Caller() - return fmt.Sprintf(`%s:%d`, filepath.Base(path), line) -} - -// FuncPath returns the complete function path of given `f`. -func FuncPath(f interface{}) string { - return runtime.FuncForPC(reflect.ValueOf(f).Pointer()).Name() -} - -// FuncName returns the function name of given `f`. 
-func FuncName(f interface{}) string { - path := FuncPath(f) - if path == "" { - return "" - } - index := strings.LastIndexByte(path, '/') - if index < 0 { - index = strings.LastIndexByte(path, '\\') - } - return path[index+1:] -} diff --git a/vendor/github.com/gogf/gf/v2/debug/gdebug/gdebug_grid.go b/vendor/github.com/gogf/gf/v2/debug/gdebug/gdebug_grid.go deleted file mode 100644 index f43023d8..00000000 --- a/vendor/github.com/gogf/gf/v2/debug/gdebug/gdebug_grid.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gdebug - -import ( - "regexp" - "runtime" - "strconv" -) - -var ( - // gridRegex is the regular expression object for parsing goroutine id from stack information. - gridRegex = regexp.MustCompile(`^\w+\s+(\d+)\s+`) -) - -// GoroutineId retrieves and returns the current goroutine id from stack information. -// Be very aware that, it is with low performance as it uses runtime.Stack function. -// It is commonly used for debugging purpose. -func GoroutineId() int { - buf := make([]byte, 26) - runtime.Stack(buf, false) - match := gridRegex.FindSubmatch(buf) - id, _ := strconv.Atoi(string(match[1])) - return id -} diff --git a/vendor/github.com/gogf/gf/v2/debug/gdebug/gdebug_stack.go b/vendor/github.com/gogf/gf/v2/debug/gdebug/gdebug_stack.go deleted file mode 100644 index 20db7d80..00000000 --- a/vendor/github.com/gogf/gf/v2/debug/gdebug/gdebug_stack.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. 
- -package gdebug - -import ( - "bytes" - "fmt" - "runtime" -) - -// PrintStack prints to standard error the stack trace returned by runtime.Stack. -func PrintStack(skip ...int) { - fmt.Print(Stack(skip...)) -} - -// Stack returns a formatted stack trace of the goroutine that calls it. -// It calls runtime.Stack with a large enough buffer to capture the entire trace. -func Stack(skip ...int) string { - return StackWithFilter(nil, skip...) -} - -// StackWithFilter returns a formatted stack trace of the goroutine that calls it. -// It calls runtime.Stack with a large enough buffer to capture the entire trace. -// -// The parameter `filter` is used to filter the path of the caller. -func StackWithFilter(filters []string, skip ...int) string { - return StackWithFilters(filters, skip...) -} - -// StackWithFilters returns a formatted stack trace of the goroutine that calls it. -// It calls runtime.Stack with a large enough buffer to capture the entire trace. -// -// The parameter `filters` is a slice of strings, which are used to filter the path of the -// caller. -// -// TODO Improve the performance using debug.Stack. 
-func StackWithFilters(filters []string, skip ...int) string { - number := 0 - if len(skip) > 0 { - number = skip[0] - } - var ( - name string - space = " " - index = 1 - buffer = bytes.NewBuffer(nil) - ok = true - pc, file, line, start = callerFromIndex(filters) - ) - for i := start + number; i < maxCallerDepth; i++ { - if i != start { - pc, file, line, ok = runtime.Caller(i) - } - if ok { - if filterFileByFilters(file, filters) { - continue - } - if fn := runtime.FuncForPC(pc); fn == nil { - name = "unknown" - } else { - name = fn.Name() - } - if index > 9 { - space = " " - } - buffer.WriteString(fmt.Sprintf("%d.%s%s\n %s:%d\n", index, space, name, file, line)) - index++ - } else { - break - } - } - return buffer.String() -} diff --git a/vendor/github.com/gogf/gf/v2/debug/gdebug/gdebug_version.go b/vendor/github.com/gogf/gf/v2/debug/gdebug/gdebug_version.go deleted file mode 100644 index db4bec6e..00000000 --- a/vendor/github.com/gogf/gf/v2/debug/gdebug/gdebug_version.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gdebug - -import ( - "crypto/md5" - "fmt" - "io" - "os" - "strconv" - - "github.com/gogf/gf/v2/encoding/ghash" - "github.com/gogf/gf/v2/errors/gerror" -) - -// BinVersion returns the version of current running binary. -// It uses ghash.BKDRHash+BASE36 algorithm to calculate the unique version of the binary. -func BinVersion() string { - if binaryVersion == "" { - binaryContent, _ := os.ReadFile(selfPath) - binaryVersion = strconv.FormatInt( - int64(ghash.BKDR(binaryContent)), - 36, - ) - } - return binaryVersion -} - -// BinVersionMd5 returns the version of current running binary. -// It uses MD5 algorithm to calculate the unique version of the binary. 
-func BinVersionMd5() string { - if binaryVersionMd5 == "" { - binaryVersionMd5, _ = md5File(selfPath) - } - return binaryVersionMd5 -} - -// md5File encrypts file content of `path` using MD5 algorithms. -func md5File(path string) (encrypt string, err error) { - f, err := os.Open(path) - if err != nil { - err = gerror.Wrapf(err, `os.Open failed for name "%s"`, path) - return "", err - } - defer f.Close() - h := md5.New() - _, err = io.Copy(h, f) - if err != nil { - err = gerror.Wrap(err, `io.Copy failed`) - return "", err - } - return fmt.Sprintf("%x", h.Sum(nil)), nil -} diff --git a/vendor/github.com/gogf/gf/v2/encoding/gbinary/gbinary.go b/vendor/github.com/gogf/gf/v2/encoding/gbinary/gbinary.go deleted file mode 100644 index 5fbc4847..00000000 --- a/vendor/github.com/gogf/gf/v2/encoding/gbinary/gbinary.go +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -// Package gbinary provides useful API for handling binary/bytes data. -// -// Note that package gbinary encodes the data using LittleEndian in default. -package gbinary - -func Encode(values ...interface{}) []byte { - return LeEncode(values...) -} - -func EncodeByLength(length int, values ...interface{}) []byte { - return LeEncodeByLength(length, values...) -} - -func Decode(b []byte, values ...interface{}) error { - return LeDecode(b, values...) 
-} - -func EncodeString(s string) []byte { - return LeEncodeString(s) -} - -func DecodeToString(b []byte) string { - return LeDecodeToString(b) -} - -func EncodeBool(b bool) []byte { - return LeEncodeBool(b) -} - -func EncodeInt(i int) []byte { - return LeEncodeInt(i) -} - -func EncodeUint(i uint) []byte { - return LeEncodeUint(i) -} - -func EncodeInt8(i int8) []byte { - return LeEncodeInt8(i) -} - -func EncodeUint8(i uint8) []byte { - return LeEncodeUint8(i) -} - -func EncodeInt16(i int16) []byte { - return LeEncodeInt16(i) -} - -func EncodeUint16(i uint16) []byte { - return LeEncodeUint16(i) -} - -func EncodeInt32(i int32) []byte { - return LeEncodeInt32(i) -} - -func EncodeUint32(i uint32) []byte { - return LeEncodeUint32(i) -} - -func EncodeInt64(i int64) []byte { - return LeEncodeInt64(i) -} - -func EncodeUint64(i uint64) []byte { - return LeEncodeUint64(i) -} - -func EncodeFloat32(f float32) []byte { - return LeEncodeFloat32(f) -} - -func EncodeFloat64(f float64) []byte { - return LeEncodeFloat64(f) -} - -func DecodeToInt(b []byte) int { - return LeDecodeToInt(b) -} - -func DecodeToUint(b []byte) uint { - return LeDecodeToUint(b) -} - -func DecodeToBool(b []byte) bool { - return LeDecodeToBool(b) -} - -func DecodeToInt8(b []byte) int8 { - return LeDecodeToInt8(b) -} - -func DecodeToUint8(b []byte) uint8 { - return LeDecodeToUint8(b) -} - -func DecodeToInt16(b []byte) int16 { - return LeDecodeToInt16(b) -} - -func DecodeToUint16(b []byte) uint16 { - return LeDecodeToUint16(b) -} - -func DecodeToInt32(b []byte) int32 { - return LeDecodeToInt32(b) -} - -func DecodeToUint32(b []byte) uint32 { - return LeDecodeToUint32(b) -} - -func DecodeToInt64(b []byte) int64 { - return LeDecodeToInt64(b) -} - -func DecodeToUint64(b []byte) uint64 { - return LeDecodeToUint64(b) -} - -func DecodeToFloat32(b []byte) float32 { - return LeDecodeToFloat32(b) -} - -func DecodeToFloat64(b []byte) float64 { - return LeDecodeToFloat64(b) -} diff --git 
a/vendor/github.com/gogf/gf/v2/encoding/gbinary/gbinary_be.go b/vendor/github.com/gogf/gf/v2/encoding/gbinary/gbinary_be.go deleted file mode 100644 index 6f608548..00000000 --- a/vendor/github.com/gogf/gf/v2/encoding/gbinary/gbinary_be.go +++ /dev/null @@ -1,287 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gbinary - -import ( - "bytes" - "context" - "encoding/binary" - "fmt" - "math" - - "github.com/gogf/gf/v2/errors/gerror" - "github.com/gogf/gf/v2/internal/intlog" -) - -// BeEncode encodes one or multiple `values` into bytes using BigEndian. -// It uses type asserting checking the type of each value of `values` and internally -// calls corresponding converting function do the bytes converting. -// -// It supports common variable type asserting, and finally it uses fmt.Sprintf converting -// value to string and then to bytes. 
-func BeEncode(values ...interface{}) []byte { - buf := new(bytes.Buffer) - for i := 0; i < len(values); i++ { - if values[i] == nil { - return buf.Bytes() - } - - switch value := values[i].(type) { - case int: - buf.Write(BeEncodeInt(value)) - case int8: - buf.Write(BeEncodeInt8(value)) - case int16: - buf.Write(BeEncodeInt16(value)) - case int32: - buf.Write(BeEncodeInt32(value)) - case int64: - buf.Write(BeEncodeInt64(value)) - case uint: - buf.Write(BeEncodeUint(value)) - case uint8: - buf.Write(BeEncodeUint8(value)) - case uint16: - buf.Write(BeEncodeUint16(value)) - case uint32: - buf.Write(BeEncodeUint32(value)) - case uint64: - buf.Write(BeEncodeUint64(value)) - case bool: - buf.Write(BeEncodeBool(value)) - case string: - buf.Write(BeEncodeString(value)) - case []byte: - buf.Write(value) - case float32: - buf.Write(BeEncodeFloat32(value)) - case float64: - buf.Write(BeEncodeFloat64(value)) - default: - if err := binary.Write(buf, binary.BigEndian, value); err != nil { - intlog.Errorf(context.TODO(), `%+v`, err) - buf.Write(BeEncodeString(fmt.Sprintf("%v", value))) - } - } - } - return buf.Bytes() -} - -func BeEncodeByLength(length int, values ...interface{}) []byte { - b := BeEncode(values...) - if len(b) < length { - b = append(b, make([]byte, length-len(b))...) 
- } else if len(b) > length { - b = b[0:length] - } - return b -} - -func BeDecode(b []byte, values ...interface{}) error { - var ( - err error - buf = bytes.NewBuffer(b) - ) - for i := 0; i < len(values); i++ { - if err = binary.Read(buf, binary.BigEndian, values[i]); err != nil { - err = gerror.Wrap(err, `binary.Read failed`) - return err - } - } - return nil -} - -func BeEncodeString(s string) []byte { - return []byte(s) -} - -func BeDecodeToString(b []byte) string { - return string(b) -} - -func BeEncodeBool(b bool) []byte { - if b { - return []byte{1} - } else { - return []byte{0} - } -} - -func BeEncodeInt(i int) []byte { - if i <= math.MaxInt8 { - return BeEncodeInt8(int8(i)) - } else if i <= math.MaxInt16 { - return BeEncodeInt16(int16(i)) - } else if i <= math.MaxInt32 { - return BeEncodeInt32(int32(i)) - } else { - return BeEncodeInt64(int64(i)) - } -} - -func BeEncodeUint(i uint) []byte { - if i <= math.MaxUint8 { - return BeEncodeUint8(uint8(i)) - } else if i <= math.MaxUint16 { - return BeEncodeUint16(uint16(i)) - } else if i <= math.MaxUint32 { - return BeEncodeUint32(uint32(i)) - } else { - return BeEncodeUint64(uint64(i)) - } -} - -func BeEncodeInt8(i int8) []byte { - return []byte{byte(i)} -} - -func BeEncodeUint8(i uint8) []byte { - return []byte{i} -} - -func BeEncodeInt16(i int16) []byte { - b := make([]byte, 2) - binary.BigEndian.PutUint16(b, uint16(i)) - return b -} - -func BeEncodeUint16(i uint16) []byte { - b := make([]byte, 2) - binary.BigEndian.PutUint16(b, i) - return b -} - -func BeEncodeInt32(i int32) []byte { - b := make([]byte, 4) - binary.BigEndian.PutUint32(b, uint32(i)) - return b -} - -func BeEncodeUint32(i uint32) []byte { - b := make([]byte, 4) - binary.BigEndian.PutUint32(b, i) - return b -} - -func BeEncodeInt64(i int64) []byte { - b := make([]byte, 8) - binary.BigEndian.PutUint64(b, uint64(i)) - return b -} - -func BeEncodeUint64(i uint64) []byte { - b := make([]byte, 8) - binary.BigEndian.PutUint64(b, i) - return b -} - 
-func BeEncodeFloat32(f float32) []byte { - bits := math.Float32bits(f) - b := make([]byte, 4) - binary.BigEndian.PutUint32(b, bits) - return b -} - -func BeEncodeFloat64(f float64) []byte { - bits := math.Float64bits(f) - b := make([]byte, 8) - binary.BigEndian.PutUint64(b, bits) - return b -} - -func BeDecodeToInt(b []byte) int { - if len(b) < 2 { - return int(BeDecodeToUint8(b)) - } else if len(b) < 3 { - return int(BeDecodeToUint16(b)) - } else if len(b) < 5 { - return int(BeDecodeToUint32(b)) - } else { - return int(BeDecodeToUint64(b)) - } -} - -func BeDecodeToUint(b []byte) uint { - if len(b) < 2 { - return uint(BeDecodeToUint8(b)) - } else if len(b) < 3 { - return uint(BeDecodeToUint16(b)) - } else if len(b) < 5 { - return uint(BeDecodeToUint32(b)) - } else { - return uint(BeDecodeToUint64(b)) - } -} - -func BeDecodeToBool(b []byte) bool { - if len(b) == 0 { - return false - } - if bytes.Equal(b, make([]byte, len(b))) { - return false - } - return true -} - -func BeDecodeToInt8(b []byte) int8 { - if len(b) == 0 { - panic(`empty slice given`) - } - return int8(b[0]) -} - -func BeDecodeToUint8(b []byte) uint8 { - if len(b) == 0 { - panic(`empty slice given`) - } - return b[0] -} - -func BeDecodeToInt16(b []byte) int16 { - return int16(binary.BigEndian.Uint16(BeFillUpSize(b, 2))) -} - -func BeDecodeToUint16(b []byte) uint16 { - return binary.BigEndian.Uint16(BeFillUpSize(b, 2)) -} - -func BeDecodeToInt32(b []byte) int32 { - return int32(binary.BigEndian.Uint32(BeFillUpSize(b, 4))) -} - -func BeDecodeToUint32(b []byte) uint32 { - return binary.BigEndian.Uint32(BeFillUpSize(b, 4)) -} - -func BeDecodeToInt64(b []byte) int64 { - return int64(binary.BigEndian.Uint64(BeFillUpSize(b, 8))) -} - -func BeDecodeToUint64(b []byte) uint64 { - return binary.BigEndian.Uint64(BeFillUpSize(b, 8)) -} - -func BeDecodeToFloat32(b []byte) float32 { - return math.Float32frombits(binary.BigEndian.Uint32(BeFillUpSize(b, 4))) -} - -func BeDecodeToFloat64(b []byte) float64 { - return 
math.Float64frombits(binary.BigEndian.Uint64(BeFillUpSize(b, 8))) -} - -// BeFillUpSize fills up the bytes `b` to given length `l` using big BigEndian. -// -// Note that it creates a new bytes slice by copying the original one to avoid changing -// the original parameter bytes. -func BeFillUpSize(b []byte, l int) []byte { - if len(b) >= l { - return b[:l] - } - c := make([]byte, l) - copy(c[l-len(b):], b) - return c -} diff --git a/vendor/github.com/gogf/gf/v2/encoding/gbinary/gbinary_bit.go b/vendor/github.com/gogf/gf/v2/encoding/gbinary/gbinary_bit.go deleted file mode 100644 index 3e93dcab..00000000 --- a/vendor/github.com/gogf/gf/v2/encoding/gbinary/gbinary_bit.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gbinary - -// NOTE: THIS IS AN EXPERIMENTAL FEATURE! - -// Bit Binary bit (0 | 1) -type Bit int8 - -// EncodeBits does encode bits return bits Default coding -func EncodeBits(bits []Bit, i int, l int) []Bit { - return EncodeBitsWithUint(bits, uint(i), l) -} - -// EncodeBitsWithUint . Merge ui bitwise into the bits array and occupy the length bits -// (Note: binary 0 | 1 digits are stored in the uis array) -func EncodeBitsWithUint(bits []Bit, ui uint, l int) []Bit { - a := make([]Bit, l) - for i := l - 1; i >= 0; i-- { - a[i] = Bit(ui & 1) - ui >>= 1 - } - if bits != nil { - return append(bits, a...) - } - return a -} - -// EncodeBitsToBytes . does encode bits to bytes -// Convert bits to [] byte, encode from left to right, and add less than 1 byte from 0 to the end. 
-func EncodeBitsToBytes(bits []Bit) []byte { - if len(bits)%8 != 0 { - for i := 0; i < len(bits)%8; i++ { - bits = append(bits, 0) - } - } - b := make([]byte, 0) - for i := 0; i < len(bits); i += 8 { - b = append(b, byte(DecodeBitsToUint(bits[i:i+8]))) - } - return b -} - -// DecodeBits .does decode bits to int -// Resolve to int -func DecodeBits(bits []Bit) int { - v := 0 - for _, i := range bits { - v = v<<1 | int(i) - } - return v -} - -// DecodeBitsToUint .Resolve to uint -func DecodeBitsToUint(bits []Bit) uint { - v := uint(0) - for _, i := range bits { - v = v<<1 | uint(i) - } - return v -} - -// DecodeBytesToBits .Parsing [] byte into character array [] uint8 -func DecodeBytesToBits(bs []byte) []Bit { - bits := make([]Bit, 0) - for _, b := range bs { - bits = EncodeBitsWithUint(bits, uint(b), 8) - } - return bits -} diff --git a/vendor/github.com/gogf/gf/v2/encoding/gbinary/gbinary_func.go b/vendor/github.com/gogf/gf/v2/encoding/gbinary/gbinary_func.go deleted file mode 100644 index 6e1fba24..00000000 --- a/vendor/github.com/gogf/gf/v2/encoding/gbinary/gbinary_func.go +++ /dev/null @@ -1,7 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gbinary diff --git a/vendor/github.com/gogf/gf/v2/encoding/gbinary/gbinary_le.go b/vendor/github.com/gogf/gf/v2/encoding/gbinary/gbinary_le.go deleted file mode 100644 index b648c09d..00000000 --- a/vendor/github.com/gogf/gf/v2/encoding/gbinary/gbinary_le.go +++ /dev/null @@ -1,287 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. 
- -package gbinary - -import ( - "bytes" - "context" - "encoding/binary" - "fmt" - "math" - - "github.com/gogf/gf/v2/errors/gerror" - "github.com/gogf/gf/v2/internal/intlog" -) - -// LeEncode encodes one or multiple `values` into bytes using LittleEndian. -// It uses type asserting checking the type of each value of `values` and internally -// calls corresponding converting function do the bytes converting. -// -// It supports common variable type asserting, and finally it uses fmt.Sprintf converting -// value to string and then to bytes. -func LeEncode(values ...interface{}) []byte { - buf := new(bytes.Buffer) - for i := 0; i < len(values); i++ { - if values[i] == nil { - return buf.Bytes() - } - switch value := values[i].(type) { - case int: - buf.Write(LeEncodeInt(value)) - case int8: - buf.Write(LeEncodeInt8(value)) - case int16: - buf.Write(LeEncodeInt16(value)) - case int32: - buf.Write(LeEncodeInt32(value)) - case int64: - buf.Write(LeEncodeInt64(value)) - case uint: - buf.Write(LeEncodeUint(value)) - case uint8: - buf.Write(LeEncodeUint8(value)) - case uint16: - buf.Write(LeEncodeUint16(value)) - case uint32: - buf.Write(LeEncodeUint32(value)) - case uint64: - buf.Write(LeEncodeUint64(value)) - case bool: - buf.Write(LeEncodeBool(value)) - case string: - buf.Write(LeEncodeString(value)) - case []byte: - buf.Write(value) - case float32: - buf.Write(LeEncodeFloat32(value)) - case float64: - buf.Write(LeEncodeFloat64(value)) - - default: - if err := binary.Write(buf, binary.LittleEndian, value); err != nil { - intlog.Errorf(context.TODO(), `%+v`, err) - buf.Write(LeEncodeString(fmt.Sprintf("%v", value))) - } - } - } - return buf.Bytes() -} - -func LeEncodeByLength(length int, values ...interface{}) []byte { - b := LeEncode(values...) - if len(b) < length { - b = append(b, make([]byte, length-len(b))...) 
- } else if len(b) > length { - b = b[0:length] - } - return b -} - -func LeDecode(b []byte, values ...interface{}) error { - var ( - err error - buf = bytes.NewBuffer(b) - ) - for i := 0; i < len(values); i++ { - if err = binary.Read(buf, binary.LittleEndian, values[i]); err != nil { - err = gerror.Wrap(err, `binary.Read failed`) - return err - } - } - return nil -} - -func LeEncodeString(s string) []byte { - return []byte(s) -} - -func LeDecodeToString(b []byte) string { - return string(b) -} - -func LeEncodeBool(b bool) []byte { - if b { - return []byte{1} - } else { - return []byte{0} - } -} - -func LeEncodeInt(i int) []byte { - if i <= math.MaxInt8 { - return EncodeInt8(int8(i)) - } else if i <= math.MaxInt16 { - return EncodeInt16(int16(i)) - } else if i <= math.MaxInt32 { - return EncodeInt32(int32(i)) - } else { - return EncodeInt64(int64(i)) - } -} - -func LeEncodeUint(i uint) []byte { - if i <= math.MaxUint8 { - return EncodeUint8(uint8(i)) - } else if i <= math.MaxUint16 { - return EncodeUint16(uint16(i)) - } else if i <= math.MaxUint32 { - return EncodeUint32(uint32(i)) - } else { - return EncodeUint64(uint64(i)) - } -} - -func LeEncodeInt8(i int8) []byte { - return []byte{byte(i)} -} - -func LeEncodeUint8(i uint8) []byte { - return []byte{i} -} - -func LeEncodeInt16(i int16) []byte { - b := make([]byte, 2) - binary.LittleEndian.PutUint16(b, uint16(i)) - return b -} - -func LeEncodeUint16(i uint16) []byte { - b := make([]byte, 2) - binary.LittleEndian.PutUint16(b, i) - return b -} - -func LeEncodeInt32(i int32) []byte { - b := make([]byte, 4) - binary.LittleEndian.PutUint32(b, uint32(i)) - return b -} - -func LeEncodeUint32(i uint32) []byte { - b := make([]byte, 4) - binary.LittleEndian.PutUint32(b, i) - return b -} - -func LeEncodeInt64(i int64) []byte { - b := make([]byte, 8) - binary.LittleEndian.PutUint64(b, uint64(i)) - return b -} - -func LeEncodeUint64(i uint64) []byte { - b := make([]byte, 8) - binary.LittleEndian.PutUint64(b, i) - return b -} - 
-func LeEncodeFloat32(f float32) []byte { - bits := math.Float32bits(f) - b := make([]byte, 4) - binary.LittleEndian.PutUint32(b, bits) - return b -} - -func LeEncodeFloat64(f float64) []byte { - bits := math.Float64bits(f) - b := make([]byte, 8) - binary.LittleEndian.PutUint64(b, bits) - return b -} - -func LeDecodeToInt(b []byte) int { - if len(b) < 2 { - return int(LeDecodeToUint8(b)) - } else if len(b) < 3 { - return int(LeDecodeToUint16(b)) - } else if len(b) < 5 { - return int(LeDecodeToUint32(b)) - } else { - return int(LeDecodeToUint64(b)) - } -} - -func LeDecodeToUint(b []byte) uint { - if len(b) < 2 { - return uint(LeDecodeToUint8(b)) - } else if len(b) < 3 { - return uint(LeDecodeToUint16(b)) - } else if len(b) < 5 { - return uint(LeDecodeToUint32(b)) - } else { - return uint(LeDecodeToUint64(b)) - } -} - -func LeDecodeToBool(b []byte) bool { - if len(b) == 0 { - return false - } - if bytes.Equal(b, make([]byte, len(b))) { - return false - } - return true -} - -func LeDecodeToInt8(b []byte) int8 { - if len(b) == 0 { - panic(`empty slice given`) - } - return int8(b[0]) -} - -func LeDecodeToUint8(b []byte) uint8 { - if len(b) == 0 { - panic(`empty slice given`) - } - return b[0] -} - -func LeDecodeToInt16(b []byte) int16 { - return int16(binary.LittleEndian.Uint16(LeFillUpSize(b, 2))) -} - -func LeDecodeToUint16(b []byte) uint16 { - return binary.LittleEndian.Uint16(LeFillUpSize(b, 2)) -} - -func LeDecodeToInt32(b []byte) int32 { - return int32(binary.LittleEndian.Uint32(LeFillUpSize(b, 4))) -} - -func LeDecodeToUint32(b []byte) uint32 { - return binary.LittleEndian.Uint32(LeFillUpSize(b, 4)) -} - -func LeDecodeToInt64(b []byte) int64 { - return int64(binary.LittleEndian.Uint64(LeFillUpSize(b, 8))) -} - -func LeDecodeToUint64(b []byte) uint64 { - return binary.LittleEndian.Uint64(LeFillUpSize(b, 8)) -} - -func LeDecodeToFloat32(b []byte) float32 { - return math.Float32frombits(binary.LittleEndian.Uint32(LeFillUpSize(b, 4))) -} - -func LeDecodeToFloat64(b 
[]byte) float64 { - return math.Float64frombits(binary.LittleEndian.Uint64(LeFillUpSize(b, 8))) -} - -// LeFillUpSize fills up the bytes `b` to given length `l` using LittleEndian. -// -// Note that it creates a new bytes slice by copying the original one to avoid changing -// the original parameter bytes. -func LeFillUpSize(b []byte, l int) []byte { - if len(b) >= l { - return b[:l] - } - c := make([]byte, l) - copy(c, b) - return c -} diff --git a/vendor/github.com/gogf/gf/v2/encoding/gcompress/gcompress.go b/vendor/github.com/gogf/gf/v2/encoding/gcompress/gcompress.go deleted file mode 100644 index 1b4ca942..00000000 --- a/vendor/github.com/gogf/gf/v2/encoding/gcompress/gcompress.go +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -// Package gcompress provides kinds of compression algorithms for binary/bytes data. -package gcompress diff --git a/vendor/github.com/gogf/gf/v2/encoding/gcompress/gcompress_gzip.go b/vendor/github.com/gogf/gf/v2/encoding/gcompress/gcompress_gzip.go deleted file mode 100644 index 40464f92..00000000 --- a/vendor/github.com/gogf/gf/v2/encoding/gcompress/gcompress_gzip.go +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gcompress - -import ( - "bytes" - "compress/gzip" - "io" - - "github.com/gogf/gf/v2/errors/gerror" - "github.com/gogf/gf/v2/os/gfile" -) - -// Gzip compresses `data` using gzip algorithm. -// The optional parameter `level` specifies the compression level from -// 1 to 9 which means from none to the best compression. 
-// -// Note that it returns error if given `level` is invalid. -func Gzip(data []byte, level ...int) ([]byte, error) { - var ( - writer *gzip.Writer - buf bytes.Buffer - err error - ) - if len(level) > 0 { - writer, err = gzip.NewWriterLevel(&buf, level[0]) - if err != nil { - err = gerror.Wrapf(err, `gzip.NewWriterLevel failed for level "%d"`, level[0]) - return nil, err - } - } else { - writer = gzip.NewWriter(&buf) - } - if _, err = writer.Write(data); err != nil { - err = gerror.Wrap(err, `writer.Write failed`) - return nil, err - } - if err = writer.Close(); err != nil { - err = gerror.Wrap(err, `writer.Close failed`) - return nil, err - } - return buf.Bytes(), nil -} - -// GzipFile compresses the file `src` to `dst` using gzip algorithm. -func GzipFile(srcFilePath, dstFilePath string, level ...int) (err error) { - dstFile, err := gfile.Create(dstFilePath) - if err != nil { - return err - } - defer dstFile.Close() - - return GzipPathWriter(srcFilePath, dstFile, level...) -} - -// GzipPathWriter compresses `filePath` to `writer` using gzip compressing algorithm. -// -// Note that the parameter `path` can be either a directory or a file. -func GzipPathWriter(filePath string, writer io.Writer, level ...int) error { - var ( - gzipWriter *gzip.Writer - err error - ) - srcFile, err := gfile.Open(filePath) - if err != nil { - return err - } - defer srcFile.Close() - - if len(level) > 0 { - gzipWriter, err = gzip.NewWriterLevel(writer, level[0]) - if err != nil { - return gerror.Wrap(err, `gzip.NewWriterLevel failed`) - } - } else { - gzipWriter = gzip.NewWriter(writer) - } - defer gzipWriter.Close() - - if _, err = io.Copy(gzipWriter, srcFile); err != nil { - err = gerror.Wrap(err, `io.Copy failed`) - return err - } - return nil -} - -// UnGzip decompresses `data` with gzip algorithm. 
-func UnGzip(data []byte) ([]byte, error) { - var buf bytes.Buffer - reader, err := gzip.NewReader(bytes.NewReader(data)) - if err != nil { - err = gerror.Wrap(err, `gzip.NewReader failed`) - return nil, err - } - if _, err = io.Copy(&buf, reader); err != nil { - err = gerror.Wrap(err, `io.Copy failed`) - return nil, err - } - if err = reader.Close(); err != nil { - err = gerror.Wrap(err, `reader.Close failed`) - return buf.Bytes(), err - } - return buf.Bytes(), nil -} - -// UnGzipFile decompresses srcFilePath `src` to `dst` using gzip algorithm. -func UnGzipFile(srcFilePath, dstFilePath string) error { - srcFile, err := gfile.Open(srcFilePath) - if err != nil { - return err - } - defer srcFile.Close() - dstFile, err := gfile.Create(dstFilePath) - if err != nil { - return err - } - defer dstFile.Close() - - reader, err := gzip.NewReader(srcFile) - if err != nil { - err = gerror.Wrap(err, `gzip.NewReader failed`) - return err - } - defer reader.Close() - - if _, err = io.Copy(dstFile, reader); err != nil { - err = gerror.Wrap(err, `io.Copy failed`) - return err - } - return nil -} diff --git a/vendor/github.com/gogf/gf/v2/encoding/gcompress/gcompress_zip.go b/vendor/github.com/gogf/gf/v2/encoding/gcompress/gcompress_zip.go deleted file mode 100644 index a1c2ff41..00000000 --- a/vendor/github.com/gogf/gf/v2/encoding/gcompress/gcompress_zip.go +++ /dev/null @@ -1,280 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. 
- -package gcompress - -import ( - "archive/zip" - "bytes" - "context" - "io" - "os" - "path/filepath" - "strings" - - "github.com/gogf/gf/v2/errors/gerror" - "github.com/gogf/gf/v2/internal/intlog" - "github.com/gogf/gf/v2/os/gfile" - "github.com/gogf/gf/v2/text/gstr" -) - -// ZipPath compresses `fileOrFolderPaths` to `dstFilePath` using zip compressing algorithm. -// -// The parameter `paths` can be either a directory or a file, which -// supports multiple paths join with ','. -// The unnecessary parameter `prefix` indicates the path prefix for zip file. -func ZipPath(fileOrFolderPaths, dstFilePath string, prefix ...string) error { - writer, err := os.Create(dstFilePath) - if err != nil { - err = gerror.Wrapf(err, `os.Create failed for name "%s"`, dstFilePath) - return err - } - defer writer.Close() - zipWriter := zip.NewWriter(writer) - defer zipWriter.Close() - for _, path := range strings.Split(fileOrFolderPaths, ",") { - path = strings.TrimSpace(path) - if err = doZipPathWriter(path, gfile.RealPath(dstFilePath), zipWriter, prefix...); err != nil { - return err - } - } - return nil -} - -// ZipPathWriter compresses `fileOrFolderPaths` to `writer` using zip compressing algorithm. -// -// Note that the parameter `fileOrFolderPaths` can be either a directory or a file, which -// supports multiple paths join with ','. -// The unnecessary parameter `prefix` indicates the path prefix for zip file. -func ZipPathWriter(fileOrFolderPaths string, writer io.Writer, prefix ...string) error { - zipWriter := zip.NewWriter(writer) - defer zipWriter.Close() - for _, path := range strings.Split(fileOrFolderPaths, ",") { - path = strings.TrimSpace(path) - if err := doZipPathWriter(path, "", zipWriter, prefix...); err != nil { - return err - } - } - return nil -} - -// ZipPathContent compresses `fileOrFolderPaths` to []byte using zip compressing algorithm. 
-// -// Note that the parameter `fileOrFolderPaths` can be either a directory or a file, which -// supports multiple paths join with ','. -// The unnecessary parameter `prefix` indicates the path prefix for zip file. -func ZipPathContent(fileOrFolderPaths string, prefix ...string) ([]byte, error) { - var ( - err error - buffer = bytes.NewBuffer(nil) - ) - if err = ZipPathWriter(fileOrFolderPaths, buffer, prefix...); err != nil { - return nil, err - } - return buffer.Bytes(), nil -} - -// doZipPathWriter compresses given `fileOrFolderPaths` and writes the content to `zipWriter`. -// -// The parameter `fileOrFolderPath` can be either a single file or folder path. -// The parameter `exclude` specifies the exclusive file path that is not compressed to `zipWriter`, -// commonly the destination zip file path. -// The unnecessary parameter `prefix` indicates the path prefix for zip file. -func doZipPathWriter(fileOrFolderPath string, exclude string, zipWriter *zip.Writer, prefix ...string) error { - var ( - err error - files []string - ) - fileOrFolderPath, err = gfile.Search(fileOrFolderPath) - if err != nil { - return err - } - if gfile.IsDir(fileOrFolderPath) { - files, err = gfile.ScanDir(fileOrFolderPath, "*", true) - if err != nil { - return err - } - } else { - files = []string{fileOrFolderPath} - } - headerPrefix := "" - if len(prefix) > 0 && prefix[0] != "" { - headerPrefix = prefix[0] - } - headerPrefix = strings.TrimRight(headerPrefix, "\\/") - if gfile.IsDir(fileOrFolderPath) { - if len(headerPrefix) > 0 { - headerPrefix += "/" - } else { - headerPrefix = gfile.Basename(fileOrFolderPath) - } - } - headerPrefix = strings.ReplaceAll(headerPrefix, "//", "/") - for _, file := range files { - if exclude == file { - intlog.Printf(context.TODO(), `exclude file path: %s`, file) - continue - } - dir := gfile.Dir(file[len(fileOrFolderPath):]) - if dir == "." 
{ - dir = "" - } - if err = zipFile(file, headerPrefix+dir, zipWriter); err != nil { - return err - } - } - return nil -} - -// UnZipFile decompresses `archive` to `dstFolderPath` using zip compressing algorithm. -// -// The parameter `dstFolderPath` should be a directory. -// The optional parameter `zippedPrefix` specifies the unzipped path of `zippedFilePath`, -// which can be used to specify part of the archive file to unzip. -func UnZipFile(zippedFilePath, dstFolderPath string, zippedPrefix ...string) error { - readerCloser, err := zip.OpenReader(zippedFilePath) - if err != nil { - err = gerror.Wrapf(err, `zip.OpenReader failed for name "%s"`, dstFolderPath) - return err - } - defer readerCloser.Close() - return unZipFileWithReader(&readerCloser.Reader, dstFolderPath, zippedPrefix...) -} - -// UnZipContent decompresses `zippedContent` to `dstFolderPath` using zip compressing algorithm. -// -// The parameter `dstFolderPath` should be a directory. -// The parameter `zippedPrefix` specifies the unzipped path of `zippedContent`, -// which can be used to specify part of the archive file to unzip. -func UnZipContent(zippedContent []byte, dstFolderPath string, zippedPrefix ...string) error { - reader, err := zip.NewReader(bytes.NewReader(zippedContent), int64(len(zippedContent))) - if err != nil { - err = gerror.Wrapf(err, `zip.NewReader failed`) - return err - } - return unZipFileWithReader(reader, dstFolderPath, zippedPrefix...) 
-} - -func unZipFileWithReader(reader *zip.Reader, dstFolderPath string, zippedPrefix ...string) error { - prefix := "" - if len(zippedPrefix) > 0 { - prefix = gstr.Replace(zippedPrefix[0], `\`, `/`) - } - if err := os.MkdirAll(dstFolderPath, 0755); err != nil { - return err - } - var ( - name string - dstPath string - dstDir string - ) - for _, file := range reader.File { - name = gstr.Replace(file.Name, `\`, `/`) - name = gstr.Trim(name, "/") - if prefix != "" { - if !strings.HasPrefix(name, prefix) { - continue - } - name = name[len(prefix):] - } - dstPath = filepath.Join(dstFolderPath, name) - if file.FileInfo().IsDir() { - _ = os.MkdirAll(dstPath, file.Mode()) - continue - } - dstDir = filepath.Dir(dstPath) - if len(dstDir) > 0 { - if _, err := os.Stat(dstDir); os.IsNotExist(err) { - if err = os.MkdirAll(dstDir, 0755); err != nil { - err = gerror.Wrapf(err, `os.MkdirAll failed for path "%s"`, dstDir) - return err - } - } - } - fileReader, err := file.Open() - if err != nil { - err = gerror.Wrapf(err, `file.Open failed`) - return err - } - // The fileReader is closed in function doCopyForUnZipFileWithReader. - if err = doCopyForUnZipFileWithReader(file, fileReader, dstPath); err != nil { - return err - } - } - return nil -} - -func doCopyForUnZipFileWithReader(file *zip.File, fileReader io.ReadCloser, dstPath string) error { - defer fileReader.Close() - targetFile, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, file.Mode()) - if err != nil { - err = gerror.Wrapf(err, `os.OpenFile failed for name "%s"`, dstPath) - return err - } - defer targetFile.Close() - - if _, err = io.Copy(targetFile, fileReader); err != nil { - err = gerror.Wrapf(err, `io.Copy failed from "%s" to "%s"`, file.Name, dstPath) - return err - } - return nil -} - -// zipFile compresses the file of given `filePath` and writes the content to `zw`. -// The parameter `prefix` indicates the path prefix for zip file. 
-func zipFile(filePath string, prefix string, zw *zip.Writer) error { - file, err := os.Open(filePath) - if err != nil { - err = gerror.Wrapf(err, `os.Open failed for name "%s"`, filePath) - return err - } - defer file.Close() - - info, err := file.Stat() - if err != nil { - err = gerror.Wrapf(err, `file.Stat failed for name "%s"`, filePath) - return err - } - - header, err := createFileHeader(info, prefix) - if err != nil { - return err - } - - if info.IsDir() { - header.Name += "/" - } else { - header.Method = zip.Deflate - } - - writer, err := zw.CreateHeader(header) - if err != nil { - err = gerror.Wrapf(err, `zip.Writer.CreateHeader failed for header "%#v"`, header) - return err - } - if !info.IsDir() { - if _, err = io.Copy(writer, file); err != nil { - err = gerror.Wrapf(err, `io.Copy failed from "%s" to "%s"`, filePath, header.Name) - return err - } - } - return nil -} - -func createFileHeader(info os.FileInfo, prefix string) (*zip.FileHeader, error) { - header, err := zip.FileInfoHeader(info) - if err != nil { - err = gerror.Wrapf(err, `zip.FileInfoHeader failed for info "%#v"`, info) - return nil, err - } - - if len(prefix) > 0 { - prefix = strings.ReplaceAll(prefix, `\`, `/`) - prefix = strings.TrimRight(prefix, `/`) - header.Name = prefix + `/` + header.Name - } - return header, nil -} diff --git a/vendor/github.com/gogf/gf/v2/encoding/gcompress/gcompress_zlib.go b/vendor/github.com/gogf/gf/v2/encoding/gcompress/gcompress_zlib.go deleted file mode 100644 index c45b3d2b..00000000 --- a/vendor/github.com/gogf/gf/v2/encoding/gcompress/gcompress_zlib.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -// Package gcompress provides kinds of compression algorithms for binary/bytes data. 
-package gcompress - -import ( - "bytes" - "compress/zlib" - "io" - - "github.com/gogf/gf/v2/errors/gerror" -) - -// Zlib compresses `data` with zlib algorithm. -func Zlib(data []byte) ([]byte, error) { - if data == nil || len(data) < 13 { - return data, nil - } - var ( - err error - in bytes.Buffer - writer = zlib.NewWriter(&in) - ) - - if _, err = writer.Write(data); err != nil { - err = gerror.Wrapf(err, `zlib.Writer.Write failed`) - return nil, err - } - if err = writer.Close(); err != nil { - err = gerror.Wrapf(err, `zlib.Writer.Close failed`) - return in.Bytes(), err - } - return in.Bytes(), nil -} - -// UnZlib decompresses `data` with zlib algorithm. -func UnZlib(data []byte) ([]byte, error) { - if data == nil || len(data) < 13 { - return data, nil - } - var ( - out bytes.Buffer - bytesReader = bytes.NewReader(data) - zlibReader, err = zlib.NewReader(bytesReader) - ) - if err != nil { - err = gerror.Wrapf(err, `zlib.NewReader failed`) - return nil, err - } - if _, err = io.Copy(&out, zlibReader); err != nil { - err = gerror.Wrapf(err, `io.Copy failed`) - return nil, err - } - return out.Bytes(), nil -} diff --git a/vendor/github.com/gogf/gf/v2/encoding/ghash/ghash.go b/vendor/github.com/gogf/gf/v2/encoding/ghash/ghash.go deleted file mode 100644 index de7d1573..00000000 --- a/vendor/github.com/gogf/gf/v2/encoding/ghash/ghash.go +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -// Package ghash provides some classic hash functions(uint32/uint64) in go. 
-package ghash diff --git a/vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_ap.go b/vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_ap.go deleted file mode 100644 index 9ce369e8..00000000 --- a/vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_ap.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package ghash - -// AP implements the classic AP hash algorithm for 32 bits. -func AP(str []byte) uint32 { - var hash uint32 - for i := 0; i < len(str); i++ { - if (i & 1) == 0 { - hash ^= (hash << 7) ^ uint32(str[i]) ^ (hash >> 3) - } else { - hash ^= ^((hash << 11) ^ uint32(str[i]) ^ (hash >> 5)) + 1 - } - } - return hash -} - -// AP64 implements the classic AP hash algorithm for 64 bits. -func AP64(str []byte) uint64 { - var hash uint64 - for i := 0; i < len(str); i++ { - if (i & 1) == 0 { - hash ^= (hash << 7) ^ uint64(str[i]) ^ (hash >> 3) - } else { - hash ^= ^((hash << 11) ^ uint64(str[i]) ^ (hash >> 5)) + 1 - } - } - return hash -} diff --git a/vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_bkdr.go b/vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_bkdr.go deleted file mode 100644 index 2f4cc969..00000000 --- a/vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_bkdr.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package ghash - -// BKDR implements the classic BKDR hash algorithm for 32 bits. -func BKDR(str []byte) uint32 { - var ( - seed uint32 = 131 // 31 131 1313 13131 131313 etc.. 
- hash uint32 = 0 - ) - for i := 0; i < len(str); i++ { - hash = hash*seed + uint32(str[i]) - } - return hash -} - -// BKDR64 implements the classic BKDR hash algorithm for 64 bits. -func BKDR64(str []byte) uint64 { - var ( - seed uint64 = 131 // 31 131 1313 13131 131313 etc.. - hash uint64 = 0 - ) - for i := 0; i < len(str); i++ { - hash = hash*seed + uint64(str[i]) - } - return hash -} diff --git a/vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_djb.go b/vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_djb.go deleted file mode 100644 index da5f0646..00000000 --- a/vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_djb.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package ghash - -// DJB implements the classic DJB hash algorithm for 32 bits. -func DJB(str []byte) uint32 { - var hash uint32 = 5381 - for i := 0; i < len(str); i++ { - hash += (hash << 5) + uint32(str[i]) - } - return hash -} - -// DJB64 implements the classic DJB hash algorithm for 64 bits. -func DJB64(str []byte) uint64 { - var hash uint64 = 5381 - for i := 0; i < len(str); i++ { - hash += (hash << 5) + uint64(str[i]) - } - return hash -} diff --git a/vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_elf.go b/vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_elf.go deleted file mode 100644 index 3562fc45..00000000 --- a/vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_elf.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package ghash - -// ELF implements the classic ELF hash algorithm for 32 bits. 
-func ELF(str []byte) uint32 { - var ( - hash uint32 - x uint32 - ) - for i := 0; i < len(str); i++ { - hash = (hash << 4) + uint32(str[i]) - if x = hash & 0xF0000000; x != 0 { - hash ^= x >> 24 - hash &= ^x + 1 - } - } - return hash -} - -// ELF64 implements the classic ELF hash algorithm for 64 bits. -func ELF64(str []byte) uint64 { - var ( - hash uint64 - x uint64 - ) - for i := 0; i < len(str); i++ { - hash = (hash << 4) + uint64(str[i]) - if x = hash & 0xF000000000000000; x != 0 { - hash ^= x >> 24 - hash &= ^x + 1 - } - } - return hash -} diff --git a/vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_jshash.go b/vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_jshash.go deleted file mode 100644 index 91220c71..00000000 --- a/vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_jshash.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package ghash - -// JS implements the classic JS hash algorithm for 32 bits. -func JS(str []byte) uint32 { - var hash uint32 = 1315423911 - for i := 0; i < len(str); i++ { - hash ^= (hash << 5) + uint32(str[i]) + (hash >> 2) - } - return hash -} - -// JS64 implements the classic JS hash algorithm for 64 bits. -func JS64(str []byte) uint64 { - var hash uint64 = 1315423911 - for i := 0; i < len(str); i++ { - hash ^= (hash << 5) + uint64(str[i]) + (hash >> 2) - } - return hash -} diff --git a/vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_pjw.go b/vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_pjw.go deleted file mode 100644 index 5b82ca1e..00000000 --- a/vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_pjw.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. 
-// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package ghash - -// PJW implements the classic PJW hash algorithm for 32 bits. -func PJW(str []byte) uint32 { - var ( - BitsInUnsignedInt uint32 = 32 // 4 * 8 - ThreeQuarters = (BitsInUnsignedInt * 3) / 4 - OneEighth = BitsInUnsignedInt / 8 - HighBits uint32 = (0xFFFFFFFF) << (BitsInUnsignedInt - OneEighth) - hash uint32 - test uint32 - ) - for i := 0; i < len(str); i++ { - hash = (hash << OneEighth) + uint32(str[i]) - if test = hash & HighBits; test != 0 { - hash = (hash ^ (test >> ThreeQuarters)) & (^HighBits + 1) - } - } - return hash -} - -// PJW64 implements the classic PJW hash algorithm for 64 bits. -func PJW64(str []byte) uint64 { - var ( - BitsInUnsignedInt uint64 = 32 // 4 * 8 - ThreeQuarters = (BitsInUnsignedInt * 3) / 4 - OneEighth = BitsInUnsignedInt / 8 - HighBits uint64 = (0xFFFFFFFFFFFFFFFF) << (BitsInUnsignedInt - OneEighth) - hash uint64 - test uint64 - ) - for i := 0; i < len(str); i++ { - hash = (hash << OneEighth) + uint64(str[i]) - if test = hash & HighBits; test != 0 { - hash = (hash ^ (test >> ThreeQuarters)) & (^HighBits + 1) - } - } - return hash -} diff --git a/vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_rs.go b/vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_rs.go deleted file mode 100644 index e9e95563..00000000 --- a/vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_rs.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package ghash - -// RS implements the classic RS hash algorithm for 32 bits. 
-func RS(str []byte) uint32 { - var ( - b uint32 = 378551 - a uint32 = 63689 - hash uint32 = 0 - ) - for i := 0; i < len(str); i++ { - hash = hash*a + uint32(str[i]) - a *= b - } - return hash -} - -// RS64 implements the classic RS hash algorithm for 64 bits. -func RS64(str []byte) uint64 { - var ( - b uint64 = 378551 - a uint64 = 63689 - hash uint64 = 0 - ) - for i := 0; i < len(str); i++ { - hash = hash*a + uint64(str[i]) - a *= b - } - return hash -} diff --git a/vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_sdbm.go b/vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_sdbm.go deleted file mode 100644 index bbda9437..00000000 --- a/vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_sdbm.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package ghash - -// SDBM implements the classic SDBM hash algorithm for 32 bits. -func SDBM(str []byte) uint32 { - var hash uint32 - for i := 0; i < len(str); i++ { - // equivalent to: hash = 65599*hash + uint32(str[i]); - hash = uint32(str[i]) + (hash << 6) + (hash << 16) - hash - } - return hash -} - -// SDBM64 implements the classic SDBM hash algorithm for 64 bits. -func SDBM64(str []byte) uint64 { - var hash uint64 - for i := 0; i < len(str); i++ { - // equivalent to: hash = 65599*hash + uint32(str[i]) - hash = uint64(str[i]) + (hash << 6) + (hash << 16) - hash - } - return hash -} diff --git a/vendor/github.com/gogf/gf/v2/errors/gcode/gcode.go b/vendor/github.com/gogf/gf/v2/errors/gcode/gcode.go deleted file mode 100644 index b3bb1b37..00000000 --- a/vendor/github.com/gogf/gf/v2/errors/gcode/gcode.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright GoFrame gf Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. 
-// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -// Package gcode provides universal error code definition and common error codes implements. -package gcode - -// Code is universal error code interface definition. -type Code interface { - // Code returns the integer number of current error code. - Code() int - - // Message returns the brief message for current error code. - Message() string - - // Detail returns the detailed information of current error code, - // which is mainly designed as an extension field for error code. - Detail() interface{} -} - -// ================================================================================================================ -// Common error code definition. -// There are reserved internal error code by framework: code < 1000. -// ================================================================================================================ - -var ( - CodeNil = localCode{-1, "", nil} // No error code specified. - CodeOK = localCode{0, "OK", nil} // It is OK. - CodeInternalError = localCode{50, "Internal Error", nil} // An error occurred internally. - CodeValidationFailed = localCode{51, "Validation Failed", nil} // Data validation failed. - CodeDbOperationError = localCode{52, "Database Operation Error", nil} // Database operation error. - CodeInvalidParameter = localCode{53, "Invalid Parameter", nil} // The given parameter for current operation is invalid. - CodeMissingParameter = localCode{54, "Missing Parameter", nil} // Parameter for current operation is missing. - CodeInvalidOperation = localCode{55, "Invalid Operation", nil} // The function cannot be used like this. - CodeInvalidConfiguration = localCode{56, "Invalid Configuration", nil} // The configuration is invalid for current operation. - CodeMissingConfiguration = localCode{57, "Missing Configuration", nil} // The configuration is missing for current operation. 
- CodeNotImplemented = localCode{58, "Not Implemented", nil} // The operation is not implemented yet. - CodeNotSupported = localCode{59, "Not Supported", nil} // The operation is not supported yet. - CodeOperationFailed = localCode{60, "Operation Failed", nil} // I tried, but I cannot give you what you want. - CodeNotAuthorized = localCode{61, "Not Authorized", nil} // Not Authorized. - CodeSecurityReason = localCode{62, "Security Reason", nil} // Security Reason. - CodeServerBusy = localCode{63, "Server Is Busy", nil} // Server is busy, please try again later. - CodeUnknown = localCode{64, "Unknown Error", nil} // Unknown error. - CodeNotFound = localCode{65, "Not Found", nil} // Resource does not exist. - CodeInvalidRequest = localCode{66, "Invalid Request", nil} // Invalid request. - CodeNecessaryPackageNotImport = localCode{67, "Necessary Package Not Import", nil} // It needs necessary package import. - CodeInternalPanic = localCode{68, "Internal Panic", nil} // An panic occurred internally. - CodeBusinessValidationFailed = localCode{300, "Business Validation Failed", nil} // Business validation failed. -) - -// New creates and returns an error code. -// Note that it returns an interface object of Code. -func New(code int, message string, detail interface{}) Code { - return localCode{ - code: code, - message: message, - detail: detail, - } -} - -// WithCode creates and returns a new error code based on given Code. -// The code and message is from given `code`, but the detail if from given `detail`. 
-func WithCode(code Code, detail interface{}) Code { - return localCode{ - code: code.Code(), - message: code.Message(), - detail: detail, - } -} diff --git a/vendor/github.com/gogf/gf/v2/errors/gcode/gcode_local.go b/vendor/github.com/gogf/gf/v2/errors/gcode/gcode_local.go deleted file mode 100644 index 1ec1d1ea..00000000 --- a/vendor/github.com/gogf/gf/v2/errors/gcode/gcode_local.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright GoFrame gf Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gcode - -import "fmt" - -// localCode is an implementer for interface Code for internal usage only. -type localCode struct { - code int // Error code, usually an integer. - message string // Brief message for this error code. - detail interface{} // As type of interface, it is mainly designed as an extension field for error code. -} - -// Code returns the integer number of current error code. -func (c localCode) Code() int { - return c.code -} - -// Message returns the brief message for current error code. -func (c localCode) Message() string { - return c.message -} - -// Detail returns the detailed information of current error code, -// which is mainly designed as an extension field for error code. -func (c localCode) Detail() interface{} { - return c.detail -} - -// String returns current error code as a string. 
-func (c localCode) String() string { - if c.detail != nil { - return fmt.Sprintf(`%d:%s %v`, c.code, c.message, c.detail) - } - if c.message != "" { - return fmt.Sprintf(`%d:%s`, c.code, c.message) - } - return fmt.Sprintf(`%d`, c.code) -} diff --git a/vendor/github.com/gogf/gf/v2/errors/gerror/gerror.go b/vendor/github.com/gogf/gf/v2/errors/gerror/gerror.go deleted file mode 100644 index 6bb4614c..00000000 --- a/vendor/github.com/gogf/gf/v2/errors/gerror/gerror.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright GoFrame gf Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -// Package gerror provides rich functionalities to manipulate errors. -// -// For maintainers, please very note that, -// this package is quite a basic package, which SHOULD NOT import extra packages -// except standard packages and internal packages, to avoid cycle imports. -package gerror - -import ( - "github.com/gogf/gf/v2/errors/gcode" - "github.com/gogf/gf/v2/internal/command" -) - -// IIs is the interface for Is feature. -type IIs interface { - Error() string - Is(target error) bool -} - -// IEqual is the interface for Equal feature. -type IEqual interface { - Error() string - Equal(target error) bool -} - -// ICode is the interface for Code feature. -type ICode interface { - Error() string - Code() gcode.Code -} - -// IStack is the interface for Stack feature. -type IStack interface { - Error() string - Stack() string -} - -// ICause is the interface for Cause feature. -type ICause interface { - Error() string - Cause() error -} - -// ICurrent is the interface for Current feature. -type ICurrent interface { - Error() string - Current() error -} - -// IUnwrap is the interface for Unwrap feature. 
-type IUnwrap interface { - Error() string - Unwrap() error -} - -const ( - // commandEnvKeyForBrief is the command environment name for switch key for brief error stack. - commandEnvKeyForBrief = "gf.gerror.brief" - - // commaSeparatorSpace is the comma separator with space. - commaSeparatorSpace = ", " -) - -var ( - // isUsingBriefStack is the switch key for brief error stack. - isUsingBriefStack bool -) - -func init() { - value := command.GetOptWithEnv(commandEnvKeyForBrief) - if value == "1" || value == "true" { - isUsingBriefStack = true - } -} diff --git a/vendor/github.com/gogf/gf/v2/errors/gerror/gerror_api.go b/vendor/github.com/gogf/gf/v2/errors/gerror/gerror_api.go deleted file mode 100644 index 9f6a8c9e..00000000 --- a/vendor/github.com/gogf/gf/v2/errors/gerror/gerror_api.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gerror - -import ( - "fmt" - - "github.com/gogf/gf/v2/errors/gcode" -) - -// New creates and returns an error which is formatted from given text. -func New(text string) error { - return &Error{ - stack: callers(), - text: text, - code: gcode.CodeNil, - } -} - -// Newf returns an error that formats as the given format and args. -func Newf(format string, args ...interface{}) error { - return &Error{ - stack: callers(), - text: fmt.Sprintf(format, args...), - code: gcode.CodeNil, - } -} - -// NewSkip creates and returns an error which is formatted from given text. -// The parameter `skip` specifies the stack callers skipped amount. -func NewSkip(skip int, text string) error { - return &Error{ - stack: callers(skip), - text: text, - code: gcode.CodeNil, - } -} - -// NewSkipf returns an error that formats as the given format and args. 
-// The parameter `skip` specifies the stack callers skipped amount. -func NewSkipf(skip int, format string, args ...interface{}) error { - return &Error{ - stack: callers(skip), - text: fmt.Sprintf(format, args...), - code: gcode.CodeNil, - } -} - -// Wrap wraps error with text. It returns nil if given err is nil. -// Note that it does not lose the error code of wrapped error, as it inherits the error code from it. -func Wrap(err error, text string) error { - if err == nil { - return nil - } - return &Error{ - error: err, - stack: callers(), - text: text, - code: Code(err), - } -} - -// Wrapf returns an error annotating err with a stack trace at the point Wrapf is called, and the format specifier. -// It returns nil if given `err` is nil. -// Note that it does not lose the error code of wrapped error, as it inherits the error code from it. -func Wrapf(err error, format string, args ...interface{}) error { - if err == nil { - return nil - } - return &Error{ - error: err, - stack: callers(), - text: fmt.Sprintf(format, args...), - code: Code(err), - } -} - -// WrapSkip wraps error with text. It returns nil if given err is nil. -// The parameter `skip` specifies the stack callers skipped amount. -// Note that it does not lose the error code of wrapped error, as it inherits the error code from it. -func WrapSkip(skip int, err error, text string) error { - if err == nil { - return nil - } - return &Error{ - error: err, - stack: callers(skip), - text: text, - code: Code(err), - } -} - -// WrapSkipf wraps error with text that is formatted with given format and args. It returns nil if given err is nil. -// The parameter `skip` specifies the stack callers skipped amount. -// Note that it does not lose the error code of wrapped error, as it inherits the error code from it. 
-func WrapSkipf(skip int, err error, format string, args ...interface{}) error { - if err == nil { - return nil - } - return &Error{ - error: err, - stack: callers(skip), - text: fmt.Sprintf(format, args...), - code: Code(err), - } -} diff --git a/vendor/github.com/gogf/gf/v2/errors/gerror/gerror_api_code.go b/vendor/github.com/gogf/gf/v2/errors/gerror/gerror_api_code.go deleted file mode 100644 index e4e4a2b6..00000000 --- a/vendor/github.com/gogf/gf/v2/errors/gerror/gerror_api_code.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gerror - -import ( - "fmt" - "strings" - - "github.com/gogf/gf/v2/errors/gcode" -) - -// NewCode creates and returns an error that has error code and given text. -func NewCode(code gcode.Code, text ...string) error { - return &Error{ - stack: callers(), - text: strings.Join(text, commaSeparatorSpace), - code: code, - } -} - -// NewCodef returns an error that has error code and formats as the given format and args. -func NewCodef(code gcode.Code, format string, args ...interface{}) error { - return &Error{ - stack: callers(), - text: fmt.Sprintf(format, args...), - code: code, - } -} - -// NewCodeSkip creates and returns an error which has error code and is formatted from given text. -// The parameter `skip` specifies the stack callers skipped amount. -func NewCodeSkip(code gcode.Code, skip int, text ...string) error { - return &Error{ - stack: callers(skip), - text: strings.Join(text, commaSeparatorSpace), - code: code, - } -} - -// NewCodeSkipf returns an error that has error code and formats as the given format and args. -// The parameter `skip` specifies the stack callers skipped amount. 
-func NewCodeSkipf(code gcode.Code, skip int, format string, args ...interface{}) error { - return &Error{ - stack: callers(skip), - text: fmt.Sprintf(format, args...), - code: code, - } -} - -// WrapCode wraps error with code and text. -// It returns nil if given err is nil. -func WrapCode(code gcode.Code, err error, text ...string) error { - if err == nil { - return nil - } - return &Error{ - error: err, - stack: callers(), - text: strings.Join(text, commaSeparatorSpace), - code: code, - } -} - -// WrapCodef wraps error with code and format specifier. -// It returns nil if given `err` is nil. -func WrapCodef(code gcode.Code, err error, format string, args ...interface{}) error { - if err == nil { - return nil - } - return &Error{ - error: err, - stack: callers(), - text: fmt.Sprintf(format, args...), - code: code, - } -} - -// WrapCodeSkip wraps error with code and text. -// It returns nil if given err is nil. -// The parameter `skip` specifies the stack callers skipped amount. -func WrapCodeSkip(code gcode.Code, skip int, err error, text ...string) error { - if err == nil { - return nil - } - return &Error{ - error: err, - stack: callers(skip), - text: strings.Join(text, commaSeparatorSpace), - code: code, - } -} - -// WrapCodeSkipf wraps error with code and text that is formatted with given format and args. -// It returns nil if given err is nil. -// The parameter `skip` specifies the stack callers skipped amount. -func WrapCodeSkipf(code gcode.Code, skip int, err error, format string, args ...interface{}) error { - if err == nil { - return nil - } - return &Error{ - error: err, - stack: callers(skip), - text: fmt.Sprintf(format, args...), - code: code, - } -} - -// Code returns the error code of current error. -// It returns `CodeNil` if it has no error code neither it does not implement interface Code. 
-func Code(err error) gcode.Code { - if err == nil { - return gcode.CodeNil - } - if e, ok := err.(ICode); ok { - return e.Code() - } - if e, ok := err.(IUnwrap); ok { - return Code(e.Unwrap()) - } - return gcode.CodeNil -} - -// HasCode checks and reports whether `err` has `code` in its chaining errors. -func HasCode(err error, code gcode.Code) bool { - if err == nil { - return false - } - if e, ok := err.(ICode); ok { - return code == e.Code() - } - if e, ok := err.(IUnwrap); ok { - return HasCode(e.Unwrap(), code) - } - return false -} diff --git a/vendor/github.com/gogf/gf/v2/errors/gerror/gerror_api_option.go b/vendor/github.com/gogf/gf/v2/errors/gerror/gerror_api_option.go deleted file mode 100644 index 33ed881f..00000000 --- a/vendor/github.com/gogf/gf/v2/errors/gerror/gerror_api_option.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gerror - -import "github.com/gogf/gf/v2/errors/gcode" - -// Option is option for creating error. -type Option struct { - Error error // Wrapped error if any. - Stack bool // Whether recording stack information into error. - Text string // Error text, which is created by New* functions. - Code gcode.Code // Error code if necessary. -} - -// NewOption creates and returns a custom error with Option. -// It is the senior usage for creating error, which is often used internally in framework. 
-func NewOption(option Option) error { - err := &Error{ - error: option.Error, - text: option.Text, - code: option.Code, - } - if option.Stack { - err.stack = callers() - } - return err -} diff --git a/vendor/github.com/gogf/gf/v2/errors/gerror/gerror_api_stack.go b/vendor/github.com/gogf/gf/v2/errors/gerror/gerror_api_stack.go deleted file mode 100644 index 79b4d6b0..00000000 --- a/vendor/github.com/gogf/gf/v2/errors/gerror/gerror_api_stack.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gerror - -import ( - "runtime" -) - -// stack represents a stack of program counters. -type stack []uintptr - -const ( - // maxStackDepth marks the max stack depth for error back traces. - maxStackDepth = 64 -) - -// Cause returns the root cause error of `err`. -func Cause(err error) error { - if err == nil { - return nil - } - if e, ok := err.(ICause); ok { - return e.Cause() - } - if e, ok := err.(IUnwrap); ok { - return Cause(e.Unwrap()) - } - return err -} - -// Stack returns the stack callers as string. -// It returns the error string directly if the `err` does not support stacks. -func Stack(err error) string { - if err == nil { - return "" - } - if e, ok := err.(IStack); ok { - return e.Stack() - } - return err.Error() -} - -// Current creates and returns the current level error. -// It returns nil if current level error is nil. -func Current(err error) error { - if err == nil { - return nil - } - if e, ok := err.(ICurrent); ok { - return e.Current() - } - return err -} - -// Unwrap returns the next level error. -// It returns nil if current level error or the next level error is nil. 
-func Unwrap(err error) error { - if err == nil { - return nil - } - if e, ok := err.(IUnwrap); ok { - return e.Unwrap() - } - return nil -} - -// HasStack checks and reports whether `err` implemented interface `gerror.IStack`. -func HasStack(err error) bool { - _, ok := err.(IStack) - return ok -} - -// Equal reports whether current error `err` equals to error `target`. -// Please note that, in default comparison logic for `Error`, -// the errors are considered the same if both the `code` and `text` of them are the same. -func Equal(err, target error) bool { - if err == target { - return true - } - if e, ok := err.(IEqual); ok { - return e.Equal(target) - } - if e, ok := target.(IEqual); ok { - return e.Equal(err) - } - return false -} - -// Is reports whether current error `err` has error `target` in its chaining errors. -// It is just for implements for stdlib errors.Is from Go version 1.17. -func Is(err, target error) bool { - if e, ok := err.(IIs); ok { - return e.Is(target) - } - return false -} - -// HasError is alias of Is, which more easily understanding semantics. -func HasError(err, target error) bool { - return Is(err, target) -} - -// callers returns the stack callers. -// Note that it here just retrieves the caller memory address array not the caller information. -func callers(skip ...int) stack { - var ( - pcs [maxStackDepth]uintptr - n = 3 - ) - if len(skip) > 0 { - n += skip[0] - } - return pcs[:runtime.Callers(n, pcs[:])] -} diff --git a/vendor/github.com/gogf/gf/v2/errors/gerror/gerror_error.go b/vendor/github.com/gogf/gf/v2/errors/gerror/gerror_error.go deleted file mode 100644 index b05bfd1f..00000000 --- a/vendor/github.com/gogf/gf/v2/errors/gerror/gerror_error.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. 
-// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gerror - -import ( - "errors" - "fmt" - "runtime" - "strings" - - "github.com/gogf/gf/v2/errors/gcode" -) - -// Error is custom error for additional features. -type Error struct { - error error // Wrapped error. - stack stack // Stack array, which records the stack information when this error is created or wrapped. - text string // Custom Error text when Error is created, might be empty when its code is not nil. - code gcode.Code // Error code if necessary. -} - -const ( - // Filtering key for current error module paths. - stackFilterKeyLocal = "/errors/gerror/gerror" -) - -var ( - // goRootForFilter is used for stack filtering in development environment purpose. - goRootForFilter = runtime.GOROOT() -) - -func init() { - if goRootForFilter != "" { - goRootForFilter = strings.ReplaceAll(goRootForFilter, "\\", "/") - } -} - -// Error implements the interface of Error, it returns all the error as string. -func (err *Error) Error() string { - if err == nil { - return "" - } - errStr := err.text - if errStr == "" && err.code != nil { - errStr = err.code.Message() - } - if err.error != nil { - if errStr != "" { - errStr += ": " - } - errStr += err.error.Error() - } - return errStr -} - -// Cause returns the root cause error. -func (err *Error) Cause() error { - if err == nil { - return nil - } - loop := err - for loop != nil { - if loop.error != nil { - if e, ok := loop.error.(*Error); ok { - // Internal Error struct. - loop = e - } else if e, ok := loop.error.(ICause); ok { - // Other Error that implements ApiCause interface. - return e.Cause() - } else { - return loop.error - } - } else { - // return loop - // - // To be compatible with Case of https://github.com/pkg/errors. - return errors.New(loop.text) - } - } - return nil -} - -// Current creates and returns the current level error. -// It returns nil if current level error is nil. 
-func (err *Error) Current() error { - if err == nil { - return nil - } - return &Error{ - error: nil, - stack: err.stack, - text: err.text, - code: err.code, - } -} - -// Unwrap is alias of function `Next`. -// It is just for implements for stdlib errors.Unwrap from Go version 1.17. -func (err *Error) Unwrap() error { - if err == nil { - return nil - } - return err.error -} - -// Equal reports whether current error `err` equals to error `target`. -// Please note that, in default comparison for `Error`, -// the errors are considered the same if both the `code` and `text` of them are the same. -func (err *Error) Equal(target error) bool { - if err == target { - return true - } - // Code should be the same. - // Note that if both errors have `nil` code, they are also considered equal. - if err.code != Code(target) { - return false - } - // Text should be the same. - if err.text != fmt.Sprintf(`%-s`, target) { - return false - } - return true -} - -// Is reports whether current error `err` has error `target` in its chaining errors. -// It is just for implements for stdlib errors.Is from Go version 1.17. -func (err *Error) Is(target error) bool { - if Equal(err, target) { - return true - } - nextErr := err.Unwrap() - if nextErr == nil { - return false - } - if Equal(nextErr, target) { - return true - } - if e, ok := nextErr.(IIs); ok { - return e.Is(target) - } - return false -} diff --git a/vendor/github.com/gogf/gf/v2/errors/gerror/gerror_error_code.go b/vendor/github.com/gogf/gf/v2/errors/gerror/gerror_error_code.go deleted file mode 100644 index 1000e9f9..00000000 --- a/vendor/github.com/gogf/gf/v2/errors/gerror/gerror_error_code.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. 
- -package gerror - -import ( - "github.com/gogf/gf/v2/errors/gcode" -) - -// Code returns the error code. -// It returns CodeNil if it has no error code. -func (err *Error) Code() gcode.Code { - if err == nil { - return gcode.CodeNil - } - if err.code == gcode.CodeNil { - return Code(err.Unwrap()) - } - return err.code -} - -// SetCode updates the internal code with given code. -func (err *Error) SetCode(code gcode.Code) { - if err == nil { - return - } - err.code = code -} diff --git a/vendor/github.com/gogf/gf/v2/errors/gerror/gerror_error_format.go b/vendor/github.com/gogf/gf/v2/errors/gerror/gerror_error_format.go deleted file mode 100644 index 16be393e..00000000 --- a/vendor/github.com/gogf/gf/v2/errors/gerror/gerror_error_format.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gerror - -import ( - "fmt" - "io" -) - -// Format formats the frame according to the fmt.Formatter interface. 
-// -// %v, %s : Print all the error string; -// %-v, %-s : Print current level error string; -// %+s : Print full stack error list; -// %+v : Print the error string and full stack error list -func (err *Error) Format(s fmt.State, verb rune) { - switch verb { - case 's', 'v': - switch { - case s.Flag('-'): - if err.text != "" { - _, _ = io.WriteString(s, err.text) - } else { - _, _ = io.WriteString(s, err.Error()) - } - case s.Flag('+'): - if verb == 's' { - _, _ = io.WriteString(s, err.Stack()) - } else { - _, _ = io.WriteString(s, err.Error()+"\n"+err.Stack()) - } - default: - _, _ = io.WriteString(s, err.Error()) - } - } -} diff --git a/vendor/github.com/gogf/gf/v2/errors/gerror/gerror_error_json.go b/vendor/github.com/gogf/gf/v2/errors/gerror/gerror_error_json.go deleted file mode 100644 index 5c290d7a..00000000 --- a/vendor/github.com/gogf/gf/v2/errors/gerror/gerror_error_json.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gerror - -// MarshalJSON implements the interface MarshalJSON for json.Marshal. -// Note that do not use pointer as its receiver here. -func (err Error) MarshalJSON() ([]byte, error) { - return []byte(`"` + err.Error() + `"`), nil -} diff --git a/vendor/github.com/gogf/gf/v2/errors/gerror/gerror_error_stack.go b/vendor/github.com/gogf/gf/v2/errors/gerror/gerror_error_stack.go deleted file mode 100644 index 598d8cac..00000000 --- a/vendor/github.com/gogf/gf/v2/errors/gerror/gerror_error_stack.go +++ /dev/null @@ -1,171 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. 
- -package gerror - -import ( - "bytes" - "container/list" - "fmt" - "runtime" - "strings" - - "github.com/gogf/gf/v2/internal/consts" -) - -// stackInfo manages stack info of certain error. -type stackInfo struct { - Index int // Index is the index of current error in whole error stacks. - Message string // Error information string. - Lines *list.List // Lines contains all error stack lines of current error stack in sequence. -} - -// stackLine manages each line info of stack. -type stackLine struct { - Function string // Function name, which contains its full package path. - FileLine string // FileLine is the source file name and its line number of Function. -} - -// Stack returns the error stack information as string. -func (err *Error) Stack() string { - if err == nil { - return "" - } - var ( - loop = err - index = 1 - infos []*stackInfo - ) - for loop != nil { - info := &stackInfo{ - Index: index, - Message: fmt.Sprintf("%-v", loop), - } - index++ - infos = append(infos, info) - loopLinesOfStackInfo(loop.stack, info) - if loop.error != nil { - if e, ok := loop.error.(*Error); ok { - loop = e - } else { - infos = append(infos, &stackInfo{ - Index: index, - Message: loop.error.Error(), - }) - index++ - break - } - } else { - break - } - } - filterLinesOfStackInfos(infos) - return formatStackInfos(infos) -} - -// filterLinesOfStackInfos removes repeated lines, which exist in subsequent stacks, from top errors. 
-func filterLinesOfStackInfos(infos []*stackInfo) { - var ( - ok bool - set = make(map[string]struct{}) - info *stackInfo - line *stackLine - removes []*list.Element - ) - for i := len(infos) - 1; i >= 0; i-- { - info = infos[i] - if info.Lines == nil { - continue - } - for n, e := 0, info.Lines.Front(); n < info.Lines.Len(); n, e = n+1, e.Next() { - line = e.Value.(*stackLine) - if _, ok = set[line.FileLine]; ok { - removes = append(removes, e) - } else { - set[line.FileLine] = struct{}{} - } - } - if len(removes) > 0 { - for _, e := range removes { - info.Lines.Remove(e) - } - } - removes = removes[:0] - } -} - -// formatStackInfos formats and returns error stack information as string. -func formatStackInfos(infos []*stackInfo) string { - var buffer = bytes.NewBuffer(nil) - for i, info := range infos { - buffer.WriteString(fmt.Sprintf("%d. %s\n", i+1, info.Message)) - if info.Lines != nil && info.Lines.Len() > 0 { - formatStackLines(buffer, info.Lines) - } - } - return buffer.String() -} - -// formatStackLines formats and returns error stack lines as string. -func formatStackLines(buffer *bytes.Buffer, lines *list.List) string { - var ( - line *stackLine - space = " " - length = lines.Len() - ) - for i, e := 0, lines.Front(); i < length; i, e = i+1, e.Next() { - line = e.Value.(*stackLine) - // Graceful indent. - if i >= 9 { - space = " " - } - buffer.WriteString(fmt.Sprintf( - " %d).%s%s\n %s\n", - i+1, space, line.Function, line.FileLine, - )) - } - return buffer.String() -} - -// loopLinesOfStackInfo iterates the stack info lines and produces the stack line info. -func loopLinesOfStackInfo(st stack, info *stackInfo) { - if st == nil { - return - } - for _, p := range st { - if fn := runtime.FuncForPC(p - 1); fn != nil { - file, line := fn.FileLine(p - 1) - if isUsingBriefStack { - // filter whole GoFrame packages stack paths. - if strings.Contains(file, consts.StackFilterKeyForGoFrame) { - continue - } - } else { - // package path stack filtering. 
- if strings.Contains(file, stackFilterKeyLocal) { - continue - } - } - // Avoid stack string like "`autogenerated`" - if strings.Contains(file, "<") { - continue - } - // Ignore GO ROOT paths. - if goRootForFilter != "" && - len(file) >= len(goRootForFilter) && - file[0:len(goRootForFilter)] == goRootForFilter { - continue - } - if info.Lines == nil { - info.Lines = list.New() - } - info.Lines.PushBack(&stackLine{ - Function: fn.Name(), - FileLine: fmt.Sprintf(`%s:%d`, file, line), - }) - } - } -} diff --git a/vendor/github.com/gogf/gf/v2/internal/command/command.go b/vendor/github.com/gogf/gf/v2/internal/command/command.go deleted file mode 100644 index 688201eb..00000000 --- a/vendor/github.com/gogf/gf/v2/internal/command/command.go +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. -// - -// Package command provides console operations, like options/arguments reading. -package command - -import ( - "os" - "regexp" - "strings" -) - -var ( - defaultParsedArgs = make([]string, 0) - defaultParsedOptions = make(map[string]string) - argumentRegex = regexp.MustCompile(`^\-{1,2}([\w\?\.\-]+)(=){0,1}(.*)$`) -) - -// Init does custom initialization. -func Init(args ...string) { - if len(args) == 0 { - if len(defaultParsedArgs) == 0 && len(defaultParsedOptions) == 0 { - args = os.Args - } else { - return - } - } else { - defaultParsedArgs = make([]string, 0) - defaultParsedOptions = make(map[string]string) - } - // Parsing os.Args with default algorithm. - defaultParsedArgs, defaultParsedOptions = ParseUsingDefaultAlgorithm(args...) -} - -// ParseUsingDefaultAlgorithm parses arguments using default algorithm. 
-func ParseUsingDefaultAlgorithm(args ...string) (parsedArgs []string, parsedOptions map[string]string) { - parsedArgs = make([]string, 0) - parsedOptions = make(map[string]string) - for i := 0; i < len(args); { - array := argumentRegex.FindStringSubmatch(args[i]) - if len(array) > 2 { - if array[2] == "=" { - parsedOptions[array[1]] = array[3] - } else if i < len(args)-1 { - if len(args[i+1]) > 0 && args[i+1][0] == '-' { - // Eg: gf gen -d -n 1 - parsedOptions[array[1]] = array[3] - } else { - // Eg: gf gen -n 2 - parsedOptions[array[1]] = args[i+1] - i += 2 - continue - } - } else { - // Eg: gf gen -h - parsedOptions[array[1]] = array[3] - } - } else { - parsedArgs = append(parsedArgs, args[i]) - } - i++ - } - return -} - -// GetOpt returns the option value named `name`. -func GetOpt(name string, def ...string) string { - Init() - if v, ok := defaultParsedOptions[name]; ok { - return v - } - if len(def) > 0 { - return def[0] - } - return "" -} - -// GetOptAll returns all parsed options. -func GetOptAll() map[string]string { - Init() - return defaultParsedOptions -} - -// ContainsOpt checks whether option named `name` exist in the arguments. -func ContainsOpt(name string) bool { - Init() - _, ok := defaultParsedOptions[name] - return ok -} - -// GetArg returns the argument at `index`. -func GetArg(index int, def ...string) string { - Init() - if index < len(defaultParsedArgs) { - return defaultParsedArgs[index] - } - if len(def) > 0 { - return def[0] - } - return "" -} - -// GetArgAll returns all parsed arguments. -func GetArgAll() []string { - Init() - return defaultParsedArgs -} - -// GetOptWithEnv returns the command line argument of the specified `key`. -// If the argument does not exist, then it returns the environment variable with specified `key`. -// It returns the default value `def` if none of them exists. -// -// Fetching Rules: -// 1. Command line arguments are in lowercase format, eg: gf.package.variable; -// 2. 
Environment arguments are in uppercase format, eg: GF_PACKAGE_VARIABLE; -func GetOptWithEnv(key string, def ...string) string { - cmdKey := strings.ToLower(strings.ReplaceAll(key, "_", ".")) - if ContainsOpt(cmdKey) { - return GetOpt(cmdKey) - } else { - envKey := strings.ToUpper(strings.ReplaceAll(key, ".", "_")) - if r, ok := os.LookupEnv(envKey); ok { - return r - } else { - if len(def) > 0 { - return def[0] - } - } - } - return "" -} diff --git a/vendor/github.com/gogf/gf/v2/internal/consts/consts.go b/vendor/github.com/gogf/gf/v2/internal/consts/consts.go deleted file mode 100644 index 8b1bd37f..00000000 --- a/vendor/github.com/gogf/gf/v2/internal/consts/consts.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright GoFrame gf Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -// Package consts defines constants that are shared all among packages of framework. -package consts - -const ( - ConfigNodeNameDatabase = "database" - ConfigNodeNameLogger = "logger" - ConfigNodeNameRedis = "redis" - ConfigNodeNameViewer = "viewer" - ConfigNodeNameServer = "server" // General version configuration item name. - ConfigNodeNameServerSecondary = "httpserver" // New version configuration item name support from v2. - - // StackFilterKeyForGoFrame is the stack filtering key for all GoFrame module paths. - // Eg: .../pkg/mod/github.com/gogf/gf/v2@v2.0.0-20211011134327-54dd11f51122/debug/gdebug/gdebug_caller.go - StackFilterKeyForGoFrame = "github.com/gogf/gf/" -) diff --git a/vendor/github.com/gogf/gf/v2/internal/deepcopy/deepcopy.go b/vendor/github.com/gogf/gf/v2/internal/deepcopy/deepcopy.go deleted file mode 100644 index e379f5fd..00000000 --- a/vendor/github.com/gogf/gf/v2/internal/deepcopy/deepcopy.go +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright GoFrame gf Author(https://goframe.org). 
All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -// Package deepcopy makes deep copies of things using reflection. -// -// This package is maintained from: https://github.com/mohae/deepcopy -package deepcopy - -import ( - "reflect" - "time" -) - -// Interface for delegating copy process to type -type Interface interface { - DeepCopy() interface{} -} - -// Copy creates a deep copy of whatever is passed to it and returns the copy -// in an interface{}. The returned value will need to be asserted to the -// correct type. -func Copy(src interface{}) interface{} { - if src == nil { - return nil - } - - // Copy by type assertion. - switch r := src.(type) { - case - int, int8, int16, int32, int64, - uint, uint8, uint16, uint32, uint64, - float32, float64, - complex64, complex128, - string, - bool: - return r - - default: - if v, ok := src.(Interface); ok { - return v.DeepCopy() - } - var ( - original = reflect.ValueOf(src) // Make the interface a reflect.Value - dst = reflect.New(original.Type()).Elem() // Make a copy of the same type as the original. - ) - // Recursively copy the original. - copyRecursive(original, dst) - // Return the copy as an interface. - return dst.Interface() - } -} - -// copyRecursive does the actual copying of the interface. It currently has -// limited support for what it can handle. Add as needed. -func copyRecursive(original, cpy reflect.Value) { - // check for implement deepcopy.Interface - if original.CanInterface() && original.IsValid() && !original.IsZero() { - if copier, ok := original.Interface().(Interface); ok { - cpy.Set(reflect.ValueOf(copier.DeepCopy())) - return - } - } - - // handle according to original's Kind - switch original.Kind() { - case reflect.Ptr: - // Get the actual value being pointed to. - originalValue := original.Elem() - - // if it isn't valid, return. 
- if !originalValue.IsValid() { - return - } - cpy.Set(reflect.New(originalValue.Type())) - copyRecursive(originalValue, cpy.Elem()) - - case reflect.Interface: - // If this is a nil, don't do anything - if original.IsNil() { - return - } - // Get the value for the interface, not the pointer. - originalValue := original.Elem() - - // Get the value by calling Elem(). - copyValue := reflect.New(originalValue.Type()).Elem() - copyRecursive(originalValue, copyValue) - cpy.Set(copyValue) - - case reflect.Struct: - t, ok := original.Interface().(time.Time) - if ok { - cpy.Set(reflect.ValueOf(t)) - return - } - // Go through each field of the struct and copy it. - for i := 0; i < original.NumField(); i++ { - // The Type's StructField for a given field is checked to see if StructField.PkgPath - // is set to determine if the field is exported or not because CanSet() returns false - // for settable fields. I'm not sure why. -mohae - if original.Type().Field(i).PkgPath != "" { - continue - } - copyRecursive(original.Field(i), cpy.Field(i)) - } - - case reflect.Slice: - if original.IsNil() { - return - } - // Make a new slice and copy each element. 
- cpy.Set(reflect.MakeSlice(original.Type(), original.Len(), original.Cap())) - for i := 0; i < original.Len(); i++ { - copyRecursive(original.Index(i), cpy.Index(i)) - } - - case reflect.Map: - if original.IsNil() { - return - } - cpy.Set(reflect.MakeMap(original.Type())) - for _, key := range original.MapKeys() { - originalValue := original.MapIndex(key) - copyValue := reflect.New(originalValue.Type()).Elem() - copyRecursive(originalValue, copyValue) - copyKey := Copy(key.Interface()) - cpy.SetMapIndex(reflect.ValueOf(copyKey), copyValue) - } - - default: - cpy.Set(original) - } -} diff --git a/vendor/github.com/gogf/gf/v2/internal/empty/empty.go b/vendor/github.com/gogf/gf/v2/internal/empty/empty.go deleted file mode 100644 index 07fee1e2..00000000 --- a/vendor/github.com/gogf/gf/v2/internal/empty/empty.go +++ /dev/null @@ -1,235 +0,0 @@ -// Copyright GoFrame gf Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -// Package empty provides functions for checking empty/nil variables. -package empty - -import ( - "reflect" - "time" - - "github.com/gogf/gf/v2/internal/reflection" -) - -// iString is used for type assert api for String(). -type iString interface { - String() string -} - -// iInterfaces is used for type assert api for Interfaces. -type iInterfaces interface { - Interfaces() []interface{} -} - -// iMapStrAny is the interface support for converting struct parameter to map. -type iMapStrAny interface { - MapStrAny() map[string]interface{} -} - -type iTime interface { - Date() (year int, month time.Month, day int) - IsZero() bool -} - -// IsEmpty checks whether given `value` empty. -// It returns true if `value` is in: 0, nil, false, "", len(slice/map/chan) == 0, -// or else it returns false. 
-// -// The parameter `traceSource` is used for tracing to the source variable if given `value` is type of pointer -// that also points to a pointer. It returns true if the source is empty when `traceSource` is true. -// Note that it might use reflect feature which affects performance a little. -func IsEmpty(value interface{}, traceSource ...bool) bool { - if value == nil { - return true - } - // It firstly checks the variable as common types using assertion to enhance the performance, - // and then using reflection. - switch result := value.(type) { - case int: - return result == 0 - case int8: - return result == 0 - case int16: - return result == 0 - case int32: - return result == 0 - case int64: - return result == 0 - case uint: - return result == 0 - case uint8: - return result == 0 - case uint16: - return result == 0 - case uint32: - return result == 0 - case uint64: - return result == 0 - case float32: - return result == 0 - case float64: - return result == 0 - case bool: - return !result - case string: - return result == "" - case []byte: - return len(result) == 0 - case []rune: - return len(result) == 0 - case []int: - return len(result) == 0 - case []string: - return len(result) == 0 - case []float32: - return len(result) == 0 - case []float64: - return len(result) == 0 - case map[string]interface{}: - return len(result) == 0 - - default: - // Finally, using reflect. - var rv reflect.Value - if v, ok := value.(reflect.Value); ok { - rv = v - } else { - // ========================= - // Common interfaces checks. 
- // ========================= - if f, ok := value.(iTime); ok { - if f == (*time.Time)(nil) { - return true - } - return f.IsZero() - } - if f, ok := value.(iString); ok { - if f == nil { - return true - } - return f.String() == "" - } - if f, ok := value.(iInterfaces); ok { - if f == nil { - return true - } - return len(f.Interfaces()) == 0 - } - if f, ok := value.(iMapStrAny); ok { - if f == nil { - return true - } - return len(f.MapStrAny()) == 0 - } - - rv = reflect.ValueOf(value) - } - - switch rv.Kind() { - case reflect.Bool: - return !rv.Bool() - - case - reflect.Int, - reflect.Int8, - reflect.Int16, - reflect.Int32, - reflect.Int64: - return rv.Int() == 0 - - case - reflect.Uint, - reflect.Uint8, - reflect.Uint16, - reflect.Uint32, - reflect.Uint64, - reflect.Uintptr: - return rv.Uint() == 0 - - case - reflect.Float32, - reflect.Float64: - return rv.Float() == 0 - - case reflect.String: - return rv.Len() == 0 - - case reflect.Struct: - var fieldValueInterface interface{} - for i := 0; i < rv.NumField(); i++ { - fieldValueInterface, _ = reflection.ValueToInterface(rv.Field(i)) - if !IsEmpty(fieldValueInterface) { - return false - } - } - return true - - case - reflect.Chan, - reflect.Map, - reflect.Slice, - reflect.Array: - return rv.Len() == 0 - - case reflect.Ptr: - if len(traceSource) > 0 && traceSource[0] { - return IsEmpty(rv.Elem()) - } - return rv.IsNil() - - case - reflect.Func, - reflect.Interface, - reflect.UnsafePointer: - return rv.IsNil() - - case reflect.Invalid: - return true - } - } - return false -} - -// IsNil checks whether given `value` is nil, especially for interface{} type value. -// Parameter `traceSource` is used for tracing to the source variable if given `value` is type of pointer -// that also points to a pointer. It returns nil if the source is nil when `traceSource` is true. -// Note that it might use reflect feature which affects performance a little. 
-func IsNil(value interface{}, traceSource ...bool) bool { - if value == nil { - return true - } - var rv reflect.Value - if v, ok := value.(reflect.Value); ok { - rv = v - } else { - rv = reflect.ValueOf(value) - } - switch rv.Kind() { - case reflect.Chan, - reflect.Map, - reflect.Slice, - reflect.Func, - reflect.Interface, - reflect.UnsafePointer: - return !rv.IsValid() || rv.IsNil() - - case reflect.Ptr: - if len(traceSource) > 0 && traceSource[0] { - for rv.Kind() == reflect.Ptr { - rv = rv.Elem() - } - if !rv.IsValid() { - return true - } - if rv.Kind() == reflect.Ptr { - return rv.IsNil() - } - } else { - return !rv.IsValid() || rv.IsNil() - } - } - return false -} diff --git a/vendor/github.com/gogf/gf/v2/internal/intlog/intlog.go b/vendor/github.com/gogf/gf/v2/internal/intlog/intlog.go deleted file mode 100644 index e86380dc..00000000 --- a/vendor/github.com/gogf/gf/v2/internal/intlog/intlog.go +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -// Package intlog provides internal logging for GoFrame development usage only. -package intlog - -import ( - "bytes" - "context" - "fmt" - "path/filepath" - "time" - - "go.opentelemetry.io/otel/trace" - - "github.com/gogf/gf/v2/debug/gdebug" - "github.com/gogf/gf/v2/internal/utils" -) - -const ( - stackFilterKey = "/internal/intlog" -) - -// Print prints `v` with newline using fmt.Println. -// The parameter `v` can be multiple variables. -func Print(ctx context.Context, v ...interface{}) { - if !utils.IsDebugEnabled() { - return - } - doPrint(ctx, fmt.Sprint(v...), false) -} - -// Printf prints `v` with format `format` using fmt.Printf. -// The parameter `v` can be multiple variables. 
-func Printf(ctx context.Context, format string, v ...interface{}) { - if !utils.IsDebugEnabled() { - return - } - doPrint(ctx, fmt.Sprintf(format, v...), false) -} - -// Error prints `v` with newline using fmt.Println. -// The parameter `v` can be multiple variables. -func Error(ctx context.Context, v ...interface{}) { - if !utils.IsDebugEnabled() { - return - } - doPrint(ctx, fmt.Sprint(v...), true) -} - -// Errorf prints `v` with format `format` using fmt.Printf. -func Errorf(ctx context.Context, format string, v ...interface{}) { - if !utils.IsDebugEnabled() { - return - } - doPrint(ctx, fmt.Sprintf(format, v...), true) -} - -// PrintFunc prints the output from function `f`. -// It only calls function `f` if debug mode is enabled. -func PrintFunc(ctx context.Context, f func() string) { - if !utils.IsDebugEnabled() { - return - } - s := f() - if s == "" { - return - } - doPrint(ctx, s, false) -} - -// ErrorFunc prints the output from function `f`. -// It only calls function `f` if debug mode is enabled. -func ErrorFunc(ctx context.Context, f func() string) { - if !utils.IsDebugEnabled() { - return - } - s := f() - if s == "" { - return - } - doPrint(ctx, s, true) -} - -func doPrint(ctx context.Context, content string, stack bool) { - if !utils.IsDebugEnabled() { - return - } - buffer := bytes.NewBuffer(nil) - buffer.WriteString(time.Now().Format("2006-01-02 15:04:05.000")) - buffer.WriteString(" [INTE] ") - buffer.WriteString(file()) - buffer.WriteString(" ") - if s := traceIdStr(ctx); s != "" { - buffer.WriteString(s + " ") - } - buffer.WriteString(content) - buffer.WriteString("\n") - if stack { - buffer.WriteString("Caller Stack:\n") - buffer.WriteString(gdebug.StackWithFilter([]string{stackFilterKey})) - } - fmt.Print(buffer.String()) -} - -// traceIdStr retrieves and returns the trace id string for logging output. 
-func traceIdStr(ctx context.Context) string { - if ctx == nil { - return "" - } - spanCtx := trace.SpanContextFromContext(ctx) - if traceId := spanCtx.TraceID(); traceId.IsValid() { - return "{" + traceId.String() + "}" - } - return "" -} - -// file returns caller file name along with its line number. -func file() string { - _, p, l := gdebug.CallerWithFilter([]string{stackFilterKey}) - return fmt.Sprintf(`%s:%d`, filepath.Base(p), l) -} diff --git a/vendor/github.com/gogf/gf/v2/internal/json/json.go b/vendor/github.com/gogf/gf/v2/internal/json/json.go deleted file mode 100644 index 374aec68..00000000 --- a/vendor/github.com/gogf/gf/v2/internal/json/json.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -// Package json provides json operations wrapping ignoring stdlib or third-party lib json. -package json - -import ( - "bytes" - "encoding/json" - "io" - - "github.com/gogf/gf/v2/errors/gerror" -) - -// RawMessage is a raw encoded JSON value. -// It implements Marshaler and Unmarshaler and can -// be used to delay JSON decoding or precompute a JSON encoding. -type RawMessage = json.RawMessage - -// Marshal adapts to json/encoding Marshal API. -// -// Marshal returns the JSON encoding of v, adapts to json/encoding Marshal API -// Refer to https://godoc.org/encoding/json#Marshal for more information. -func Marshal(v interface{}) (marshaledBytes []byte, err error) { - marshaledBytes, err = json.Marshal(v) - if err != nil { - err = gerror.Wrap(err, `json.Marshal failed`) - } - return -} - -// MarshalIndent same as json.MarshalIndent. 
-func MarshalIndent(v interface{}, prefix, indent string) (marshaledBytes []byte, err error) { - marshaledBytes, err = json.MarshalIndent(v, prefix, indent) - if err != nil { - err = gerror.Wrap(err, `json.MarshalIndent failed`) - } - return -} - -// Unmarshal adapts to json/encoding Unmarshal API -// -// Unmarshal parses the JSON-encoded data and stores the result in the value pointed to by v. -// Refer to https://godoc.org/encoding/json#Unmarshal for more information. -func Unmarshal(data []byte, v interface{}) (err error) { - err = json.Unmarshal(data, v) - if err != nil { - err = gerror.Wrap(err, `json.Unmarshal failed`) - } - return -} - -// UnmarshalUseNumber decodes the json data bytes to target interface using number option. -func UnmarshalUseNumber(data []byte, v interface{}) (err error) { - decoder := NewDecoder(bytes.NewReader(data)) - decoder.UseNumber() - err = decoder.Decode(v) - if err != nil { - err = gerror.Wrap(err, `json.UnmarshalUseNumber failed`) - } - return -} - -// NewEncoder same as json.NewEncoder -func NewEncoder(writer io.Writer) *json.Encoder { - return json.NewEncoder(writer) -} - -// NewDecoder adapts to json/stream NewDecoder API. -// -// NewDecoder returns a new decoder that reads from r. -// -// Instead of a json/encoding Decoder, a Decoder is returned -// Refer to https://godoc.org/encoding/json#NewDecoder for more information. -func NewDecoder(reader io.Reader) *json.Decoder { - return json.NewDecoder(reader) -} - -// Valid reports whether data is a valid JSON encoding. -func Valid(data []byte) bool { - return json.Valid(data) -} diff --git a/vendor/github.com/gogf/gf/v2/internal/reflection/reflection.go b/vendor/github.com/gogf/gf/v2/internal/reflection/reflection.go deleted file mode 100644 index 30a4cded..00000000 --- a/vendor/github.com/gogf/gf/v2/internal/reflection/reflection.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. 
-// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -// Package reflection provides some reflection functions for internal usage. -package reflection - -import ( - "reflect" -) - -type OriginValueAndKindOutput struct { - InputValue reflect.Value - InputKind reflect.Kind - OriginValue reflect.Value - OriginKind reflect.Kind -} - -// OriginValueAndKind retrieves and returns the original reflect value and kind. -func OriginValueAndKind(value interface{}) (out OriginValueAndKindOutput) { - if v, ok := value.(reflect.Value); ok { - out.InputValue = v - } else { - out.InputValue = reflect.ValueOf(value) - } - out.InputKind = out.InputValue.Kind() - out.OriginValue = out.InputValue - out.OriginKind = out.InputKind - for out.OriginKind == reflect.Ptr { - out.OriginValue = out.OriginValue.Elem() - out.OriginKind = out.OriginValue.Kind() - } - return -} - -type OriginTypeAndKindOutput struct { - InputType reflect.Type - InputKind reflect.Kind - OriginType reflect.Type - OriginKind reflect.Kind -} - -// OriginTypeAndKind retrieves and returns the original reflect type and kind. -func OriginTypeAndKind(value interface{}) (out OriginTypeAndKindOutput) { - if value == nil { - return - } - if reflectType, ok := value.(reflect.Type); ok { - out.InputType = reflectType - } else { - if reflectValue, ok := value.(reflect.Value); ok { - out.InputType = reflectValue.Type() - } else { - out.InputType = reflect.TypeOf(value) - } - } - out.InputKind = out.InputType.Kind() - out.OriginType = out.InputType - out.OriginKind = out.InputKind - for out.OriginKind == reflect.Ptr { - out.OriginType = out.OriginType.Elem() - out.OriginKind = out.OriginType.Kind() - } - return -} - -// ValueToInterface converts reflect value to its interface type. 
-func ValueToInterface(v reflect.Value) (value interface{}, ok bool) { - if v.IsValid() && v.CanInterface() { - return v.Interface(), true - } - switch v.Kind() { - case reflect.Bool: - return v.Bool(), true - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int(), true - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint(), true - case reflect.Float32, reflect.Float64: - return v.Float(), true - case reflect.Complex64, reflect.Complex128: - return v.Complex(), true - case reflect.String: - return v.String(), true - case reflect.Ptr: - return ValueToInterface(v.Elem()) - case reflect.Interface: - return ValueToInterface(v.Elem()) - default: - return nil, false - } -} diff --git a/vendor/github.com/gogf/gf/v2/internal/rwmutex/rwmutex.go b/vendor/github.com/gogf/gf/v2/internal/rwmutex/rwmutex.go deleted file mode 100644 index 17a67fec..00000000 --- a/vendor/github.com/gogf/gf/v2/internal/rwmutex/rwmutex.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -// Package rwmutex provides switch of concurrent safety feature for sync.RWMutex. -package rwmutex - -import ( - "sync" -) - -// RWMutex is a sync.RWMutex with a switch for concurrent safe feature. -// If its attribute *sync.RWMutex is not nil, it means it's in concurrent safety usage. -// Its attribute *sync.RWMutex is nil in default, which makes this struct mush lightweight. -type RWMutex struct { - // Underlying mutex. - mutex *sync.RWMutex -} - -// New creates and returns a new *RWMutex. -// The parameter `safe` is used to specify whether using this mutex in concurrent safety, -// which is false in default. -func New(safe ...bool) *RWMutex { - mu := Create(safe...) 
- return &mu -} - -// Create creates and returns a new RWMutex object. -// The parameter `safe` is used to specify whether using this mutex in concurrent safety, -// which is false in default. -func Create(safe ...bool) RWMutex { - if len(safe) > 0 && safe[0] { - return RWMutex{ - mutex: new(sync.RWMutex), - } - } - return RWMutex{} -} - -// IsSafe checks and returns whether current mutex is in concurrent-safe usage. -func (mu *RWMutex) IsSafe() bool { - return mu.mutex != nil -} - -// Lock locks mutex for writing. -// It does nothing if it is not in concurrent-safe usage. -func (mu *RWMutex) Lock() { - if mu.mutex != nil { - mu.mutex.Lock() - } -} - -// Unlock unlocks mutex for writing. -// It does nothing if it is not in concurrent-safe usage. -func (mu *RWMutex) Unlock() { - if mu.mutex != nil { - mu.mutex.Unlock() - } -} - -// RLock locks mutex for reading. -// It does nothing if it is not in concurrent-safe usage. -func (mu *RWMutex) RLock() { - if mu.mutex != nil { - mu.mutex.RLock() - } -} - -// RUnlock unlocks mutex for reading. -// It does nothing if it is not in concurrent-safe usage. -func (mu *RWMutex) RUnlock() { - if mu.mutex != nil { - mu.mutex.RUnlock() - } -} diff --git a/vendor/github.com/gogf/gf/v2/internal/tracing/tracing.go b/vendor/github.com/gogf/gf/v2/internal/tracing/tracing.go deleted file mode 100644 index c9d7a6f4..00000000 --- a/vendor/github.com/gogf/gf/v2/internal/tracing/tracing.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -// Package tracing provides some utility functions for tracing functionality. 
-package tracing - -import ( - "math" - "time" - - "go.opentelemetry.io/otel/trace" - - "github.com/gogf/gf/v2/container/gtype" - "github.com/gogf/gf/v2/encoding/gbinary" - "github.com/gogf/gf/v2/util/grand" -) - -var ( - randomInitSequence = int32(grand.Intn(math.MaxInt32)) - sequence = gtype.NewInt32(randomInitSequence) -) - -// NewIDs creates and returns a new trace and span ID. -func NewIDs() (traceID trace.TraceID, spanID trace.SpanID) { - return NewTraceID(), NewSpanID() -} - -// NewTraceID creates and returns a trace ID. -func NewTraceID() (traceID trace.TraceID) { - var ( - timestampNanoBytes = gbinary.EncodeInt64(time.Now().UnixNano()) - sequenceBytes = gbinary.EncodeInt32(sequence.Add(1)) - randomBytes = grand.B(4) - ) - copy(traceID[:], timestampNanoBytes) - copy(traceID[8:], sequenceBytes) - copy(traceID[12:], randomBytes) - return -} - -// NewSpanID creates and returns a span ID. -func NewSpanID() (spanID trace.SpanID) { - copy(spanID[:], gbinary.EncodeInt64(time.Now().UnixNano()/1e3)) - copy(spanID[4:], grand.B(4)) - return -} diff --git a/vendor/github.com/gogf/gf/v2/internal/utils/utils.go b/vendor/github.com/gogf/gf/v2/internal/utils/utils.go deleted file mode 100644 index 414a90ca..00000000 --- a/vendor/github.com/gogf/gf/v2/internal/utils/utils.go +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -// Package utils provides some utility functions for internal usage. -package utils diff --git a/vendor/github.com/gogf/gf/v2/internal/utils/utils_array.go b/vendor/github.com/gogf/gf/v2/internal/utils/utils_array.go deleted file mode 100644 index b96e039e..00000000 --- a/vendor/github.com/gogf/gf/v2/internal/utils/utils_array.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). 
All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package utils - -import "reflect" - -// IsArray checks whether given value is array/slice. -// Note that it uses reflect internally implementing this feature. -func IsArray(value interface{}) bool { - rv := reflect.ValueOf(value) - kind := rv.Kind() - if kind == reflect.Ptr { - rv = rv.Elem() - kind = rv.Kind() - } - switch kind { - case reflect.Array, reflect.Slice: - return true - default: - return false - } -} diff --git a/vendor/github.com/gogf/gf/v2/internal/utils/utils_debug.go b/vendor/github.com/gogf/gf/v2/internal/utils/utils_debug.go deleted file mode 100644 index 5584341b..00000000 --- a/vendor/github.com/gogf/gf/v2/internal/utils/utils_debug.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package utils - -import ( - "github.com/gogf/gf/v2/internal/command" -) - -const ( - // Debug key for checking if in debug mode. - commandEnvKeyForDebugKey = "gf.debug" -) - -var ( - // isDebugEnabled marks whether GoFrame debug mode is enabled. - isDebugEnabled = false -) - -func init() { - // Debugging configured. - value := command.GetOptWithEnv(commandEnvKeyForDebugKey) - if value == "" || value == "0" || value == "false" { - isDebugEnabled = false - } else { - isDebugEnabled = true - } -} - -// IsDebugEnabled checks and returns whether debug mode is enabled. -// The debug mode is enabled when command argument "gf.debug" or environment "GF_DEBUG" is passed. -func IsDebugEnabled() bool { - return isDebugEnabled -} - -// SetDebugEnabled enables/disables the internal debug info. 
-func SetDebugEnabled(enabled bool) { - isDebugEnabled = enabled -} diff --git a/vendor/github.com/gogf/gf/v2/internal/utils/utils_io.go b/vendor/github.com/gogf/gf/v2/internal/utils/utils_io.go deleted file mode 100644 index c2916937..00000000 --- a/vendor/github.com/gogf/gf/v2/internal/utils/utils_io.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package utils - -import ( - "io" -) - -// ReadCloser implements the io.ReadCloser interface -// which is used for reading request body content multiple times. -// -// Note that it cannot be closed. -type ReadCloser struct { - index int // Current read position. - content []byte // Content. - repeatable bool // Mark the content can be repeatable read. -} - -// NewReadCloser creates and returns a RepeatReadCloser object. -func NewReadCloser(content []byte, repeatable bool) io.ReadCloser { - return &ReadCloser{ - content: content, - repeatable: repeatable, - } -} - -// Read implements the io.ReadCloser interface. -func (b *ReadCloser) Read(p []byte) (n int, err error) { - // Make it repeatable reading. - if b.index >= len(b.content) && b.repeatable { - b.index = 0 - } - n = copy(p, b.content[b.index:]) - b.index += n - if b.index >= len(b.content) { - return n, io.EOF - } - return n, nil -} - -// Close implements the io.ReadCloser interface. -func (b *ReadCloser) Close() error { - return nil -} diff --git a/vendor/github.com/gogf/gf/v2/internal/utils/utils_is.go b/vendor/github.com/gogf/gf/v2/internal/utils/utils_is.go deleted file mode 100644 index d90f9215..00000000 --- a/vendor/github.com/gogf/gf/v2/internal/utils/utils_is.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. 
-// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package utils - -import ( - "reflect" - - "github.com/gogf/gf/v2/internal/empty" -) - -// IsNil checks whether `value` is nil, especially for interface{} type value. -func IsNil(value interface{}) bool { - return empty.IsNil(value) -} - -// IsEmpty checks whether `value` is empty. -func IsEmpty(value interface{}) bool { - return empty.IsEmpty(value) -} - -// IsInt checks whether `value` is type of int. -func IsInt(value interface{}) bool { - switch value.(type) { - case int, *int, int8, *int8, int16, *int16, int32, *int32, int64, *int64: - return true - } - return false -} - -// IsUint checks whether `value` is type of uint. -func IsUint(value interface{}) bool { - switch value.(type) { - case uint, *uint, uint8, *uint8, uint16, *uint16, uint32, *uint32, uint64, *uint64: - return true - } - return false -} - -// IsFloat checks whether `value` is type of float. -func IsFloat(value interface{}) bool { - switch value.(type) { - case float32, *float32, float64, *float64: - return true - } - return false -} - -// IsSlice checks whether `value` is type of slice. -func IsSlice(value interface{}) bool { - var ( - reflectValue = reflect.ValueOf(value) - reflectKind = reflectValue.Kind() - ) - for reflectKind == reflect.Ptr { - reflectValue = reflectValue.Elem() - } - switch reflectKind { - case reflect.Slice, reflect.Array: - return true - } - return false -} - -// IsMap checks whether `value` is type of map. -func IsMap(value interface{}) bool { - var ( - reflectValue = reflect.ValueOf(value) - reflectKind = reflectValue.Kind() - ) - for reflectKind == reflect.Ptr { - reflectValue = reflectValue.Elem() - } - switch reflectKind { - case reflect.Map: - return true - } - return false -} - -// IsStruct checks whether `value` is type of struct. 
-func IsStruct(value interface{}) bool { - var reflectType = reflect.TypeOf(value) - if reflectType == nil { - return false - } - var reflectKind = reflectType.Kind() - for reflectKind == reflect.Ptr { - reflectType = reflectType.Elem() - reflectKind = reflectType.Kind() - } - switch reflectKind { - case reflect.Struct: - return true - } - return false -} diff --git a/vendor/github.com/gogf/gf/v2/internal/utils/utils_list.go b/vendor/github.com/gogf/gf/v2/internal/utils/utils_list.go deleted file mode 100644 index 355ad9f8..00000000 --- a/vendor/github.com/gogf/gf/v2/internal/utils/utils_list.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package utils - -import "fmt" - -// ListToMapByKey converts `list` to a map[string]interface{} of which key is specified by `key`. -// Note that the item value may be type of slice. -func ListToMapByKey(list []map[string]interface{}, key string) map[string]interface{} { - var ( - s = "" - m = make(map[string]interface{}) - tempMap = make(map[string][]interface{}) - hasMultiValues bool - ) - for _, item := range list { - if k, ok := item[key]; ok { - s = fmt.Sprintf(`%v`, k) - tempMap[s] = append(tempMap[s], item) - if len(tempMap[s]) > 1 { - hasMultiValues = true - } - } - } - for k, v := range tempMap { - if hasMultiValues { - m[k] = v - } else { - m[k] = v[0] - } - } - return m -} diff --git a/vendor/github.com/gogf/gf/v2/internal/utils/utils_map.go b/vendor/github.com/gogf/gf/v2/internal/utils/utils_map.go deleted file mode 100644 index fba7da77..00000000 --- a/vendor/github.com/gogf/gf/v2/internal/utils/utils_map.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. 
-// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package utils - -// MapPossibleItemByKey tries to find the possible key-value pair for given key ignoring cases and symbols. -// -// Note that this function might be of low performance. -func MapPossibleItemByKey(data map[string]interface{}, key string) (foundKey string, foundValue interface{}) { - if len(data) == 0 { - return - } - if v, ok := data[key]; ok { - return key, v - } - // Loop checking. - for k, v := range data { - if EqualFoldWithoutChars(k, key) { - return k, v - } - } - return "", nil -} - -// MapContainsPossibleKey checks if the given `key` is contained in given map `data`. -// It checks the key ignoring cases and symbols. -// -// Note that this function might be of low performance. -func MapContainsPossibleKey(data map[string]interface{}, key string) bool { - if k, _ := MapPossibleItemByKey(data, key); k != "" { - return true - } - return false -} diff --git a/vendor/github.com/gogf/gf/v2/internal/utils/utils_str.go b/vendor/github.com/gogf/gf/v2/internal/utils/utils_str.go deleted file mode 100644 index d2224573..00000000 --- a/vendor/github.com/gogf/gf/v2/internal/utils/utils_str.go +++ /dev/null @@ -1,168 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package utils - -import ( - "bytes" - "strings" -) - -var ( - // DefaultTrimChars are the characters which are stripped by Trim* functions in default. - DefaultTrimChars = string([]byte{ - '\t', // Tab. - '\v', // Vertical tab. - '\n', // New line (line feed). - '\r', // Carriage return. - '\f', // New page. - ' ', // Ordinary space. - 0x00, // NUL-byte. - 0x85, // Delete. 
- 0xA0, // Non-breaking space. - }) -) - -// IsLetterUpper checks whether the given byte b is in upper case. -func IsLetterUpper(b byte) bool { - if b >= byte('A') && b <= byte('Z') { - return true - } - return false -} - -// IsLetterLower checks whether the given byte b is in lower case. -func IsLetterLower(b byte) bool { - if b >= byte('a') && b <= byte('z') { - return true - } - return false -} - -// IsLetter checks whether the given byte b is a letter. -func IsLetter(b byte) bool { - return IsLetterUpper(b) || IsLetterLower(b) -} - -// IsNumeric checks whether the given string s is numeric. -// Note that float string like "123.456" is also numeric. -func IsNumeric(s string) bool { - var ( - dotCount = 0 - length = len(s) - ) - if length == 0 { - return false - } - for i := 0; i < length; i++ { - if s[i] == '-' && i == 0 { - continue - } - if s[i] == '.' { - dotCount++ - if i > 0 && i < length-1 { - continue - } else { - return false - } - } - if s[i] < '0' || s[i] > '9' { - return false - } - } - return dotCount <= 1 -} - -// UcFirst returns a copy of the string s with the first letter mapped to its upper case. -func UcFirst(s string) string { - if len(s) == 0 { - return s - } - if IsLetterLower(s[0]) { - return string(s[0]-32) + s[1:] - } - return s -} - -// ReplaceByMap returns a copy of `origin`, -// which is replaced by a map in unordered way, case-sensitively. -func ReplaceByMap(origin string, replaces map[string]string) string { - for k, v := range replaces { - origin = strings.ReplaceAll(origin, k, v) - } - return origin -} - -// RemoveSymbols removes all symbols from string and lefts only numbers and letters. 
-func RemoveSymbols(s string) string { - var b = make([]rune, 0, len(s)) - for _, c := range s { - if c > 127 { - b = append(b, c) - } else if (c >= '0' && c <= '9') || (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z') { - b = append(b, c) - } - } - return string(b) -} - -// EqualFoldWithoutChars checks string `s1` and `s2` equal case-insensitively, -// with/without chars '-'/'_'/'.'/' '. -func EqualFoldWithoutChars(s1, s2 string) bool { - return strings.EqualFold(RemoveSymbols(s1), RemoveSymbols(s2)) -} - -// SplitAndTrim splits string `str` by a string `delimiter` to an array, -// and calls Trim to every element of this array. It ignores the elements -// which are empty after Trim. -func SplitAndTrim(str, delimiter string, characterMask ...string) []string { - array := make([]string, 0) - for _, v := range strings.Split(str, delimiter) { - v = Trim(v, characterMask...) - if v != "" { - array = append(array, v) - } - } - return array -} - -// Trim strips whitespace (or other characters) from the beginning and end of a string. -// The optional parameter `characterMask` specifies the additional stripped characters. -func Trim(str string, characterMask ...string) string { - trimChars := DefaultTrimChars - if len(characterMask) > 0 { - trimChars += characterMask[0] - } - return strings.Trim(str, trimChars) -} - -// FormatCmdKey formats string `s` as command key using uniformed format. -func FormatCmdKey(s string) string { - return strings.ToLower(strings.ReplaceAll(s, "_", ".")) -} - -// FormatEnvKey formats string `s` as environment key using uniformed format. -func FormatEnvKey(s string) string { - return strings.ToUpper(strings.ReplaceAll(s, ".", "_")) -} - -// StripSlashes un-quotes a quoted string by AddSlashes. 
-func StripSlashes(str string) string { - var buf bytes.Buffer - l, skip := len(str), false - for i, char := range str { - if skip { - skip = false - } else if char == '\\' { - if i+1 < l && str[i+1] == '\\' { - skip = true - } - continue - } - buf.WriteRune(char) - } - return buf.String() -} diff --git a/vendor/github.com/gogf/gf/v2/net/gipv4/gipv4.go b/vendor/github.com/gogf/gf/v2/net/gipv4/gipv4.go deleted file mode 100644 index ea52f913..00000000 --- a/vendor/github.com/gogf/gf/v2/net/gipv4/gipv4.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. -// - -// Package gipv4 provides useful API for IPv4 address handling. -package gipv4 - -import ( - "encoding/binary" - "fmt" - "net" - "strconv" - - "github.com/gogf/gf/v2/text/gregex" -) - -// Ip2long converts ip address to an uint32 integer. -func Ip2long(ip string) uint32 { - netIp := net.ParseIP(ip) - if netIp == nil { - return 0 - } - return binary.BigEndian.Uint32(netIp.To4()) -} - -// Long2ip converts an uint32 integer ip address to its string type address. -func Long2ip(long uint32) string { - ipByte := make([]byte, 4) - binary.BigEndian.PutUint32(ipByte, long) - return net.IP(ipByte).String() -} - -// Validate checks whether given `ip` a valid IPv4 address. -func Validate(ip string) bool { - return gregex.IsMatchString(`^((25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}(25[0-5]|2[0-4]\d|[01]?\d\d?)$`, ip) -} - -// ParseAddress parses `address` to its ip and port. -// Eg: 192.168.1.1:80 -> 192.168.1.1, 80 -func ParseAddress(address string) (string, int) { - match, err := gregex.MatchString(`^(.+):(\d+)$`, address) - if err == nil { - i, _ := strconv.Atoi(match[2]) - return match[1], i - } - return "", 0 -} - -// GetSegment returns the segment of given ip address. 
-// Eg: 192.168.2.102 -> 192.168.2 -func GetSegment(ip string) string { - match, err := gregex.MatchString(`^(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})$`, ip) - if err != nil || len(match) < 4 { - return "" - } - return fmt.Sprintf("%s.%s.%s", match[1], match[2], match[3]) -} diff --git a/vendor/github.com/gogf/gf/v2/net/gipv4/gipv4_ip.go b/vendor/github.com/gogf/gf/v2/net/gipv4/gipv4_ip.go deleted file mode 100644 index 95bdb848..00000000 --- a/vendor/github.com/gogf/gf/v2/net/gipv4/gipv4_ip.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. -// - -package gipv4 - -import ( - "net" - "strconv" - "strings" - - "github.com/gogf/gf/v2/errors/gerror" -) - -// GetIpArray retrieves and returns all the ip of current host. -func GetIpArray() (ips []string, err error) { - interfaceAddr, err := net.InterfaceAddrs() - if err != nil { - err = gerror.Wrap(err, `net.InterfaceAddrs failed`) - return nil, err - } - for _, address := range interfaceAddr { - ipNet, isValidIpNet := address.(*net.IPNet) - if isValidIpNet && !ipNet.IP.IsLoopback() { - if ipNet.IP.To4() != nil { - ips = append(ips, ipNet.IP.String()) - } - } - } - return ips, nil -} - -// MustGetIntranetIp performs as GetIntranetIp, but it panics if any error occurs. -func MustGetIntranetIp() string { - ip, err := GetIntranetIp() - if err != nil { - panic(err) - } - return ip -} - -// GetIntranetIp retrieves and returns the first intranet ip of current machine. -func GetIntranetIp() (ip string, err error) { - ips, err := GetIntranetIpArray() - if err != nil { - return "", err - } - if len(ips) == 0 { - return "", gerror.New("no intranet ip found") - } - return ips[0], nil -} - -// GetIntranetIpArray retrieves and returns the intranet ip list of current machine. 
-func GetIntranetIpArray() (ips []string, err error) { - var ( - addresses []net.Addr - interFaces []net.Interface - ) - interFaces, err = net.Interfaces() - if err != nil { - err = gerror.Wrap(err, `net.Interfaces failed`) - return ips, err - } - for _, interFace := range interFaces { - if interFace.Flags&net.FlagUp == 0 { - // interface down - continue - } - if interFace.Flags&net.FlagLoopback != 0 { - // loop back interface - continue - } - // ignore warden bridge - if strings.HasPrefix(interFace.Name, "w-") { - continue - } - addresses, err = interFace.Addrs() - if err != nil { - err = gerror.Wrap(err, `interFace.Addrs failed`) - return ips, err - } - for _, addr := range addresses { - var ip net.IP - switch v := addr.(type) { - case *net.IPNet: - ip = v.IP - case *net.IPAddr: - ip = v.IP - } - - if ip == nil || ip.IsLoopback() { - continue - } - ip = ip.To4() - if ip == nil { - // not an ipv4 address - continue - } - ipStr := ip.String() - if IsIntranet(ipStr) { - ips = append(ips, ipStr) - } - } - } - return ips, nil -} - -// IsIntranet checks and returns whether given ip an intranet ip. 
-// -// Local: 127.0.0.1 -// A: 10.0.0.0--10.255.255.255 -// B: 172.16.0.0--172.31.255.255 -// C: 192.168.0.0--192.168.255.255 -func IsIntranet(ip string) bool { - if ip == "127.0.0.1" { - return true - } - array := strings.Split(ip, ".") - if len(array) != 4 { - return false - } - // A - if array[0] == "10" || (array[0] == "192" && array[1] == "168") { - return true - } - // C - if array[0] == "192" && array[1] == "168" { - return true - } - // B - if array[0] == "172" { - second, err := strconv.ParseInt(array[1], 10, 64) - if err != nil { - return false - } - if second >= 16 && second <= 31 { - return true - } - } - return false -} diff --git a/vendor/github.com/gogf/gf/v2/net/gipv4/gipv4_lookup.go b/vendor/github.com/gogf/gf/v2/net/gipv4/gipv4_lookup.go deleted file mode 100644 index f6aed4b0..00000000 --- a/vendor/github.com/gogf/gf/v2/net/gipv4/gipv4_lookup.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. -// - -package gipv4 - -import ( - "net" - "strings" -) - -// GetHostByName returns the IPv4 address corresponding to a given Internet host name. -func GetHostByName(hostname string) (string, error) { - ips, err := net.LookupIP(hostname) - if ips != nil { - for _, v := range ips { - if v.To4() != nil { - return v.String(), nil - } - } - return "", nil - } - return "", err -} - -// GetHostsByName returns a list of IPv4 addresses corresponding to a given Internet -// host name. 
-func GetHostsByName(hostname string) ([]string, error) { - ips, err := net.LookupIP(hostname) - if ips != nil { - var ipStrings []string - for _, v := range ips { - if v.To4() != nil { - ipStrings = append(ipStrings, v.String()) - } - } - return ipStrings, nil - } - return nil, err -} - -// GetNameByAddr returns the Internet host name corresponding to a given IP address. -func GetNameByAddr(ipAddress string) (string, error) { - names, err := net.LookupAddr(ipAddress) - if names != nil { - return strings.TrimRight(names[0], "."), nil - } - return "", err -} diff --git a/vendor/github.com/gogf/gf/v2/net/gipv4/gipv4_mac.go b/vendor/github.com/gogf/gf/v2/net/gipv4/gipv4_mac.go deleted file mode 100644 index a0952055..00000000 --- a/vendor/github.com/gogf/gf/v2/net/gipv4/gipv4_mac.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. -// - -package gipv4 - -import ( - "net" - - "github.com/gogf/gf/v2/errors/gerror" -) - -// GetMac retrieves and returns the first mac address of current host. -func GetMac() (mac string, err error) { - macs, err := GetMacArray() - if err != nil { - return "", err - } - if len(macs) > 0 { - return macs[0], nil - } - return "", nil -} - -// GetMacArray retrieves and returns all the mac address of current host. 
-func GetMacArray() (macs []string, err error) { - netInterfaces, err := net.Interfaces() - if err != nil { - err = gerror.Wrap(err, `net.Interfaces failed`) - return nil, err - } - for _, netInterface := range netInterfaces { - macAddr := netInterface.HardwareAddr.String() - if len(macAddr) == 0 { - continue - } - macs = append(macs, macAddr) - } - return macs, nil -} diff --git a/vendor/github.com/gogf/gf/v2/net/gtrace/gtrace.go b/vendor/github.com/gogf/gf/v2/net/gtrace/gtrace.go deleted file mode 100644 index 0eb96eba..00000000 --- a/vendor/github.com/gogf/gf/v2/net/gtrace/gtrace.go +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -// Package gtrace provides convenience wrapping functionality for tracing feature using OpenTelemetry. -package gtrace - -import ( - "context" - "os" - "strings" - - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/propagation" - semconv "go.opentelemetry.io/otel/semconv/v1.4.0" - "go.opentelemetry.io/otel/trace" - - "github.com/gogf/gf/v2/container/gmap" - "github.com/gogf/gf/v2/container/gvar" - "github.com/gogf/gf/v2/errors/gcode" - "github.com/gogf/gf/v2/errors/gerror" - "github.com/gogf/gf/v2/internal/command" - "github.com/gogf/gf/v2/net/gipv4" - "github.com/gogf/gf/v2/net/gtrace/internal/provider" - "github.com/gogf/gf/v2/text/gstr" - "github.com/gogf/gf/v2/util/gconv" -) - -const ( - tracingCommonKeyIpIntranet = `ip.intranet` - tracingCommonKeyIpHostname = `hostname` - commandEnvKeyForMaxContentLogSize = "gf.gtrace.max.content.log.size" // To avoid too big tracing content. - commandEnvKeyForTracingInternal = "gf.gtrace.tracing.internal" // For detailed controlling for tracing content. 
-) - -var ( - intranetIps, _ = gipv4.GetIntranetIpArray() - intranetIpStr = strings.Join(intranetIps, ",") - hostname, _ = os.Hostname() - tracingInternal = true // tracingInternal enables tracing for internal type spans. - tracingMaxContentLogSize = 512 * 1024 // Max log size for request and response body, especially for HTTP/RPC request. - // defaultTextMapPropagator is the default propagator for context propagation between peers. - defaultTextMapPropagator = propagation.NewCompositeTextMapPropagator( - propagation.TraceContext{}, - propagation.Baggage{}, - ) -) - -func init() { - tracingInternal = gconv.Bool(command.GetOptWithEnv(commandEnvKeyForTracingInternal, "true")) - if maxContentLogSize := gconv.Int(command.GetOptWithEnv(commandEnvKeyForMaxContentLogSize)); maxContentLogSize > 0 { - tracingMaxContentLogSize = maxContentLogSize - } - // Default trace provider. - otel.SetTracerProvider(provider.New()) - CheckSetDefaultTextMapPropagator() -} - -// IsUsingDefaultProvider checks and return if currently using default trace provider. -func IsUsingDefaultProvider() bool { - _, ok := otel.GetTracerProvider().(*provider.TracerProvider) - return ok -} - -// IsTracingInternal returns whether tracing spans of internal components. -func IsTracingInternal() bool { - return tracingInternal -} - -// MaxContentLogSize returns the max log size for request and response body, especially for HTTP/RPC request. -func MaxContentLogSize() int { - return tracingMaxContentLogSize -} - -// CommonLabels returns common used attribute labels: -// ip.intranet, hostname. -func CommonLabels() []attribute.KeyValue { - return []attribute.KeyValue{ - attribute.String(tracingCommonKeyIpHostname, hostname), - attribute.String(tracingCommonKeyIpIntranet, intranetIpStr), - semconv.HostNameKey.String(hostname), - } -} - -// CheckSetDefaultTextMapPropagator sets the default TextMapPropagator if it is not set previously. 
-func CheckSetDefaultTextMapPropagator() { - p := otel.GetTextMapPropagator() - if len(p.Fields()) == 0 { - otel.SetTextMapPropagator(GetDefaultTextMapPropagator()) - } -} - -// GetDefaultTextMapPropagator returns the default propagator for context propagation between peers. -func GetDefaultTextMapPropagator() propagation.TextMapPropagator { - return defaultTextMapPropagator -} - -// GetTraceID retrieves and returns TraceId from context. -// It returns an empty string is tracing feature is not activated. -func GetTraceID(ctx context.Context) string { - if ctx == nil { - return "" - } - traceID := trace.SpanContextFromContext(ctx).TraceID() - if traceID.IsValid() { - return traceID.String() - } - return "" -} - -// GetSpanID retrieves and returns SpanId from context. -// It returns an empty string is tracing feature is not activated. -func GetSpanID(ctx context.Context) string { - if ctx == nil { - return "" - } - spanID := trace.SpanContextFromContext(ctx).SpanID() - if spanID.IsValid() { - return spanID.String() - } - return "" -} - -// SetBaggageValue is a convenient function for adding one key-value pair to baggage. -// Note that it uses attribute.Any to set the key-value pair. -func SetBaggageValue(ctx context.Context, key string, value interface{}) context.Context { - return NewBaggage(ctx).SetValue(key, value) -} - -// SetBaggageMap is a convenient function for adding map key-value pairs to baggage. -// Note that it uses attribute.Any to set the key-value pair. -func SetBaggageMap(ctx context.Context, data map[string]interface{}) context.Context { - return NewBaggage(ctx).SetMap(data) -} - -// GetBaggageMap retrieves and returns the baggage values as map. -func GetBaggageMap(ctx context.Context) *gmap.StrAnyMap { - return NewBaggage(ctx).GetMap() -} - -// GetBaggageVar retrieves value and returns a *gvar.Var for specified key from baggage. 
-func GetBaggageVar(ctx context.Context, key string) *gvar.Var { - return NewBaggage(ctx).GetVar(key) -} - -// WithUUID injects custom trace id with UUID into context to propagate. -func WithUUID(ctx context.Context, uuid string) (context.Context, error) { - return WithTraceID(ctx, gstr.Replace(uuid, "-", "")) -} - -// WithTraceID injects custom trace id into context to propagate. -func WithTraceID(ctx context.Context, traceID string) (context.Context, error) { - generatedTraceID, err := trace.TraceIDFromHex(traceID) - if err != nil { - return ctx, gerror.WrapCodef( - gcode.CodeInvalidParameter, - err, - `invalid custom traceID "%s", a traceID string should be composed with [0-f] and fixed length 32`, - traceID, - ) - } - sc := trace.SpanContextFromContext(ctx) - if !sc.HasTraceID() { - var span trace.Span - ctx, span = NewSpan(ctx, "gtrace.WithTraceID") - defer span.End() - sc = trace.SpanContextFromContext(ctx) - } - ctx = trace.ContextWithRemoteSpanContext(ctx, trace.NewSpanContext(trace.SpanContextConfig{ - TraceID: generatedTraceID, - SpanID: sc.SpanID(), - TraceFlags: sc.TraceFlags(), - TraceState: sc.TraceState(), - Remote: sc.IsRemote(), - })) - return ctx, nil -} diff --git a/vendor/github.com/gogf/gf/v2/net/gtrace/gtrace_baggage.go b/vendor/github.com/gogf/gf/v2/net/gtrace/gtrace_baggage.go deleted file mode 100644 index 26a9eb86..00000000 --- a/vendor/github.com/gogf/gf/v2/net/gtrace/gtrace_baggage.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gtrace - -import ( - "context" - - "go.opentelemetry.io/otel/baggage" - - "github.com/gogf/gf/v2/container/gmap" - "github.com/gogf/gf/v2/container/gvar" - "github.com/gogf/gf/v2/util/gconv" -) - -// Baggage holds the data through all tracing spans. 
-type Baggage struct { - ctx context.Context -} - -// NewBaggage creates and returns a new Baggage object from given tracing context. -func NewBaggage(ctx context.Context) *Baggage { - if ctx == nil { - ctx = context.Background() - } - return &Baggage{ - ctx: ctx, - } -} - -// Ctx returns the context that Baggage holds. -func (b *Baggage) Ctx() context.Context { - return b.ctx -} - -// SetValue is a convenient function for adding one key-value pair to baggage. -// Note that it uses attribute.Any to set the key-value pair. -func (b *Baggage) SetValue(key string, value interface{}) context.Context { - member, _ := baggage.NewMember(key, gconv.String(value)) - bag, _ := baggage.New(member) - b.ctx = baggage.ContextWithBaggage(b.ctx, bag) - return b.ctx -} - -// SetMap is a convenient function for adding map key-value pairs to baggage. -// Note that it uses attribute.Any to set the key-value pair. -func (b *Baggage) SetMap(data map[string]interface{}) context.Context { - members := make([]baggage.Member, 0) - for k, v := range data { - member, _ := baggage.NewMember(k, gconv.String(v)) - members = append(members, member) - } - bag, _ := baggage.New(members...) - b.ctx = baggage.ContextWithBaggage(b.ctx, bag) - return b.ctx -} - -// GetMap retrieves and returns the baggage values as map. -func (b *Baggage) GetMap() *gmap.StrAnyMap { - m := gmap.NewStrAnyMap() - members := baggage.FromContext(b.ctx).Members() - for i := range members { - m.Set(members[i].Key(), members[i].Value()) - } - return m -} - -// GetVar retrieves value and returns a *gvar.Var for specified key from baggage. 
-func (b *Baggage) GetVar(key string) *gvar.Var { - value := baggage.FromContext(b.ctx).Member(key).Value() - return gvar.New(value) -} diff --git a/vendor/github.com/gogf/gf/v2/net/gtrace/gtrace_carrier.go b/vendor/github.com/gogf/gf/v2/net/gtrace/gtrace_carrier.go deleted file mode 100644 index e29d7007..00000000 --- a/vendor/github.com/gogf/gf/v2/net/gtrace/gtrace_carrier.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gtrace - -import ( - "github.com/gogf/gf/v2/internal/json" - "github.com/gogf/gf/v2/util/gconv" -) - -// Carrier is the storage medium used by a TextMapPropagator. -type Carrier map[string]interface{} - -// NewCarrier creates and returns a Carrier. -func NewCarrier(data ...map[string]interface{}) Carrier { - if len(data) > 0 && data[0] != nil { - return data[0] - } - return make(map[string]interface{}) -} - -// Get returns the value associated with the passed key. -func (c Carrier) Get(k string) string { - return gconv.String(c[k]) -} - -// Set stores the key-value pair. -func (c Carrier) Set(k, v string) { - c[k] = v -} - -// Keys lists the keys stored in this carrier. -func (c Carrier) Keys() []string { - keys := make([]string, 0, len(c)) - for k := range c { - keys = append(keys, k) - } - return keys -} - -// MustMarshal .returns the JSON encoding of c -func (c Carrier) MustMarshal() []byte { - b, err := json.Marshal(c) - if err != nil { - panic(err) - } - return b -} - -// String converts and returns current Carrier as string. -func (c Carrier) String() string { - return string(c.MustMarshal()) -} - -// UnmarshalJSON implements interface UnmarshalJSON for package json. 
-func (c Carrier) UnmarshalJSON(b []byte) error { - carrier := NewCarrier(nil) - return json.UnmarshalUseNumber(b, carrier) -} diff --git a/vendor/github.com/gogf/gf/v2/net/gtrace/gtrace_span.go b/vendor/github.com/gogf/gf/v2/net/gtrace/gtrace_span.go deleted file mode 100644 index 0d7fb240..00000000 --- a/vendor/github.com/gogf/gf/v2/net/gtrace/gtrace_span.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gtrace - -import ( - "context" - - "go.opentelemetry.io/otel/trace" -) - -// Span warps trace.Span for compatibility and extension. -type Span struct { - trace.Span -} - -// NewSpan creates a span using default tracer. -func NewSpan(ctx context.Context, spanName string, opts ...trace.SpanStartOption) (context.Context, *Span) { - ctx, span := NewTracer().Start(ctx, spanName, opts...) - return ctx, &Span{ - Span: span, - } -} diff --git a/vendor/github.com/gogf/gf/v2/net/gtrace/gtrace_tracer.go b/vendor/github.com/gogf/gf/v2/net/gtrace/gtrace_tracer.go deleted file mode 100644 index 47d2baa7..00000000 --- a/vendor/github.com/gogf/gf/v2/net/gtrace/gtrace_tracer.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gtrace - -import ( - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/trace" -) - -// Tracer warps trace.Tracer for compatibility and extension. -type Tracer struct { - trace.Tracer -} - -// NewTracer Tracer is a short function for retrieving Tracer. 
-func NewTracer(name ...string) *Tracer { - tracerName := "" - if len(name) > 0 { - tracerName = name[0] - } - return &Tracer{ - Tracer: otel.Tracer(tracerName), - } -} diff --git a/vendor/github.com/gogf/gf/v2/net/gtrace/internal/provider/provider.go b/vendor/github.com/gogf/gf/v2/net/gtrace/internal/provider/provider.go deleted file mode 100644 index 28159d5a..00000000 --- a/vendor/github.com/gogf/gf/v2/net/gtrace/internal/provider/provider.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package provider - -import ( - sdkTrace "go.opentelemetry.io/otel/sdk/trace" -) - -type TracerProvider struct { - *sdkTrace.TracerProvider -} - -// New returns a new and configured TracerProvider, which has no SpanProcessor. -// -// In default the returned TracerProvider is configured with: -// - a ParentBased(AlwaysSample) Sampler; -// - a unix nano timestamp and random umber based IDGenerator; -// - the resource.Default() Resource; -// - the default SpanLimits. -// -// The passed opts are used to override these default values and configure the -// returned TracerProvider appropriately. -func New() *TracerProvider { - return &TracerProvider{ - TracerProvider: sdkTrace.NewTracerProvider( - sdkTrace.WithIDGenerator(NewIDGenerator()), - ), - } -} diff --git a/vendor/github.com/gogf/gf/v2/net/gtrace/internal/provider/provider_idgenerator.go b/vendor/github.com/gogf/gf/v2/net/gtrace/internal/provider/provider_idgenerator.go deleted file mode 100644 index 6c5baec3..00000000 --- a/vendor/github.com/gogf/gf/v2/net/gtrace/internal/provider/provider_idgenerator.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. 
-// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package provider - -import ( - "context" - - "go.opentelemetry.io/otel/trace" - - "github.com/gogf/gf/v2/internal/tracing" -) - -// IDGenerator is a trace ID generator. -type IDGenerator struct{} - -// NewIDGenerator returns a new IDGenerator. -func NewIDGenerator() *IDGenerator { - return &IDGenerator{} -} - -// NewIDs creates and returns a new trace and span ID. -func (id *IDGenerator) NewIDs(ctx context.Context) (traceID trace.TraceID, spanID trace.SpanID) { - return tracing.NewIDs() -} - -// NewSpanID returns an ID for a new span in the trace with traceID. -func (id *IDGenerator) NewSpanID(ctx context.Context, traceID trace.TraceID) (spanID trace.SpanID) { - return tracing.NewSpanID() -} diff --git a/vendor/github.com/gogf/gf/v2/os/gcache/gcache.go b/vendor/github.com/gogf/gf/v2/os/gcache/gcache.go deleted file mode 100644 index a0f8b1cc..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gcache/gcache.go +++ /dev/null @@ -1,240 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -// Package gcache provides kinds of cache management for process. -// -// It provides a concurrent-safe in-memory cache adapter for process in default. -package gcache - -import ( - "context" - "time" - - "github.com/gogf/gf/v2/container/gvar" -) - -// Func is the cache function that calculates and returns the value. -type Func func(ctx context.Context) (value interface{}, err error) - -// Default cache object. -var defaultCache = New() - -// Set sets cache with `key`-`value` pair, which is expired after `duration`. -// -// It does not expire if `duration` == 0. -// It deletes the keys of `data` if `duration` < 0 or given `value` is nil. 
-func Set(ctx context.Context, key interface{}, value interface{}, duration time.Duration) error { - return defaultCache.Set(ctx, key, value, duration) -} - -// SetMap batch sets cache with key-value pairs by `data` map, which is expired after `duration`. -// -// It does not expire if `duration` == 0. -// It deletes the keys of `data` if `duration` < 0 or given `value` is nil. -func SetMap(ctx context.Context, data map[interface{}]interface{}, duration time.Duration) error { - return defaultCache.SetMap(ctx, data, duration) -} - -// SetIfNotExist sets cache with `key`-`value` pair which is expired after `duration` -// if `key` does not exist in the cache. It returns true the `key` does not exist in the -// cache, and it sets `value` successfully to the cache, or else it returns false. -// -// It does not expire if `duration` == 0. -// It deletes the `key` if `duration` < 0 or given `value` is nil. -func SetIfNotExist(ctx context.Context, key interface{}, value interface{}, duration time.Duration) (bool, error) { - return defaultCache.SetIfNotExist(ctx, key, value, duration) -} - -// SetIfNotExistFunc sets `key` with result of function `f` and returns true -// if `key` does not exist in the cache, or else it does nothing and returns false if `key` already exists. -// -// The parameter `value` can be type of `func() interface{}`, but it does nothing if its -// result is nil. -// -// It does not expire if `duration` == 0. -// It deletes the `key` if `duration` < 0 or given `value` is nil. -func SetIfNotExistFunc(ctx context.Context, key interface{}, f Func, duration time.Duration) (bool, error) { - return defaultCache.SetIfNotExistFunc(ctx, key, f, duration) -} - -// SetIfNotExistFuncLock sets `key` with result of function `f` and returns true -// if `key` does not exist in the cache, or else it does nothing and returns false if `key` already exists. -// -// It does not expire if `duration` == 0. -// It deletes the `key` if `duration` < 0 or given `value` is nil. 
-// -// Note that it differs from function `SetIfNotExistFunc` is that the function `f` is executed within -// writing mutex lock for concurrent safety purpose. -func SetIfNotExistFuncLock(ctx context.Context, key interface{}, f Func, duration time.Duration) (bool, error) { - return defaultCache.SetIfNotExistFuncLock(ctx, key, f, duration) -} - -// Get retrieves and returns the associated value of given `key`. -// It returns nil if it does not exist, or its value is nil, or it's expired. -// If you would like to check if the `key` exists in the cache, it's better using function Contains. -func Get(ctx context.Context, key interface{}) (*gvar.Var, error) { - return defaultCache.Get(ctx, key) -} - -// GetOrSet retrieves and returns the value of `key`, or sets `key`-`value` pair and -// returns `value` if `key` does not exist in the cache. The key-value pair expires -// after `duration`. -// -// It does not expire if `duration` == 0. -// It deletes the `key` if `duration` < 0 or given `value` is nil, but it does nothing -// if `value` is a function and the function result is nil. -func GetOrSet(ctx context.Context, key interface{}, value interface{}, duration time.Duration) (*gvar.Var, error) { - return defaultCache.GetOrSet(ctx, key, value, duration) -} - -// GetOrSetFunc retrieves and returns the value of `key`, or sets `key` with result of -// function `f` and returns its result if `key` does not exist in the cache. The key-value -// pair expires after `duration`. -// -// It does not expire if `duration` == 0. -// It deletes the `key` if `duration` < 0 or given `value` is nil, but it does nothing -// if `value` is a function and the function result is nil. 
-func GetOrSetFunc(ctx context.Context, key interface{}, f Func, duration time.Duration) (*gvar.Var, error) { - return defaultCache.GetOrSetFunc(ctx, key, f, duration) -} - -// GetOrSetFuncLock retrieves and returns the value of `key`, or sets `key` with result of -// function `f` and returns its result if `key` does not exist in the cache. The key-value -// pair expires after `duration`. -// -// It does not expire if `duration` == 0. -// It deletes the `key` if `duration` < 0 or given `value` is nil, but it does nothing -// if `value` is a function and the function result is nil. -// -// Note that it differs from function `GetOrSetFunc` is that the function `f` is executed within -// writing mutex lock for concurrent safety purpose. -func GetOrSetFuncLock(ctx context.Context, key interface{}, f Func, duration time.Duration) (*gvar.Var, error) { - return defaultCache.GetOrSetFuncLock(ctx, key, f, duration) -} - -// Contains checks and returns true if `key` exists in the cache, or else returns false. -func Contains(ctx context.Context, key interface{}) (bool, error) { - return defaultCache.Contains(ctx, key) -} - -// GetExpire retrieves and returns the expiration of `key` in the cache. -// -// Note that, -// It returns 0 if the `key` does not expire. -// It returns -1 if the `key` does not exist in the cache. -func GetExpire(ctx context.Context, key interface{}) (time.Duration, error) { - return defaultCache.GetExpire(ctx, key) -} - -// Remove deletes one or more keys from cache, and returns its value. -// If multiple keys are given, it returns the value of the last deleted item. -func Remove(ctx context.Context, keys ...interface{}) (value *gvar.Var, err error) { - return defaultCache.Remove(ctx, keys...) -} - -// Removes deletes `keys` in the cache. -func Removes(ctx context.Context, keys []interface{}) error { - return defaultCache.Removes(ctx, keys) -} - -// Update updates the value of `key` without changing its expiration and returns the old value. 
-// The returned value `exist` is false if the `key` does not exist in the cache. -// -// It deletes the `key` if given `value` is nil. -// It does nothing if `key` does not exist in the cache. -func Update(ctx context.Context, key interface{}, value interface{}) (oldValue *gvar.Var, exist bool, err error) { - return defaultCache.Update(ctx, key, value) -} - -// UpdateExpire updates the expiration of `key` and returns the old expiration duration value. -// -// It returns -1 and does nothing if the `key` does not exist in the cache. -// It deletes the `key` if `duration` < 0. -func UpdateExpire(ctx context.Context, key interface{}, duration time.Duration) (oldDuration time.Duration, err error) { - return defaultCache.UpdateExpire(ctx, key, duration) -} - -// Size returns the number of items in the cache. -func Size(ctx context.Context) (int, error) { - return defaultCache.Size(ctx) -} - -// Data returns a copy of all key-value pairs in the cache as map type. -// Note that this function may lead lots of memory usage, you can implement this function -// if necessary. -func Data(ctx context.Context) (map[interface{}]interface{}, error) { - return defaultCache.Data(ctx) -} - -// Keys returns all keys in the cache as slice. -func Keys(ctx context.Context) ([]interface{}, error) { - return defaultCache.Keys(ctx) -} - -// KeyStrings returns all keys in the cache as string slice. -func KeyStrings(ctx context.Context) ([]string, error) { - return defaultCache.KeyStrings(ctx) -} - -// Values returns all values in the cache as slice. -func Values(ctx context.Context) ([]interface{}, error) { - return defaultCache.Values(ctx) -} - -// MustGet acts like Get, but it panics if any error occurs. -func MustGet(ctx context.Context, key interface{}) *gvar.Var { - return defaultCache.MustGet(ctx, key) -} - -// MustGetOrSet acts like GetOrSet, but it panics if any error occurs. 
-func MustGetOrSet(ctx context.Context, key interface{}, value interface{}, duration time.Duration) *gvar.Var { - return defaultCache.MustGetOrSet(ctx, key, value, duration) -} - -// MustGetOrSetFunc acts like GetOrSetFunc, but it panics if any error occurs. -func MustGetOrSetFunc(ctx context.Context, key interface{}, f Func, duration time.Duration) *gvar.Var { - return defaultCache.MustGetOrSetFunc(ctx, key, f, duration) -} - -// MustGetOrSetFuncLock acts like GetOrSetFuncLock, but it panics if any error occurs. -func MustGetOrSetFuncLock(ctx context.Context, key interface{}, f Func, duration time.Duration) *gvar.Var { - return defaultCache.MustGetOrSetFuncLock(ctx, key, f, duration) -} - -// MustContains acts like Contains, but it panics if any error occurs. -func MustContains(ctx context.Context, key interface{}) bool { - return defaultCache.MustContains(ctx, key) -} - -// MustGetExpire acts like GetExpire, but it panics if any error occurs. -func MustGetExpire(ctx context.Context, key interface{}) time.Duration { - return defaultCache.MustGetExpire(ctx, key) -} - -// MustSize acts like Size, but it panics if any error occurs. -func MustSize(ctx context.Context) int { - return defaultCache.MustSize(ctx) -} - -// MustData acts like Data, but it panics if any error occurs. -func MustData(ctx context.Context) map[interface{}]interface{} { - return defaultCache.MustData(ctx) -} - -// MustKeys acts like Keys, but it panics if any error occurs. -func MustKeys(ctx context.Context) []interface{} { - return defaultCache.MustKeys(ctx) -} - -// MustKeyStrings acts like KeyStrings, but it panics if any error occurs. -func MustKeyStrings(ctx context.Context) []string { - return defaultCache.MustKeyStrings(ctx) -} - -// MustValues acts like Values, but it panics if any error occurs. 
-func MustValues(ctx context.Context) []interface{} { - return defaultCache.MustValues(ctx) -} diff --git a/vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter.go b/vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter.go deleted file mode 100644 index 3c98011b..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gcache - -import ( - "context" - "time" - - "github.com/gogf/gf/v2/container/gvar" -) - -// Adapter is the core adapter for cache features implements. -// -// Note that the implementer itself should guarantee the concurrent safety of these functions. -type Adapter interface { - // Set sets cache with `key`-`value` pair, which is expired after `duration`. - // - // It does not expire if `duration` == 0. - // It deletes the keys of `data` if `duration` < 0 or given `value` is nil. - Set(ctx context.Context, key interface{}, value interface{}, duration time.Duration) error - - // SetMap batch sets cache with key-value pairs by `data` map, which is expired after `duration`. - // - // It does not expire if `duration` == 0. - // It deletes the keys of `data` if `duration` < 0 or given `value` is nil. - SetMap(ctx context.Context, data map[interface{}]interface{}, duration time.Duration) error - - // SetIfNotExist sets cache with `key`-`value` pair which is expired after `duration` - // if `key` does not exist in the cache. It returns true the `key` does not exist in the - // cache, and it sets `value` successfully to the cache, or else it returns false. - // - // It does not expire if `duration` == 0. - // It deletes the `key` if `duration` < 0 or given `value` is nil. 
- SetIfNotExist(ctx context.Context, key interface{}, value interface{}, duration time.Duration) (ok bool, err error) - - // SetIfNotExistFunc sets `key` with result of function `f` and returns true - // if `key` does not exist in the cache, or else it does nothing and returns false if `key` already exists. - // - // The parameter `value` can be type of `func() interface{}`, but it does nothing if its - // result is nil. - // - // It does not expire if `duration` == 0. - // It deletes the `key` if `duration` < 0 or given `value` is nil. - SetIfNotExistFunc(ctx context.Context, key interface{}, f Func, duration time.Duration) (ok bool, err error) - - // SetIfNotExistFuncLock sets `key` with result of function `f` and returns true - // if `key` does not exist in the cache, or else it does nothing and returns false if `key` already exists. - // - // It does not expire if `duration` == 0. - // It deletes the `key` if `duration` < 0 or given `value` is nil. - // - // Note that it differs from function `SetIfNotExistFunc` is that the function `f` is executed within - // writing mutex lock for concurrent safety purpose. - SetIfNotExistFuncLock(ctx context.Context, key interface{}, f Func, duration time.Duration) (ok bool, err error) - - // Get retrieves and returns the associated value of given `key`. - // It returns nil if it does not exist, or its value is nil, or it's expired. - // If you would like to check if the `key` exists in the cache, it's better using function Contains. - Get(ctx context.Context, key interface{}) (*gvar.Var, error) - - // GetOrSet retrieves and returns the value of `key`, or sets `key`-`value` pair and - // returns `value` if `key` does not exist in the cache. The key-value pair expires - // after `duration`. - // - // It does not expire if `duration` == 0. - // It deletes the `key` if `duration` < 0 or given `value` is nil, but it does nothing - // if `value` is a function and the function result is nil. 
- GetOrSet(ctx context.Context, key interface{}, value interface{}, duration time.Duration) (result *gvar.Var, err error) - - // GetOrSetFunc retrieves and returns the value of `key`, or sets `key` with result of - // function `f` and returns its result if `key` does not exist in the cache. The key-value - // pair expires after `duration`. - // - // It does not expire if `duration` == 0. - // It deletes the `key` if `duration` < 0 or given `value` is nil, but it does nothing - // if `value` is a function and the function result is nil. - GetOrSetFunc(ctx context.Context, key interface{}, f Func, duration time.Duration) (result *gvar.Var, err error) - - // GetOrSetFuncLock retrieves and returns the value of `key`, or sets `key` with result of - // function `f` and returns its result if `key` does not exist in the cache. The key-value - // pair expires after `duration`. - // - // It does not expire if `duration` == 0. - // It deletes the `key` if `duration` < 0 or given `value` is nil, but it does nothing - // if `value` is a function and the function result is nil. - // - // Note that it differs from function `GetOrSetFunc` is that the function `f` is executed within - // writing mutex lock for concurrent safety purpose. - GetOrSetFuncLock(ctx context.Context, key interface{}, f Func, duration time.Duration) (result *gvar.Var, err error) - - // Contains checks and returns true if `key` exists in the cache, or else returns false. - Contains(ctx context.Context, key interface{}) (bool, error) - - // Size returns the number of items in the cache. - Size(ctx context.Context) (size int, err error) - - // Data returns a copy of all key-value pairs in the cache as map type. - // Note that this function may lead lots of memory usage, you can implement this function - // if necessary. - Data(ctx context.Context) (data map[interface{}]interface{}, err error) - - // Keys returns all keys in the cache as slice. 
- Keys(ctx context.Context) (keys []interface{}, err error) - - // Values returns all values in the cache as slice. - Values(ctx context.Context) (values []interface{}, err error) - - // Update updates the value of `key` without changing its expiration and returns the old value. - // The returned value `exist` is false if the `key` does not exist in the cache. - // - // It deletes the `key` if given `value` is nil. - // It does nothing if `key` does not exist in the cache. - Update(ctx context.Context, key interface{}, value interface{}) (oldValue *gvar.Var, exist bool, err error) - - // UpdateExpire updates the expiration of `key` and returns the old expiration duration value. - // - // It returns -1 and does nothing if the `key` does not exist in the cache. - // It deletes the `key` if `duration` < 0. - UpdateExpire(ctx context.Context, key interface{}, duration time.Duration) (oldDuration time.Duration, err error) - - // GetExpire retrieves and returns the expiration of `key` in the cache. - // - // Note that, - // It returns 0 if the `key` does not expire. - // It returns -1 if the `key` does not exist in the cache. - GetExpire(ctx context.Context, key interface{}) (time.Duration, error) - - // Remove deletes one or more keys from cache, and returns its value. - // If multiple keys are given, it returns the value of the last deleted item. - Remove(ctx context.Context, keys ...interface{}) (lastValue *gvar.Var, err error) - - // Clear clears all data of the cache. - // Note that this function is sensitive and should be carefully used. - Clear(ctx context.Context) error - - // Close closes the cache if necessary. 
- Close(ctx context.Context) error -} diff --git a/vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter_memory.go b/vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter_memory.go deleted file mode 100644 index 707b04bd..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter_memory.go +++ /dev/null @@ -1,476 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gcache - -import ( - "context" - "math" - "time" - - "github.com/gogf/gf/v2/container/glist" - "github.com/gogf/gf/v2/container/gset" - "github.com/gogf/gf/v2/container/gtype" - "github.com/gogf/gf/v2/container/gvar" - "github.com/gogf/gf/v2/os/gtime" - "github.com/gogf/gf/v2/os/gtimer" -) - -// AdapterMemory is an adapter implements using memory. -type AdapterMemory struct { - // cap limits the size of the cache pool. - // If the size of the cache exceeds the cap, - // the cache expiration process performs according to the LRU algorithm. - // It is 0 in default which means no limits. - cap int - data *adapterMemoryData // data is the underlying cache data which is stored in a hash table. - expireTimes *adapterMemoryExpireTimes // expireTimes is the expiring key to its timestamp mapping, which is used for quick indexing and deleting. - expireSets *adapterMemoryExpireSets // expireSets is the expiring timestamp to its key set mapping, which is used for quick indexing and deleting. - lru *adapterMemoryLru // lru is the LRU manager, which is enabled when attribute cap > 0. - lruGetList *glist.List // lruGetList is the LRU history according to Get function. - eventList *glist.List // eventList is the asynchronous event list for internal data synchronization. - closed *gtype.Bool // closed controls the cache closed or not. -} - -// Internal cache item. 
-type adapterMemoryItem struct { - v interface{} // Value. - e int64 // Expire timestamp in milliseconds. -} - -// Internal event item. -type adapterMemoryEvent struct { - k interface{} // Key. - e int64 // Expire time in milliseconds. -} - -const ( - // defaultMaxExpire is the default expire time for no expiring items. - // It equals to math.MaxInt64/1000000. - defaultMaxExpire = 9223372036854 -) - -// NewAdapterMemory creates and returns a new memory cache object. -func NewAdapterMemory(lruCap ...int) Adapter { - c := &AdapterMemory{ - data: newAdapterMemoryData(), - lruGetList: glist.New(true), - expireTimes: newAdapterMemoryExpireTimes(), - expireSets: newAdapterMemoryExpireSets(), - eventList: glist.New(true), - closed: gtype.NewBool(), - } - if len(lruCap) > 0 { - c.cap = lruCap[0] - c.lru = newMemCacheLru(c) - } - return c -} - -// Set sets cache with `key`-`value` pair, which is expired after `duration`. -// -// It does not expire if `duration` == 0. -// It deletes the keys of `data` if `duration` < 0 or given `value` is nil. -func (c *AdapterMemory) Set(ctx context.Context, key interface{}, value interface{}, duration time.Duration) error { - expireTime := c.getInternalExpire(duration) - c.data.Set(key, adapterMemoryItem{ - v: value, - e: expireTime, - }) - c.eventList.PushBack(&adapterMemoryEvent{ - k: key, - e: expireTime, - }) - return nil -} - -// SetMap batch sets cache with key-value pairs by `data` map, which is expired after `duration`. -// -// It does not expire if `duration` == 0. -// It deletes the keys of `data` if `duration` < 0 or given `value` is nil. 
-func (c *AdapterMemory) SetMap(ctx context.Context, data map[interface{}]interface{}, duration time.Duration) error { - var ( - expireTime = c.getInternalExpire(duration) - err = c.data.SetMap(data, expireTime) - ) - if err != nil { - return err - } - for k := range data { - c.eventList.PushBack(&adapterMemoryEvent{ - k: k, - e: expireTime, - }) - } - return nil -} - -// SetIfNotExist sets cache with `key`-`value` pair which is expired after `duration` -// if `key` does not exist in the cache. It returns true the `key` does not exist in the -// cache, and it sets `value` successfully to the cache, or else it returns false. -// -// It does not expire if `duration` == 0. -// It deletes the `key` if `duration` < 0 or given `value` is nil. -func (c *AdapterMemory) SetIfNotExist(ctx context.Context, key interface{}, value interface{}, duration time.Duration) (bool, error) { - isContained, err := c.Contains(ctx, key) - if err != nil { - return false, err - } - if !isContained { - if _, err = c.doSetWithLockCheck(ctx, key, value, duration); err != nil { - return false, err - } - return true, nil - } - return false, nil -} - -// SetIfNotExistFunc sets `key` with result of function `f` and returns true -// if `key` does not exist in the cache, or else it does nothing and returns false if `key` already exists. -// -// The parameter `value` can be type of `func() interface{}`, but it does nothing if its -// result is nil. -// -// It does not expire if `duration` == 0. -// It deletes the `key` if `duration` < 0 or given `value` is nil. 
-func (c *AdapterMemory) SetIfNotExistFunc(ctx context.Context, key interface{}, f Func, duration time.Duration) (bool, error) { - isContained, err := c.Contains(ctx, key) - if err != nil { - return false, err - } - if !isContained { - value, err := f(ctx) - if err != nil { - return false, err - } - if _, err = c.doSetWithLockCheck(ctx, key, value, duration); err != nil { - return false, err - } - return true, nil - } - return false, nil -} - -// SetIfNotExistFuncLock sets `key` with result of function `f` and returns true -// if `key` does not exist in the cache, or else it does nothing and returns false if `key` already exists. -// -// It does not expire if `duration` == 0. -// It deletes the `key` if `duration` < 0 or given `value` is nil. -// -// Note that it differs from function `SetIfNotExistFunc` is that the function `f` is executed within -// writing mutex lock for concurrent safety purpose. -func (c *AdapterMemory) SetIfNotExistFuncLock(ctx context.Context, key interface{}, f Func, duration time.Duration) (bool, error) { - isContained, err := c.Contains(ctx, key) - if err != nil { - return false, err - } - if !isContained { - if _, err = c.doSetWithLockCheck(ctx, key, f, duration); err != nil { - return false, err - } - return true, nil - } - return false, nil -} - -// Get retrieves and returns the associated value of given `key`. -// It returns nil if it does not exist, or its value is nil, or it's expired. -// If you would like to check if the `key` exists in the cache, it's better using function Contains. -func (c *AdapterMemory) Get(ctx context.Context, key interface{}) (*gvar.Var, error) { - item, ok := c.data.Get(key) - if ok && !item.IsExpired() { - // Adding to LRU history if LRU feature is enabled. - if c.cap > 0 { - c.lruGetList.PushBack(key) - } - return gvar.New(item.v), nil - } - return nil, nil -} - -// GetOrSet retrieves and returns the value of `key`, or sets `key`-`value` pair and -// returns `value` if `key` does not exist in the cache. 
The key-value pair expires -// after `duration`. -// -// It does not expire if `duration` == 0. -// It deletes the `key` if `duration` < 0 or given `value` is nil, but it does nothing -// if `value` is a function and the function result is nil. -func (c *AdapterMemory) GetOrSet(ctx context.Context, key interface{}, value interface{}, duration time.Duration) (*gvar.Var, error) { - v, err := c.Get(ctx, key) - if err != nil { - return nil, err - } - if v == nil { - return c.doSetWithLockCheck(ctx, key, value, duration) - } - return v, nil -} - -// GetOrSetFunc retrieves and returns the value of `key`, or sets `key` with result of -// function `f` and returns its result if `key` does not exist in the cache. The key-value -// pair expires after `duration`. -// -// It does not expire if `duration` == 0. -// It deletes the `key` if `duration` < 0 or given `value` is nil, but it does nothing -// if `value` is a function and the function result is nil. -func (c *AdapterMemory) GetOrSetFunc(ctx context.Context, key interface{}, f Func, duration time.Duration) (*gvar.Var, error) { - v, err := c.Get(ctx, key) - if err != nil { - return nil, err - } - if v == nil { - value, err := f(ctx) - if err != nil { - return nil, err - } - if value == nil { - return nil, nil - } - return c.doSetWithLockCheck(ctx, key, value, duration) - } - return v, nil -} - -// GetOrSetFuncLock retrieves and returns the value of `key`, or sets `key` with result of -// function `f` and returns its result if `key` does not exist in the cache. The key-value -// pair expires after `duration`. -// -// It does not expire if `duration` == 0. -// It deletes the `key` if `duration` < 0 or given `value` is nil, but it does nothing -// if `value` is a function and the function result is nil. -// -// Note that it differs from function `GetOrSetFunc` is that the function `f` is executed within -// writing mutex lock for concurrent safety purpose. 
-func (c *AdapterMemory) GetOrSetFuncLock(ctx context.Context, key interface{}, f Func, duration time.Duration) (*gvar.Var, error) { - v, err := c.Get(ctx, key) - if err != nil { - return nil, err - } - if v == nil { - return c.doSetWithLockCheck(ctx, key, f, duration) - } - return v, nil -} - -// Contains checks and returns true if `key` exists in the cache, or else returns false. -func (c *AdapterMemory) Contains(ctx context.Context, key interface{}) (bool, error) { - v, err := c.Get(ctx, key) - if err != nil { - return false, err - } - return v != nil, nil -} - -// GetExpire retrieves and returns the expiration of `key` in the cache. -// -// Note that, -// It returns 0 if the `key` does not expire. -// It returns -1 if the `key` does not exist in the cache. -func (c *AdapterMemory) GetExpire(ctx context.Context, key interface{}) (time.Duration, error) { - if item, ok := c.data.Get(key); ok { - return time.Duration(item.e-gtime.TimestampMilli()) * time.Millisecond, nil - } - return -1, nil -} - -// Remove deletes one or more keys from cache, and returns its value. -// If multiple keys are given, it returns the value of the last deleted item. -func (c *AdapterMemory) Remove(ctx context.Context, keys ...interface{}) (*gvar.Var, error) { - var removedKeys []interface{} - removedKeys, value, err := c.data.Remove(keys...) - if err != nil { - return nil, err - } - for _, key := range removedKeys { - c.eventList.PushBack(&adapterMemoryEvent{ - k: key, - e: gtime.TimestampMilli() - 1000000, - }) - } - return gvar.New(value), nil -} - -// Update updates the value of `key` without changing its expiration and returns the old value. -// The returned value `exist` is false if the `key` does not exist in the cache. -// -// It deletes the `key` if given `value` is nil. -// It does nothing if `key` does not exist in the cache. 
-func (c *AdapterMemory) Update(ctx context.Context, key interface{}, value interface{}) (oldValue *gvar.Var, exist bool, err error) { - v, exist, err := c.data.Update(key, value) - return gvar.New(v), exist, err -} - -// UpdateExpire updates the expiration of `key` and returns the old expiration duration value. -// -// It returns -1 and does nothing if the `key` does not exist in the cache. -// It deletes the `key` if `duration` < 0. -func (c *AdapterMemory) UpdateExpire(ctx context.Context, key interface{}, duration time.Duration) (oldDuration time.Duration, err error) { - newExpireTime := c.getInternalExpire(duration) - oldDuration, err = c.data.UpdateExpire(key, newExpireTime) - if err != nil { - return - } - if oldDuration != -1 { - c.eventList.PushBack(&adapterMemoryEvent{ - k: key, - e: newExpireTime, - }) - } - return -} - -// Size returns the size of the cache. -func (c *AdapterMemory) Size(ctx context.Context) (size int, err error) { - return c.data.Size() -} - -// Data returns a copy of all key-value pairs in the cache as map type. -func (c *AdapterMemory) Data(ctx context.Context) (map[interface{}]interface{}, error) { - return c.data.Data() -} - -// Keys returns all keys in the cache as slice. -func (c *AdapterMemory) Keys(ctx context.Context) ([]interface{}, error) { - return c.data.Keys() -} - -// Values returns all values in the cache as slice. -func (c *AdapterMemory) Values(ctx context.Context) ([]interface{}, error) { - return c.data.Values() -} - -// Clear clears all data of the cache. -// Note that this function is sensitive and should be carefully used. -func (c *AdapterMemory) Clear(ctx context.Context) error { - return c.data.Clear() -} - -// Close closes the cache. -func (c *AdapterMemory) Close(ctx context.Context) error { - if c.cap > 0 { - c.lru.Close() - } - c.closed.Set(true) - return nil -} - -// doSetWithLockCheck sets cache with `key`-`value` pair if `key` does not exist in the -// cache, which is expired after `duration`. 
-// -// It does not expire if `duration` == 0. -// The parameter `value` can be type of , but it does nothing if the -// function result is nil. -// -// It doubly checks the `key` whether exists in the cache using mutex writing lock -// before setting it to the cache. -func (c *AdapterMemory) doSetWithLockCheck(ctx context.Context, key interface{}, value interface{}, duration time.Duration) (result *gvar.Var, err error) { - expireTimestamp := c.getInternalExpire(duration) - v, err := c.data.SetWithLock(ctx, key, value, expireTimestamp) - c.eventList.PushBack(&adapterMemoryEvent{k: key, e: expireTimestamp}) - return gvar.New(v), err -} - -// getInternalExpire converts and returns the expiration time with given expired duration in milliseconds. -func (c *AdapterMemory) getInternalExpire(duration time.Duration) int64 { - if duration == 0 { - return defaultMaxExpire - } - return gtime.TimestampMilli() + duration.Nanoseconds()/1000000 -} - -// makeExpireKey groups the `expire` in milliseconds to its according seconds. -func (c *AdapterMemory) makeExpireKey(expire int64) int64 { - return int64(math.Ceil(float64(expire/1000)+1) * 1000) -} - -// syncEventAndClearExpired does the asynchronous task loop: -// 1. Asynchronously process the data in the event list, -// and synchronize the results to the `expireTimes` and `expireSets` properties. -// 2. Clean up the expired key-value pair data. -func (c *AdapterMemory) syncEventAndClearExpired(ctx context.Context) { - if c.closed.Val() { - gtimer.Exit() - return - } - var ( - event *adapterMemoryEvent - oldExpireTime int64 - newExpireTime int64 - ) - // ======================== - // Data Synchronization. - // ======================== - for { - v := c.eventList.PopFront() - if v == nil { - break - } - event = v.(*adapterMemoryEvent) - // Fetching the old expire set. - oldExpireTime = c.expireTimes.Get(event.k) - // Calculating the new expiration time set. 
- newExpireTime = c.makeExpireKey(event.e) - if newExpireTime != oldExpireTime { - c.expireSets.GetOrNew(newExpireTime).Add(event.k) - if oldExpireTime != 0 { - c.expireSets.GetOrNew(oldExpireTime).Remove(event.k) - } - // Updating the expired time for . - c.expireTimes.Set(event.k, newExpireTime) - } - // Adding the key the LRU history by writing operations. - if c.cap > 0 { - c.lru.Push(event.k) - } - } - // Processing expired keys from LRU. - if c.cap > 0 { - if c.lruGetList.Len() > 0 { - for { - if v := c.lruGetList.PopFront(); v != nil { - c.lru.Push(v) - } else { - break - } - } - } - c.lru.SyncAndClear(ctx) - } - // ======================== - // Data Cleaning up. - // ======================== - var ( - expireSet *gset.Set - ek = c.makeExpireKey(gtime.TimestampMilli()) - eks = []int64{ek - 1000, ek - 2000, ek - 3000, ek - 4000, ek - 5000} - ) - for _, expireTime := range eks { - if expireSet = c.expireSets.Get(expireTime); expireSet != nil { - // Iterating the set to delete all keys in it. - expireSet.Iterator(func(key interface{}) bool { - c.clearByKey(key) - return true - }) - // Deleting the set after all of its keys are deleted. - c.expireSets.Delete(expireTime) - } - } -} - -// clearByKey deletes the key-value pair with given `key`. -// The parameter `force` specifies whether doing this deleting forcibly. -func (c *AdapterMemory) clearByKey(key interface{}, force ...bool) { - // Doubly check before really deleting it from cache. - c.data.DeleteWithDoubleCheck(key, force...) - - // Deleting its expiration time from `expireTimes`. - c.expireTimes.Delete(key) - - // Deleting it from LRU. 
- if c.cap > 0 { - c.lru.Remove(key) - } -} diff --git a/vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter_memory_data.go b/vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter_memory_data.go deleted file mode 100644 index 941339d0..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter_memory_data.go +++ /dev/null @@ -1,206 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gcache - -import ( - "context" - "sync" - "time" - - "github.com/gogf/gf/v2/os/gtime" -) - -type adapterMemoryData struct { - mu sync.RWMutex // dataMu ensures the concurrent safety of underlying data map. - data map[interface{}]adapterMemoryItem // data is the underlying cache data which is stored in a hash table. -} - -func newAdapterMemoryData() *adapterMemoryData { - return &adapterMemoryData{ - data: make(map[interface{}]adapterMemoryItem), - } -} - -// Update updates the value of `key` without changing its expiration and returns the old value. -// The returned value `exist` is false if the `key` does not exist in the cache. -// -// It deletes the `key` if given `value` is nil. -// It does nothing if `key` does not exist in the cache. -func (d *adapterMemoryData) Update(key interface{}, value interface{}) (oldValue interface{}, exist bool, err error) { - d.mu.Lock() - defer d.mu.Unlock() - if item, ok := d.data[key]; ok { - d.data[key] = adapterMemoryItem{ - v: value, - e: item.e, - } - return item.v, true, nil - } - return nil, false, nil -} - -// UpdateExpire updates the expiration of `key` and returns the old expiration duration value. -// -// It returns -1 and does nothing if the `key` does not exist in the cache. -// It deletes the `key` if `duration` < 0. 
-func (d *adapterMemoryData) UpdateExpire(key interface{}, expireTime int64) (oldDuration time.Duration, err error) { - d.mu.Lock() - defer d.mu.Unlock() - if item, ok := d.data[key]; ok { - d.data[key] = adapterMemoryItem{ - v: item.v, - e: expireTime, - } - return time.Duration(item.e-gtime.TimestampMilli()) * time.Millisecond, nil - } - return -1, nil -} - -// Remove deletes the one or more keys from cache, and returns its value. -// If multiple keys are given, it returns the value of the deleted last item. -func (d *adapterMemoryData) Remove(keys ...interface{}) (removedKeys []interface{}, value interface{}, err error) { - d.mu.Lock() - defer d.mu.Unlock() - removedKeys = make([]interface{}, 0) - for _, key := range keys { - item, ok := d.data[key] - if ok { - value = item.v - delete(d.data, key) - removedKeys = append(removedKeys, key) - } - } - return removedKeys, value, nil -} - -// Data returns a copy of all key-value pairs in the cache as map type. -func (d *adapterMemoryData) Data() (map[interface{}]interface{}, error) { - d.mu.RLock() - m := make(map[interface{}]interface{}, len(d.data)) - for k, v := range d.data { - if !v.IsExpired() { - m[k] = v.v - } - } - d.mu.RUnlock() - return m, nil -} - -// Keys returns all keys in the cache as slice. -func (d *adapterMemoryData) Keys() ([]interface{}, error) { - d.mu.RLock() - var ( - index = 0 - keys = make([]interface{}, len(d.data)) - ) - for k, v := range d.data { - if !v.IsExpired() { - keys[index] = k - index++ - } - } - d.mu.RUnlock() - return keys, nil -} - -// Values returns all values in the cache as slice. -func (d *adapterMemoryData) Values() ([]interface{}, error) { - d.mu.RLock() - var ( - index = 0 - values = make([]interface{}, len(d.data)) - ) - for _, v := range d.data { - if !v.IsExpired() { - values[index] = v.v - index++ - } - } - d.mu.RUnlock() - return values, nil -} - -// Size returns the size of the cache. 
-func (d *adapterMemoryData) Size() (size int, err error) { - d.mu.RLock() - size = len(d.data) - d.mu.RUnlock() - return size, nil -} - -// Clear clears all data of the cache. -// Note that this function is sensitive and should be carefully used. -func (d *adapterMemoryData) Clear() error { - d.mu.Lock() - defer d.mu.Unlock() - d.data = make(map[interface{}]adapterMemoryItem) - return nil -} - -func (d *adapterMemoryData) Get(key interface{}) (item adapterMemoryItem, ok bool) { - d.mu.RLock() - item, ok = d.data[key] - d.mu.RUnlock() - return -} - -func (d *adapterMemoryData) Set(key interface{}, value adapterMemoryItem) { - d.mu.Lock() - d.data[key] = value - d.mu.Unlock() -} - -// SetMap batch sets cache with key-value pairs by `data`, which is expired after `duration`. -// -// It does not expire if `duration` == 0. -// It deletes the keys of `data` if `duration` < 0 or given `value` is nil. -func (d *adapterMemoryData) SetMap(data map[interface{}]interface{}, expireTime int64) error { - d.mu.Lock() - for k, v := range data { - d.data[k] = adapterMemoryItem{ - v: v, - e: expireTime, - } - } - d.mu.Unlock() - return nil -} - -func (d *adapterMemoryData) SetWithLock(ctx context.Context, key interface{}, value interface{}, expireTimestamp int64) (interface{}, error) { - d.mu.Lock() - defer d.mu.Unlock() - var ( - err error - ) - if v, ok := d.data[key]; ok && !v.IsExpired() { - return v.v, nil - } - f, ok := value.(Func) - if !ok { - // Compatible with raw function value. - f, ok = value.(func(ctx context.Context) (value interface{}, err error)) - } - if ok { - if value, err = f(ctx); err != nil { - return nil, err - } - if value == nil { - return nil, nil - } - } - d.data[key] = adapterMemoryItem{v: value, e: expireTimestamp} - return value, nil -} - -func (d *adapterMemoryData) DeleteWithDoubleCheck(key interface{}, force ...bool) { - d.mu.Lock() - // Doubly check before really deleting it from cache. 
- if item, ok := d.data[key]; (ok && item.IsExpired()) || (len(force) > 0 && force[0]) { - delete(d.data, key) - } - d.mu.Unlock() -} diff --git a/vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter_memory_expire_sets.go b/vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter_memory_expire_sets.go deleted file mode 100644 index b49678c7..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter_memory_expire_sets.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gcache - -import ( - "sync" - - "github.com/gogf/gf/v2/container/gset" -) - -type adapterMemoryExpireSets struct { - mu sync.RWMutex // expireSetMu ensures the concurrent safety of expireSets map. - expireSets map[int64]*gset.Set // expireSets is the expiring timestamp to its key set mapping, which is used for quick indexing and deleting. 
-} - -func newAdapterMemoryExpireSets() *adapterMemoryExpireSets { - return &adapterMemoryExpireSets{ - expireSets: make(map[int64]*gset.Set), - } -} - -func (d *adapterMemoryExpireSets) Get(key int64) (result *gset.Set) { - d.mu.RLock() - result = d.expireSets[key] - d.mu.RUnlock() - return -} - -func (d *adapterMemoryExpireSets) GetOrNew(key int64) (result *gset.Set) { - if result = d.Get(key); result != nil { - return - } - d.mu.Lock() - if es, ok := d.expireSets[key]; ok { - result = es - } else { - result = gset.New(true) - d.expireSets[key] = result - } - d.mu.Unlock() - return -} - -func (d *adapterMemoryExpireSets) Delete(key int64) { - d.mu.Lock() - delete(d.expireSets, key) - d.mu.Unlock() -} diff --git a/vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter_memory_expire_times.go b/vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter_memory_expire_times.go deleted file mode 100644 index af3d4b41..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter_memory_expire_times.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gcache - -import ( - "sync" -) - -type adapterMemoryExpireTimes struct { - mu sync.RWMutex // expireTimeMu ensures the concurrent safety of expireTimes map. - expireTimes map[interface{}]int64 // expireTimes is the expiring key to its timestamp mapping, which is used for quick indexing and deleting. 
-} - -func newAdapterMemoryExpireTimes() *adapterMemoryExpireTimes { - return &adapterMemoryExpireTimes{ - expireTimes: make(map[interface{}]int64), - } -} - -func (d *adapterMemoryExpireTimes) Get(key interface{}) (value int64) { - d.mu.RLock() - value = d.expireTimes[key] - d.mu.RUnlock() - return -} - -func (d *adapterMemoryExpireTimes) Set(key interface{}, value int64) { - d.mu.Lock() - d.expireTimes[key] = value - d.mu.Unlock() -} - -func (d *adapterMemoryExpireTimes) Delete(key interface{}) { - d.mu.Lock() - delete(d.expireTimes, key) - d.mu.Unlock() -} diff --git a/vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter_memory_item.go b/vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter_memory_item.go deleted file mode 100644 index 5a7862ca..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter_memory_item.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gcache - -import ( - "github.com/gogf/gf/v2/os/gtime" -) - -// IsExpired checks whether `item` is expired. -func (item *adapterMemoryItem) IsExpired() bool { - // Note that it should use greater than or equal judgement here - // imagining that the cache time is only 1 millisecond. - - return item.e < gtime.TimestampMilli() -} diff --git a/vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter_memory_lru.go b/vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter_memory_lru.go deleted file mode 100644 index 6583ec96..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter_memory_lru.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. 
-// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gcache - -import ( - "context" - - "github.com/gogf/gf/v2/container/glist" - "github.com/gogf/gf/v2/container/gmap" - "github.com/gogf/gf/v2/container/gtype" - "github.com/gogf/gf/v2/os/gtimer" -) - -// LRU cache object. -// It uses list.List from stdlib for its underlying doubly linked list. -type adapterMemoryLru struct { - cache *AdapterMemory // Parent cache object. - data *gmap.Map // Key mapping to the item of the list. - list *glist.List // Key list. - rawList *glist.List // History for key adding. - closed *gtype.Bool // Closed or not. -} - -// newMemCacheLru creates and returns a new LRU object. -func newMemCacheLru(cache *AdapterMemory) *adapterMemoryLru { - lru := &adapterMemoryLru{ - cache: cache, - data: gmap.New(true), - list: glist.New(true), - rawList: glist.New(true), - closed: gtype.NewBool(), - } - return lru -} - -// Close closes the LRU object. -func (lru *adapterMemoryLru) Close() { - lru.closed.Set(true) -} - -// Remove deletes the `key` FROM `lru`. -func (lru *adapterMemoryLru) Remove(key interface{}) { - if v := lru.data.Get(key); v != nil { - lru.data.Remove(key) - lru.list.Remove(v.(*glist.Element)) - } -} - -// Size returns the size of `lru`. -func (lru *adapterMemoryLru) Size() int { - return lru.data.Size() -} - -// Push pushes `key` to the tail of `lru`. -func (lru *adapterMemoryLru) Push(key interface{}) { - lru.rawList.PushBack(key) -} - -// Pop deletes and returns the key from tail of `lru`. -func (lru *adapterMemoryLru) Pop() interface{} { - if v := lru.list.PopBack(); v != nil { - lru.data.Remove(v) - return v - } - return nil -} - -// SyncAndClear synchronizes the keys from `rawList` to `list` and `data` -// using Least Recently Used algorithm. -func (lru *adapterMemoryLru) SyncAndClear(ctx context.Context) { - if lru.closed.Val() { - gtimer.Exit() - return - } - // Data synchronization. 
- var alreadyExistItem interface{} - for { - if rawListItem := lru.rawList.PopFront(); rawListItem != nil { - // Deleting the key from list. - if alreadyExistItem = lru.data.Get(rawListItem); alreadyExistItem != nil { - lru.list.Remove(alreadyExistItem.(*glist.Element)) - } - // Pushing key to the head of the list - // and setting its list item to hash table for quick indexing. - lru.data.Set(rawListItem, lru.list.PushFront(rawListItem)) - } else { - break - } - } - // Data cleaning up. - for clearLength := lru.Size() - lru.cache.cap; clearLength > 0; clearLength-- { - if topKey := lru.Pop(); topKey != nil { - lru.cache.clearByKey(topKey, true) - } - } -} diff --git a/vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter_redis.go b/vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter_redis.go deleted file mode 100644 index b9cb45b3..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter_redis.go +++ /dev/null @@ -1,438 +0,0 @@ -// Copyright 2020 gf Author(https://github.com/gogf/gf). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gcache - -import ( - "context" - "time" - - "github.com/gogf/gf/v2/container/gvar" - "github.com/gogf/gf/v2/database/gredis" - "github.com/gogf/gf/v2/util/gconv" -) - -// AdapterRedis is the gcache adapter implements using Redis server. -type AdapterRedis struct { - redis *gredis.Redis -} - -// NewAdapterRedis creates and returns a new memory cache object. -func NewAdapterRedis(redis *gredis.Redis) Adapter { - return &AdapterRedis{ - redis: redis, - } -} - -// Set sets cache with `key`-`value` pair, which is expired after `duration`. -// -// It does not expire if `duration` == 0. -// It deletes the keys of `data` if `duration` < 0 or given `value` is nil. 
-func (c *AdapterRedis) Set(ctx context.Context, key interface{}, value interface{}, duration time.Duration) (err error) { - redisKey := gconv.String(key) - if value == nil || duration < 0 { - _, err = c.redis.Del(ctx, redisKey) - } else { - if duration == 0 { - _, err = c.redis.Set(ctx, redisKey, value) - } else { - err = c.redis.SetEX(ctx, redisKey, value, int64(duration.Seconds())) - } - } - return err -} - -// SetMap batch sets cache with key-value pairs by `data` map, which is expired after `duration`. -// -// It does not expire if `duration` == 0. -// It deletes the keys of `data` if `duration` < 0 or given `value` is nil. -func (c *AdapterRedis) SetMap(ctx context.Context, data map[interface{}]interface{}, duration time.Duration) error { - if len(data) == 0 { - return nil - } - // DEL. - if duration < 0 { - var ( - index = 0 - keys = make([]string, len(data)) - ) - for k := range data { - keys[index] = gconv.String(k) - index += 1 - } - _, err := c.redis.Del(ctx, keys...) - if err != nil { - return err - } - } - if duration == 0 { - err := c.redis.MSet(ctx, gconv.Map(data)) - if err != nil { - return err - } - } - if duration > 0 { - var err error - for k, v := range data { - if err = c.Set(ctx, k, v, duration); err != nil { - return err - } - } - } - return nil -} - -// SetIfNotExist sets cache with `key`-`value` pair which is expired after `duration` -// if `key` does not exist in the cache. It returns true the `key` does not exist in the -// cache, and it sets `value` successfully to the cache, or else it returns false. -// -// It does not expire if `duration` == 0. -// It deletes the `key` if `duration` < 0 or given `value` is nil. -func (c *AdapterRedis) SetIfNotExist(ctx context.Context, key interface{}, value interface{}, duration time.Duration) (bool, error) { - var ( - err error - redisKey = gconv.String(key) - ) - // Execute the function and retrieve the result. - f, ok := value.(Func) - if !ok { - // Compatible with raw function value. 
- f, ok = value.(func(ctx context.Context) (value interface{}, err error)) - } - if ok { - if value, err = f(ctx); err != nil { - return false, err - } - } - // DEL. - if duration < 0 || value == nil { - var delResult int64 - delResult, err = c.redis.Del(ctx, redisKey) - if err != nil { - return false, err - } - if delResult == 1 { - return true, err - } - return false, err - } - ok, err = c.redis.SetNX(ctx, redisKey, value) - if err != nil { - return ok, err - } - if ok && duration > 0 { - // Set the expiration. - _, err = c.redis.Expire(ctx, redisKey, int64(duration.Seconds())) - if err != nil { - return ok, err - } - return ok, err - } - return ok, err -} - -// SetIfNotExistFunc sets `key` with result of function `f` and returns true -// if `key` does not exist in the cache, or else it does nothing and returns false if `key` already exists. -// -// The parameter `value` can be type of `func() interface{}`, but it does nothing if its -// result is nil. -// -// It does not expire if `duration` == 0. -// It deletes the `key` if `duration` < 0 or given `value` is nil. -func (c *AdapterRedis) SetIfNotExistFunc(ctx context.Context, key interface{}, f Func, duration time.Duration) (ok bool, err error) { - value, err := f(ctx) - if err != nil { - return false, err - } - return c.SetIfNotExist(ctx, key, value, duration) -} - -// SetIfNotExistFuncLock sets `key` with result of function `f` and returns true -// if `key` does not exist in the cache, or else it does nothing and returns false if `key` already exists. -// -// It does not expire if `duration` == 0. -// It deletes the `key` if `duration` < 0 or given `value` is nil. -// -// Note that it differs from function `SetIfNotExistFunc` is that the function `f` is executed within -// writing mutex lock for concurrent safety purpose. 
-func (c *AdapterRedis) SetIfNotExistFuncLock(ctx context.Context, key interface{}, f Func, duration time.Duration) (ok bool, err error) { - value, err := f(ctx) - if err != nil { - return false, err - } - return c.SetIfNotExist(ctx, key, value, duration) -} - -// Get retrieves and returns the associated value of given . -// It returns nil if it does not exist or its value is nil. -func (c *AdapterRedis) Get(ctx context.Context, key interface{}) (*gvar.Var, error) { - return c.redis.Get(ctx, gconv.String(key)) -} - -// GetOrSet retrieves and returns the value of `key`, or sets `key`-`value` pair and -// returns `value` if `key` does not exist in the cache. The key-value pair expires -// after `duration`. -// -// It does not expire if `duration` == 0. -// It deletes the `key` if `duration` < 0 or given `value` is nil, but it does nothing -// if `value` is a function and the function result is nil. -func (c *AdapterRedis) GetOrSet(ctx context.Context, key interface{}, value interface{}, duration time.Duration) (result *gvar.Var, err error) { - result, err = c.Get(ctx, key) - if err != nil { - return nil, err - } - if result.IsNil() { - return gvar.New(value), c.Set(ctx, key, value, duration) - } - return -} - -// GetOrSetFunc retrieves and returns the value of `key`, or sets `key` with result of -// function `f` and returns its result if `key` does not exist in the cache. The key-value -// pair expires after `duration`. -// -// It does not expire if `duration` == 0. -// It deletes the `key` if `duration` < 0 or given `value` is nil, but it does nothing -// if `value` is a function and the function result is nil. 
-func (c *AdapterRedis) GetOrSetFunc(ctx context.Context, key interface{}, f Func, duration time.Duration) (result *gvar.Var, err error) { - v, err := c.Get(ctx, key) - if err != nil { - return nil, err - } - if v.IsNil() { - value, err := f(ctx) - if err != nil { - return nil, err - } - if value == nil { - return nil, nil - } - return gvar.New(value), c.Set(ctx, key, value, duration) - } else { - return v, nil - } -} - -// GetOrSetFuncLock retrieves and returns the value of `key`, or sets `key` with result of -// function `f` and returns its result if `key` does not exist in the cache. The key-value -// pair expires after `duration`. -// -// It does not expire if `duration` == 0. -// It deletes the `key` if `duration` < 0 or given `value` is nil, but it does nothing -// if `value` is a function and the function result is nil. -// -// Note that it differs from function `GetOrSetFunc` is that the function `f` is executed within -// writing mutex lock for concurrent safety purpose. -func (c *AdapterRedis) GetOrSetFuncLock(ctx context.Context, key interface{}, f Func, duration time.Duration) (result *gvar.Var, err error) { - return c.GetOrSetFunc(ctx, key, f, duration) -} - -// Contains checks and returns true if `key` exists in the cache, or else returns false. -func (c *AdapterRedis) Contains(ctx context.Context, key interface{}) (bool, error) { - n, err := c.redis.Exists(ctx, gconv.String(key)) - if err != nil { - return false, err - } - return n > 0, nil -} - -// Size returns the number of items in the cache. -func (c *AdapterRedis) Size(ctx context.Context) (size int, err error) { - n, err := c.redis.DBSize(ctx) - if err != nil { - return 0, err - } - return int(n), nil -} - -// Data returns a copy of all key-value pairs in the cache as map type. -// Note that this function may lead lots of memory usage, you can implement this function -// if necessary. -func (c *AdapterRedis) Data(ctx context.Context) (map[interface{}]interface{}, error) { - // Keys. 
- keys, err := c.redis.Keys(ctx, "*") - if err != nil { - return nil, err - } - // Key-Value pairs. - var m map[string]*gvar.Var - m, err = c.redis.MGet(ctx, keys...) - if err != nil { - return nil, err - } - // Type converting. - data := make(map[interface{}]interface{}) - for k, v := range m { - data[k] = v.Val() - } - return data, nil -} - -// Keys returns all keys in the cache as slice. -func (c *AdapterRedis) Keys(ctx context.Context) ([]interface{}, error) { - keys, err := c.redis.Keys(ctx, "*") - if err != nil { - return nil, err - } - return gconv.Interfaces(keys), nil -} - -// Values returns all values in the cache as slice. -func (c *AdapterRedis) Values(ctx context.Context) ([]interface{}, error) { - // Keys. - keys, err := c.redis.Keys(ctx, "*") - if err != nil { - return nil, err - } - // Key-Value pairs. - var m map[string]*gvar.Var - m, err = c.redis.MGet(ctx, keys...) - if err != nil { - return nil, err - } - // Values. - var values []interface{} - for _, key := range keys { - if v := m[key]; !v.IsNil() { - values = append(values, v.Val()) - } - } - return values, nil -} - -// Update updates the value of `key` without changing its expiration and returns the old value. -// The returned value `exist` is false if the `key` does not exist in the cache. -// -// It deletes the `key` if given `value` is nil. -// It does nothing if `key` does not exist in the cache. -func (c *AdapterRedis) Update(ctx context.Context, key interface{}, value interface{}) (oldValue *gvar.Var, exist bool, err error) { - var ( - v *gvar.Var - oldTTL int64 - redisKey = gconv.String(key) - ) - // TTL. - oldTTL, err = c.redis.TTL(ctx, redisKey) - if err != nil { - return - } - if oldTTL == -2 { - // It does not exist. - return - } - // Check existence. - v, err = c.redis.Get(ctx, redisKey) - if err != nil { - return - } - oldValue = v - // DEL. - if value == nil { - _, err = c.redis.Del(ctx, redisKey) - if err != nil { - return - } - return - } - // Update the value. 
- if oldTTL == -1 { - _, err = c.redis.Set(ctx, redisKey, value) - } else { - err = c.redis.SetEX(ctx, redisKey, value, oldTTL) - } - return oldValue, true, err -} - -// UpdateExpire updates the expiration of `key` and returns the old expiration duration value. -// -// It returns -1 and does nothing if the `key` does not exist in the cache. -// It deletes the `key` if `duration` < 0. -func (c *AdapterRedis) UpdateExpire(ctx context.Context, key interface{}, duration time.Duration) (oldDuration time.Duration, err error) { - var ( - v *gvar.Var - oldTTL int64 - redisKey = gconv.String(key) - ) - // TTL. - oldTTL, err = c.redis.TTL(ctx, redisKey) - if err != nil { - return - } - if oldTTL == -2 { - // It does not exist. - oldTTL = -1 - return - } - oldDuration = time.Duration(oldTTL) * time.Second - // DEL. - if duration < 0 { - _, err = c.redis.Del(ctx, redisKey) - return - } - // Update the expiration. - if duration > 0 { - _, err = c.redis.Expire(ctx, redisKey, int64(duration.Seconds())) - } - // No expire. - if duration == 0 { - v, err = c.redis.Get(ctx, redisKey) - if err != nil { - return - } - _, err = c.redis.Set(ctx, redisKey, v.Val()) - } - return -} - -// GetExpire retrieves and returns the expiration of `key` in the cache. -// -// Note that, -// It returns 0 if the `key` does not expire. -// It returns -1 if the `key` does not exist in the cache. -func (c *AdapterRedis) GetExpire(ctx context.Context, key interface{}) (time.Duration, error) { - ttl, err := c.redis.TTL(ctx, gconv.String(key)) - if err != nil { - return 0, err - } - switch ttl { - case -1: - return 0, nil - case -2: - return -1, nil - default: - return time.Duration(ttl) * time.Second, nil - } -} - -// Remove deletes the one or more keys from cache, and returns its value. -// If multiple keys are given, it returns the value of the deleted last item. 
-func (c *AdapterRedis) Remove(ctx context.Context, keys ...interface{}) (lastValue *gvar.Var, err error) { - if len(keys) == 0 { - return nil, nil - } - // Retrieves the last key value. - if lastValue, err = c.redis.Get(ctx, gconv.String(keys[len(keys)-1])); err != nil { - return nil, err - } - // Deletes all given keys. - _, err = c.redis.Del(ctx, gconv.Strings(keys)...) - return -} - -// Clear clears all data of the cache. -// Note that this function is sensitive and should be carefully used. -// It uses `FLUSHDB` command in redis server, which might be disabled in server. -func (c *AdapterRedis) Clear(ctx context.Context) (err error) { - // The "FLUSHDB" may not be available. - err = c.redis.FlushDB(ctx) - return -} - -// Close closes the cache. -func (c *AdapterRedis) Close(ctx context.Context) error { - // It does nothing. - return nil -} diff --git a/vendor/github.com/gogf/gf/v2/os/gcache/gcache_cache.go b/vendor/github.com/gogf/gf/v2/os/gcache/gcache_cache.go deleted file mode 100644 index 9a039457..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gcache/gcache_cache.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gcache - -import ( - "context" - "time" - - "github.com/gogf/gf/v2/os/gtimer" - "github.com/gogf/gf/v2/util/gconv" -) - -// Cache struct. -type Cache struct { - localAdapter -} - -// localAdapter is alias of Adapter, for embedded attribute purpose only. -type localAdapter = Adapter - -// New creates and returns a new cache object using default memory adapter. -// Note that the LRU feature is only available using memory adapter. -func New(lruCap ...int) *Cache { - memAdapter := NewAdapterMemory(lruCap...) 
- c := &Cache{ - localAdapter: memAdapter, - } - // Here may be a "timer leak" if adapter is manually changed from memory adapter. - // Do not worry about this, as adapter is less changed, and it does nothing if it's not used. - gtimer.AddSingleton(context.Background(), time.Second, memAdapter.(*AdapterMemory).syncEventAndClearExpired) - return c -} - -// NewWithAdapter creates and returns a Cache object with given Adapter implements. -func NewWithAdapter(adapter Adapter) *Cache { - return &Cache{ - localAdapter: adapter, - } -} - -// SetAdapter changes the adapter for this cache. -// Be very note that, this setting function is not concurrent-safe, which means you should not call -// this setting function concurrently in multiple goroutines. -func (c *Cache) SetAdapter(adapter Adapter) { - c.localAdapter = adapter -} - -// GetAdapter returns the adapter that is set in current Cache. -func (c *Cache) GetAdapter() Adapter { - return c.localAdapter -} - -// Removes deletes `keys` in the cache. -func (c *Cache) Removes(ctx context.Context, keys []interface{}) error { - _, err := c.Remove(ctx, keys...) - return err -} - -// KeyStrings returns all keys in the cache as string slice. -func (c *Cache) KeyStrings(ctx context.Context) ([]string, error) { - keys, err := c.Keys(ctx) - if err != nil { - return nil, err - } - return gconv.Strings(keys), nil -} diff --git a/vendor/github.com/gogf/gf/v2/os/gcache/gcache_cache_must.go b/vendor/github.com/gogf/gf/v2/os/gcache/gcache_cache_must.go deleted file mode 100644 index 65961a00..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gcache/gcache_cache_must.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. 
- -package gcache - -import ( - "context" - "time" - - "github.com/gogf/gf/v2/container/gvar" -) - -// MustGet acts like Get, but it panics if any error occurs. -func (c *Cache) MustGet(ctx context.Context, key interface{}) *gvar.Var { - v, err := c.Get(ctx, key) - if err != nil { - panic(err) - } - return v -} - -// MustGetOrSet acts like GetOrSet, but it panics if any error occurs. -func (c *Cache) MustGetOrSet(ctx context.Context, key interface{}, value interface{}, duration time.Duration) *gvar.Var { - v, err := c.GetOrSet(ctx, key, value, duration) - if err != nil { - panic(err) - } - return v -} - -// MustGetOrSetFunc acts like GetOrSetFunc, but it panics if any error occurs. -func (c *Cache) MustGetOrSetFunc(ctx context.Context, key interface{}, f Func, duration time.Duration) *gvar.Var { - v, err := c.GetOrSetFunc(ctx, key, f, duration) - if err != nil { - panic(err) - } - return v -} - -// MustGetOrSetFuncLock acts like GetOrSetFuncLock, but it panics if any error occurs. -func (c *Cache) MustGetOrSetFuncLock(ctx context.Context, key interface{}, f Func, duration time.Duration) *gvar.Var { - v, err := c.GetOrSetFuncLock(ctx, key, f, duration) - if err != nil { - panic(err) - } - return v -} - -// MustContains acts like Contains, but it panics if any error occurs. -func (c *Cache) MustContains(ctx context.Context, key interface{}) bool { - v, err := c.Contains(ctx, key) - if err != nil { - panic(err) - } - return v -} - -// MustGetExpire acts like GetExpire, but it panics if any error occurs. -func (c *Cache) MustGetExpire(ctx context.Context, key interface{}) time.Duration { - v, err := c.GetExpire(ctx, key) - if err != nil { - panic(err) - } - return v -} - -// MustSize acts like Size, but it panics if any error occurs. -func (c *Cache) MustSize(ctx context.Context) int { - v, err := c.Size(ctx) - if err != nil { - panic(err) - } - return v -} - -// MustData acts like Data, but it panics if any error occurs. 
-func (c *Cache) MustData(ctx context.Context) map[interface{}]interface{} { - v, err := c.Data(ctx) - if err != nil { - panic(err) - } - return v -} - -// MustKeys acts like Keys, but it panics if any error occurs. -func (c *Cache) MustKeys(ctx context.Context) []interface{} { - v, err := c.Keys(ctx) - if err != nil { - panic(err) - } - return v -} - -// MustKeyStrings acts like KeyStrings, but it panics if any error occurs. -func (c *Cache) MustKeyStrings(ctx context.Context) []string { - v, err := c.KeyStrings(ctx) - if err != nil { - panic(err) - } - return v -} - -// MustValues acts like Values, but it panics if any error occurs. -func (c *Cache) MustValues(ctx context.Context) []interface{} { - v, err := c.Values(ctx) - if err != nil { - panic(err) - } - return v -} diff --git a/vendor/github.com/gogf/gf/v2/os/gcron/gcron.go b/vendor/github.com/gogf/gf/v2/os/gcron/gcron.go deleted file mode 100644 index 05e834d3..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gcron/gcron.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -// Package gcron implements a cron pattern parser and job runner. -package gcron - -import ( - "context" - "time" - - "github.com/gogf/gf/v2/os/glog" - "github.com/gogf/gf/v2/os/gtimer" -) - -const ( - StatusReady = gtimer.StatusReady - StatusRunning = gtimer.StatusRunning - StatusStopped = gtimer.StatusStopped - StatusClosed = gtimer.StatusClosed -) - -var ( - // Default cron object. - defaultCron = New() -) - -// SetLogger sets the logger for cron. -func SetLogger(logger glog.ILogger) { - defaultCron.SetLogger(logger) -} - -// GetLogger returns the logger in the cron. -func GetLogger() glog.ILogger { - return defaultCron.GetLogger() -} - -// Add adds a timed task to default cron object. 
-// A unique `name` can be bound with the timed task. -// It returns and error if the `name` is already used. -func Add(ctx context.Context, pattern string, job JobFunc, name ...string) (*Entry, error) { - return defaultCron.Add(ctx, pattern, job, name...) -} - -// AddSingleton adds a singleton timed task, to default cron object. -// A singleton timed task is that can only be running one single instance at the same time. -// A unique `name` can be bound with the timed task. -// It returns and error if the `name` is already used. -func AddSingleton(ctx context.Context, pattern string, job JobFunc, name ...string) (*Entry, error) { - return defaultCron.AddSingleton(ctx, pattern, job, name...) -} - -// AddOnce adds a timed task which can be run only once, to default cron object. -// A unique `name` can be bound with the timed task. -// It returns and error if the `name` is already used. -func AddOnce(ctx context.Context, pattern string, job JobFunc, name ...string) (*Entry, error) { - return defaultCron.AddOnce(ctx, pattern, job, name...) -} - -// AddTimes adds a timed task which can be run specified times, to default cron object. -// A unique `name` can be bound with the timed task. -// It returns and error if the `name` is already used. -func AddTimes(ctx context.Context, pattern string, times int, job JobFunc, name ...string) (*Entry, error) { - return defaultCron.AddTimes(ctx, pattern, times, job, name...) -} - -// DelayAdd adds a timed task to default cron object after `delay` time. -func DelayAdd(ctx context.Context, delay time.Duration, pattern string, job JobFunc, name ...string) { - defaultCron.DelayAdd(ctx, delay, pattern, job, name...) -} - -// DelayAddSingleton adds a singleton timed task after `delay` time to default cron object. -func DelayAddSingleton(ctx context.Context, delay time.Duration, pattern string, job JobFunc, name ...string) { - defaultCron.DelayAddSingleton(ctx, delay, pattern, job, name...) 
-} - -// DelayAddOnce adds a timed task after `delay` time to default cron object. -// This timed task can be run only once. -func DelayAddOnce(ctx context.Context, delay time.Duration, pattern string, job JobFunc, name ...string) { - defaultCron.DelayAddOnce(ctx, delay, pattern, job, name...) -} - -// DelayAddTimes adds a timed task after `delay` time to default cron object. -// This timed task can be run specified times. -func DelayAddTimes(ctx context.Context, delay time.Duration, pattern string, times int, job JobFunc, name ...string) { - defaultCron.DelayAddTimes(ctx, delay, pattern, times, job, name...) -} - -// Search returns a scheduled task with the specified `name`. -// It returns nil if no found. -func Search(name string) *Entry { - return defaultCron.Search(name) -} - -// Remove deletes scheduled task which named `name`. -func Remove(name string) { - defaultCron.Remove(name) -} - -// Size returns the size of the timed tasks of default cron. -func Size() int { - return defaultCron.Size() -} - -// Entries return all timed tasks as slice. -func Entries() []*Entry { - return defaultCron.Entries() -} - -// Start starts running the specified timed task named `name`. -// If no`name` specified, it starts the entire cron. -func Start(name ...string) { - defaultCron.Start(name...) -} - -// Stop stops running the specified timed task named `name`. -// If no`name` specified, it stops the entire cron. -func Stop(name ...string) { - defaultCron.Stop(name...) -} diff --git a/vendor/github.com/gogf/gf/v2/os/gcron/gcron_cron.go b/vendor/github.com/gogf/gf/v2/os/gcron/gcron_cron.go deleted file mode 100644 index 1a83f57b..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gcron/gcron_cron.go +++ /dev/null @@ -1,221 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. 
-// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gcron - -import ( - "context" - "time" - - "github.com/gogf/gf/v2/container/garray" - "github.com/gogf/gf/v2/container/gmap" - "github.com/gogf/gf/v2/container/gtype" - "github.com/gogf/gf/v2/os/glog" - "github.com/gogf/gf/v2/os/gtimer" -) - -type Cron struct { - idGen *gtype.Int64 // Used for unique name generation. - status *gtype.Int // Timed task status(0: Not Start; 1: Running; 2: Stopped; -1: Closed) - entries *gmap.StrAnyMap // All timed task entries. - logger glog.ILogger // Logger, it is nil in default. -} - -// New returns a new Cron object with default settings. -func New() *Cron { - return &Cron{ - idGen: gtype.NewInt64(), - status: gtype.NewInt(StatusRunning), - entries: gmap.NewStrAnyMap(true), - } -} - -// SetLogger sets the logger for cron. -func (c *Cron) SetLogger(logger glog.ILogger) { - c.logger = logger -} - -// GetLogger returns the logger in the cron. -func (c *Cron) GetLogger() glog.ILogger { - return c.logger -} - -// AddEntry creates and returns a new Entry object. -func (c *Cron) AddEntry(ctx context.Context, pattern string, job JobFunc, times int, isSingleton bool, name ...string) (*Entry, error) { - var ( - entryName = "" - infinite = false - ) - if len(name) > 0 { - entryName = name[0] - } - if times <= 0 { - infinite = true - } - return c.doAddEntry(doAddEntryInput{ - Name: entryName, - Job: job, - Ctx: ctx, - Times: times, - Pattern: pattern, - IsSingleton: isSingleton, - Infinite: infinite, - }) -} - -// Add adds a timed task. -// A unique `name` can be bound with the timed task. -// It returns and error if the `name` is already used. -func (c *Cron) Add(ctx context.Context, pattern string, job JobFunc, name ...string) (*Entry, error) { - return c.AddEntry(ctx, pattern, job, -1, false, name...) -} - -// AddSingleton adds a singleton timed task. 
-// A singleton timed task is that can only be running one single instance at the same time. -// A unique `name` can be bound with the timed task. -// It returns and error if the `name` is already used. -func (c *Cron) AddSingleton(ctx context.Context, pattern string, job JobFunc, name ...string) (*Entry, error) { - return c.AddEntry(ctx, pattern, job, -1, true, name...) -} - -// AddTimes adds a timed task which can be run specified times. -// A unique `name` can be bound with the timed task. -// It returns and error if the `name` is already used. -func (c *Cron) AddTimes(ctx context.Context, pattern string, times int, job JobFunc, name ...string) (*Entry, error) { - return c.AddEntry(ctx, pattern, job, times, false, name...) -} - -// AddOnce adds a timed task which can be run only once. -// A unique `name` can be bound with the timed task. -// It returns and error if the `name` is already used. -func (c *Cron) AddOnce(ctx context.Context, pattern string, job JobFunc, name ...string) (*Entry, error) { - return c.AddEntry(ctx, pattern, job, 1, false, name...) -} - -// DelayAddEntry adds a timed task after `delay` time. -func (c *Cron) DelayAddEntry(ctx context.Context, delay time.Duration, pattern string, job JobFunc, times int, isSingleton bool, name ...string) { - gtimer.AddOnce(ctx, delay, func(ctx context.Context) { - if _, err := c.AddEntry(ctx, pattern, job, times, isSingleton, name...); err != nil { - panic(err) - } - }) -} - -// DelayAdd adds a timed task after `delay` time. -func (c *Cron) DelayAdd(ctx context.Context, delay time.Duration, pattern string, job JobFunc, name ...string) { - gtimer.AddOnce(ctx, delay, func(ctx context.Context) { - if _, err := c.Add(ctx, pattern, job, name...); err != nil { - panic(err) - } - }) -} - -// DelayAddSingleton adds a singleton timed task after `delay` time. 
-func (c *Cron) DelayAddSingleton(ctx context.Context, delay time.Duration, pattern string, job JobFunc, name ...string) { - gtimer.AddOnce(ctx, delay, func(ctx context.Context) { - if _, err := c.AddSingleton(ctx, pattern, job, name...); err != nil { - panic(err) - } - }) -} - -// DelayAddOnce adds a timed task after `delay` time. -// This timed task can be run only once. -func (c *Cron) DelayAddOnce(ctx context.Context, delay time.Duration, pattern string, job JobFunc, name ...string) { - gtimer.AddOnce(ctx, delay, func(ctx context.Context) { - if _, err := c.AddOnce(ctx, pattern, job, name...); err != nil { - panic(err) - } - }) -} - -// DelayAddTimes adds a timed task after `delay` time. -// This timed task can be run specified times. -func (c *Cron) DelayAddTimes(ctx context.Context, delay time.Duration, pattern string, times int, job JobFunc, name ...string) { - gtimer.AddOnce(ctx, delay, func(ctx context.Context) { - if _, err := c.AddTimes(ctx, pattern, times, job, name...); err != nil { - panic(err) - } - }) -} - -// Search returns a scheduled task with the specified `name`. -// It returns nil if not found. -func (c *Cron) Search(name string) *Entry { - if v := c.entries.Get(name); v != nil { - return v.(*Entry) - } - return nil -} - -// Start starts running the specified timed task named `name`. -// If no`name` specified, it starts the entire cron. -func (c *Cron) Start(name ...string) { - if len(name) > 0 { - for _, v := range name { - if entry := c.Search(v); entry != nil { - entry.Start() - } - } - } else { - c.status.Set(StatusReady) - } -} - -// Stop stops running the specified timed task named `name`. -// If no`name` specified, it stops the entire cron. -func (c *Cron) Stop(name ...string) { - if len(name) > 0 { - for _, v := range name { - if entry := c.Search(v); entry != nil { - entry.Stop() - } - } - } else { - c.status.Set(StatusStopped) - } -} - -// Remove deletes scheduled task which named `name`. 
-func (c *Cron) Remove(name string) { - if v := c.entries.Get(name); v != nil { - v.(*Entry).Close() - } -} - -// Close stops and closes current cron. -func (c *Cron) Close() { - c.status.Set(StatusClosed) -} - -// Size returns the size of the timed tasks. -func (c *Cron) Size() int { - return c.entries.Size() -} - -// Entries return all timed tasks as slice(order by registered time asc). -func (c *Cron) Entries() []*Entry { - array := garray.NewSortedArraySize(c.entries.Size(), func(v1, v2 interface{}) int { - entry1 := v1.(*Entry) - entry2 := v2.(*Entry) - if entry1.Time.Nanosecond() > entry2.Time.Nanosecond() { - return 1 - } - return -1 - }, true) - c.entries.RLockFunc(func(m map[string]interface{}) { - for _, v := range m { - array.Add(v.(*Entry)) - } - }) - entries := make([]*Entry, array.Len()) - array.RLockFunc(func(array []interface{}) { - for k, v := range array { - entries[k] = v.(*Entry) - } - }) - return entries -} diff --git a/vendor/github.com/gogf/gf/v2/os/gcron/gcron_entry.go b/vendor/github.com/gogf/gf/v2/os/gcron/gcron_entry.go deleted file mode 100644 index 878f2fa8..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gcron/gcron_entry.go +++ /dev/null @@ -1,195 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gcron - -import ( - "context" - "fmt" - "reflect" - "runtime" - "time" - - "github.com/gogf/gf/v2/container/gtype" - "github.com/gogf/gf/v2/errors/gcode" - "github.com/gogf/gf/v2/errors/gerror" - "github.com/gogf/gf/v2/os/glog" - "github.com/gogf/gf/v2/os/gtimer" - "github.com/gogf/gf/v2/util/gconv" -) - -// JobFunc is the timing called job function in cron. -type JobFunc = gtimer.JobFunc - -// Entry is timing task entry. -type Entry struct { - cron *Cron // Cron object belonged to. 
- timerEntry *gtimer.Entry // Associated timer Entry. - schedule *cronSchedule // Timed schedule object. - jobName string // Callback function name(address info). - times *gtype.Int // Running times limit. - infinite *gtype.Bool // No times limit. - Name string // Entry name. - Job JobFunc `json:"-"` // Callback function. - Time time.Time // Registered time. -} - -type doAddEntryInput struct { - Name string // Name names this entry for manual control. - Job JobFunc // Job is the callback function for timed task execution. - Ctx context.Context // The context for the job. - Times int // Times specifies the running limit times for the entry. - Pattern string // Pattern is the crontab style string for scheduler. - IsSingleton bool // Singleton specifies whether timed task executing in singleton mode. - Infinite bool // Infinite specifies whether this entry is running with no times limit. -} - -// doAddEntry creates and returns a new Entry object. -func (c *Cron) doAddEntry(in doAddEntryInput) (*Entry, error) { - if in.Name != "" { - if c.Search(in.Name) != nil { - return nil, gerror.NewCodef(gcode.CodeInvalidOperation, `cron job "%s" already exists`, in.Name) - } - } - schedule, err := newSchedule(in.Pattern) - if err != nil { - return nil, err - } - // No limit for `times`, for timer checking scheduling every second. - entry := &Entry{ - cron: c, - schedule: schedule, - jobName: runtime.FuncForPC(reflect.ValueOf(in.Job).Pointer()).Name(), - times: gtype.NewInt(in.Times), - infinite: gtype.NewBool(in.Infinite), - Job: in.Job, - Time: time.Now(), - } - if in.Name != "" { - entry.Name = in.Name - } else { - entry.Name = "cron-" + gconv.String(c.idGen.Add(1)) - } - // When you add a scheduled task, you cannot allow it to run. - // It cannot start running when added to timer. 
- // It should start running after the entry is added to the Cron entries map, to avoid the task - // from running during adding where the entries do not have the entry information, which might cause panic. - entry.timerEntry = gtimer.AddEntry( - in.Ctx, - time.Second, - entry.checkAndRun, - in.IsSingleton, - -1, - gtimer.StatusStopped, - ) - c.entries.Set(entry.Name, entry) - entry.timerEntry.Start() - return entry, nil -} - -// IsSingleton return whether this entry is a singleton timed task. -func (entry *Entry) IsSingleton() bool { - return entry.timerEntry.IsSingleton() -} - -// SetSingleton sets the entry running in singleton mode. -func (entry *Entry) SetSingleton(enabled bool) { - entry.timerEntry.SetSingleton(enabled) -} - -// SetTimes sets the times which the entry can run. -func (entry *Entry) SetTimes(times int) { - entry.times.Set(times) - entry.infinite.Set(false) -} - -// Status returns the status of entry. -func (entry *Entry) Status() int { - return entry.timerEntry.Status() -} - -// SetStatus sets the status of the entry. -func (entry *Entry) SetStatus(status int) int { - return entry.timerEntry.SetStatus(status) -} - -// Start starts running the entry. -func (entry *Entry) Start() { - entry.timerEntry.Start() -} - -// Stop stops running the entry. -func (entry *Entry) Stop() { - entry.timerEntry.Stop() -} - -// Close stops and removes the entry from cron. -func (entry *Entry) Close() { - entry.cron.entries.Remove(entry.Name) - entry.timerEntry.Close() -} - -// checkAndRun is the core timing task check logic. 
-func (entry *Entry) checkAndRun(ctx context.Context) { - currentTime := time.Now() - if !entry.schedule.checkMeetAndUpdateLastSeconds(ctx, currentTime) { - return - } - switch entry.cron.status.Val() { - case StatusStopped: - return - - case StatusClosed: - entry.logDebugf(ctx, `cron job "%s" is removed`, entry.getJobNameWithPattern()) - entry.Close() - - case StatusReady, StatusRunning: - defer func() { - if exception := recover(); exception != nil { - // Exception caught, it logs the error content to logger in default behavior. - entry.logErrorf(ctx, - `cron job "%s(%s)" end with error: %+v`, - entry.jobName, entry.schedule.pattern, exception, - ) - } else { - entry.logDebugf(ctx, `cron job "%s" ends`, entry.getJobNameWithPattern()) - } - if entry.timerEntry.Status() == StatusClosed { - entry.Close() - } - }() - - // Running times check. - if !entry.infinite.Val() { - times := entry.times.Add(-1) - if times <= 0 { - if entry.timerEntry.SetStatus(StatusClosed) == StatusClosed || times < 0 { - return - } - } - } - entry.logDebugf(ctx, `cron job "%s" starts`, entry.getJobNameWithPattern()) - entry.Job(ctx) - } -} - -func (entry *Entry) getJobNameWithPattern() string { - return fmt.Sprintf(`%s(%s)`, entry.jobName, entry.schedule.pattern) -} - -func (entry *Entry) logDebugf(ctx context.Context, format string, v ...interface{}) { - if logger := entry.cron.GetLogger(); logger != nil { - logger.Debugf(ctx, format, v...) - } -} - -func (entry *Entry) logErrorf(ctx context.Context, format string, v ...interface{}) { - logger := entry.cron.GetLogger() - if logger == nil { - logger = glog.DefaultLogger() - } - logger.Errorf(ctx, format, v...) -} diff --git a/vendor/github.com/gogf/gf/v2/os/gcron/gcron_schedule.go b/vendor/github.com/gogf/gf/v2/os/gcron/gcron_schedule.go deleted file mode 100644 index abeb345e..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gcron/gcron_schedule.go +++ /dev/null @@ -1,412 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). 
All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gcron - -import ( - "context" - "strconv" - "strings" - "time" - - "github.com/gogf/gf/v2/container/gtype" - "github.com/gogf/gf/v2/errors/gcode" - "github.com/gogf/gf/v2/errors/gerror" - "github.com/gogf/gf/v2/os/gtime" - "github.com/gogf/gf/v2/text/gregex" -) - -// cronSchedule is the schedule for cron job. -type cronSchedule struct { - createTimestamp int64 // Created timestamp in seconds. - everySeconds int64 // Running interval in seconds. - pattern string // The raw cron pattern string. - secondMap map[int]struct{} // Job can run in these second numbers. - minuteMap map[int]struct{} // Job can run in these minute numbers. - hourMap map[int]struct{} // Job can run in these hour numbers. - dayMap map[int]struct{} // Job can run in these day numbers. - weekMap map[int]struct{} // Job can run in these week numbers. - monthMap map[int]struct{} // Job can run in these moth numbers. - lastTimestamp *gtype.Int64 // Last timestamp number, for timestamp fix in some delay. -} - -const ( - // regular expression for cron pattern, which contains 6 parts of time units. - regexForCron = `^([\-/\d\*\?,]+)\s+([\-/\d\*\?,]+)\s+([\-/\d\*\?,]+)\s+([\-/\d\*\?,]+)\s+([\-/\d\*\?,A-Za-z]+)\s+([\-/\d\*\?,A-Za-z]+)$` - patternItemTypeUnknown = iota - patternItemTypeWeek - patternItemTypeMonth -) - -var ( - // Predefined pattern map. - predefinedPatternMap = map[string]string{ - "@yearly": "0 0 0 1 1 *", - "@annually": "0 0 0 1 1 *", - "@monthly": "0 0 0 1 * *", - "@weekly": "0 0 0 * * 0", - "@daily": "0 0 0 * * *", - "@midnight": "0 0 0 * * *", - "@hourly": "0 0 * * * *", - } - // Short month name to its number. 
- monthShortNameMap = map[string]int{ - "jan": 1, - "feb": 2, - "mar": 3, - "apr": 4, - "may": 5, - "jun": 6, - "jul": 7, - "aug": 8, - "sep": 9, - "oct": 10, - "nov": 11, - "dec": 12, - } - // Full month name to its number. - monthFullNameMap = map[string]int{ - "january": 1, - "february": 2, - "march": 3, - "april": 4, - "may": 5, - "june": 6, - "july": 7, - "august": 8, - "september": 9, - "october": 10, - "november": 11, - "december": 12, - } - // Short week name to its number. - weekShortNameMap = map[string]int{ - "sun": 0, - "mon": 1, - "tue": 2, - "wed": 3, - "thu": 4, - "fri": 5, - "sat": 6, - } - // Full week name to its number. - weekFullNameMap = map[string]int{ - "sunday": 0, - "monday": 1, - "tuesday": 2, - "wednesday": 3, - "thursday": 4, - "friday": 5, - "saturday": 6, - } -) - -// newSchedule creates and returns a schedule object for given cron pattern. -func newSchedule(pattern string) (*cronSchedule, error) { - var currentTimestamp = time.Now().Unix() - // Check if the predefined patterns. - if match, _ := gregex.MatchString(`(@\w+)\s*(\w*)\s*`, pattern); len(match) > 0 { - key := strings.ToLower(match[1]) - if v, ok := predefinedPatternMap[key]; ok { - pattern = v - } else if strings.Compare(key, "@every") == 0 { - d, err := gtime.ParseDuration(match[2]) - if err != nil { - return nil, err - } - return &cronSchedule{ - createTimestamp: currentTimestamp, - everySeconds: int64(d.Seconds()), - pattern: pattern, - lastTimestamp: gtype.NewInt64(currentTimestamp), - }, nil - } else { - return nil, gerror.NewCodef(gcode.CodeInvalidParameter, `invalid pattern: "%s"`, pattern) - } - } - // Handle the common cron pattern, like: - // 0 0 0 1 1 2 - if match, _ := gregex.MatchString(regexForCron, pattern); len(match) == 7 { - schedule := &cronSchedule{ - createTimestamp: currentTimestamp, - everySeconds: 0, - pattern: pattern, - lastTimestamp: gtype.NewInt64(currentTimestamp), - } - // Second. 
- if m, err := parsePatternItem(match[1], 0, 59, false); err != nil { - return nil, err - } else { - schedule.secondMap = m - } - // Minute. - if m, err := parsePatternItem(match[2], 0, 59, false); err != nil { - return nil, err - } else { - schedule.minuteMap = m - } - // Hour. - if m, err := parsePatternItem(match[3], 0, 23, false); err != nil { - return nil, err - } else { - schedule.hourMap = m - } - // Day. - if m, err := parsePatternItem(match[4], 1, 31, true); err != nil { - return nil, err - } else { - schedule.dayMap = m - } - // Month. - if m, err := parsePatternItem(match[5], 1, 12, false); err != nil { - return nil, err - } else { - schedule.monthMap = m - } - // Week. - if m, err := parsePatternItem(match[6], 0, 6, true); err != nil { - return nil, err - } else { - schedule.weekMap = m - } - return schedule, nil - } - return nil, gerror.NewCodef(gcode.CodeInvalidParameter, `invalid pattern: "%s"`, pattern) -} - -// parsePatternItem parses every item in the pattern and returns the result as map, which is used for indexing. -func parsePatternItem(item string, min int, max int, allowQuestionMark bool) (map[int]struct{}, error) { - m := make(map[int]struct{}, max-min+1) - if item == "*" || (allowQuestionMark && item == "?") { - for i := min; i <= max; i++ { - m[i] = struct{}{} - } - return m, nil - } - // Like: MON,FRI - for _, itemElem := range strings.Split(item, ",") { - var ( - interval = 1 - intervalArray = strings.Split(itemElem, "/") - ) - if len(intervalArray) == 2 { - if number, err := strconv.Atoi(intervalArray[1]); err != nil { - return nil, gerror.NewCodef(gcode.CodeInvalidParameter, `invalid pattern item: "%s"`, itemElem) - } else { - interval = number - } - } - var ( - rangeMin = min - rangeMax = max - itemType = patternItemTypeUnknown - rangeArray = strings.Split(intervalArray[0], "-") // Like: 1-30, JAN-DEC - ) - switch max { - case 6: - // It's checking week field. 
- itemType = patternItemTypeWeek - - case 12: - // It's checking month field. - itemType = patternItemTypeMonth - } - // Eg: */5 - if rangeArray[0] != "*" { - if number, err := parsePatternItemValue(rangeArray[0], itemType); err != nil { - return nil, gerror.NewCodef(gcode.CodeInvalidParameter, `invalid pattern item: "%s"`, itemElem) - } else { - rangeMin = number - if len(intervalArray) == 1 { - rangeMax = number - } - } - } - if len(rangeArray) == 2 { - if number, err := parsePatternItemValue(rangeArray[1], itemType); err != nil { - return nil, gerror.NewCodef(gcode.CodeInvalidParameter, `invalid pattern item: "%s"`, itemElem) - } else { - rangeMax = number - } - } - for i := rangeMin; i <= rangeMax; i += interval { - m[i] = struct{}{} - } - } - return m, nil -} - -// parsePatternItemValue parses the field value to a number according to its field type. -func parsePatternItemValue(value string, itemType int) (int, error) { - if gregex.IsMatchString(`^\d+$`, value) { - // It is pure number. - if number, err := strconv.Atoi(value); err == nil { - return number, nil - } - } else { - // Check if it contains letter, - // it converts the value to number according to predefined map. - switch itemType { - case patternItemTypeWeek: - if number, ok := weekShortNameMap[strings.ToLower(value)]; ok { - return number, nil - } - if number, ok := weekFullNameMap[strings.ToLower(value)]; ok { - return number, nil - } - case patternItemTypeMonth: - if number, ok := monthShortNameMap[strings.ToLower(value)]; ok { - return number, nil - } - if number, ok := monthFullNameMap[strings.ToLower(value)]; ok { - return number, nil - } - } - } - return 0, gerror.NewCodef(gcode.CodeInvalidParameter, `invalid pattern value: "%s"`, value) -} - -// checkMeetAndUpdateLastSeconds checks if the given time `t` meets the runnable point for the job. 
-func (s *cronSchedule) checkMeetAndUpdateLastSeconds(ctx context.Context, t time.Time) bool { - var ( - lastTimestamp = s.getAndUpdateLastTimestamp(ctx, t) - lastTime = gtime.NewFromTimeStamp(lastTimestamp) - ) - - if s.everySeconds != 0 { - // It checks using interval. - secondsAfterCreated := lastTime.Timestamp() - s.createTimestamp - if secondsAfterCreated > 0 { - return secondsAfterCreated%s.everySeconds == 0 - } - return false - } - - // It checks using normal cron pattern. - if _, ok := s.secondMap[lastTime.Second()]; !ok { - return false - } - if _, ok := s.minuteMap[lastTime.Minute()]; !ok { - return false - } - if _, ok := s.hourMap[lastTime.Hour()]; !ok { - return false - } - if _, ok := s.dayMap[lastTime.Day()]; !ok { - return false - } - if _, ok := s.monthMap[lastTime.Month()]; !ok { - return false - } - if _, ok := s.weekMap[int(lastTime.Weekday())]; !ok { - return false - } - return true -} - -// Next returns the next time this schedule is activated, greater than the given -// time. If no time can be found to satisfy the schedule, return the zero time. -func (s *cronSchedule) Next(t time.Time) time.Time { - if s.everySeconds != 0 { - var ( - diff = t.Unix() - s.createTimestamp - count = diff/s.everySeconds + 1 - ) - return t.Add(time.Duration(count*s.everySeconds) * time.Second) - } - - // Start at the earliest possible time (the upcoming second). 
- t = t.Add(1*time.Second - time.Duration(t.Nanosecond())*time.Nanosecond) - var ( - loc = t.Location() - added = false - yearLimit = t.Year() + 5 - ) - -WRAP: - if t.Year() > yearLimit { - return t // who will care the job that run in five years later - } - - for !s.match(s.monthMap, int(t.Month())) { - if !added { - added = true - t = time.Date(t.Year(), t.Month(), 1, 0, 0, 0, 0, loc) - } - t = t.AddDate(0, 1, 0) - // need recheck - if t.Month() == time.January { - goto WRAP - } - } - - for !s.dayMatches(t) { - if !added { - added = true - t = time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, loc) - } - t = t.AddDate(0, 0, 1) - - // Notice if the hour is no longer midnight due to DST. - // Add an hour if it's 23, subtract an hour if it's 1. - if t.Hour() != 0 { - if t.Hour() > 12 { - t = t.Add(time.Duration(24-t.Hour()) * time.Hour) - } else { - t = t.Add(time.Duration(-t.Hour()) * time.Hour) - } - } - if t.Day() == 1 { - goto WRAP - } - } - for !s.match(s.hourMap, t.Hour()) { - if !added { - added = true - t = time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), 0, 0, 0, loc) - } - t = t.Add(time.Hour) - // need recheck - if t.Hour() == 0 { - goto WRAP - } - } - for !s.match(s.minuteMap, t.Minute()) { - if !added { - added = true - t = t.Truncate(time.Minute) - } - t = t.Add(1 * time.Minute) - - if t.Minute() == 0 { - goto WRAP - } - } - for !s.match(s.secondMap, t.Second()) { - if !added { - added = true - t = t.Truncate(time.Second) - } - t = t.Add(1 * time.Second) - if t.Second() == 0 { - goto WRAP - } - } - return t.In(loc) -} - -// dayMatches returns true if the schedule's day-of-week and day-of-month -// restrictions are satisfied by the given time. 
-func (s *cronSchedule) dayMatches(t time.Time) bool { - _, ok1 := s.dayMap[t.Day()] - _, ok2 := s.weekMap[int(t.Weekday())] - return ok1 && ok2 -} - -func (s *cronSchedule) match(m map[int]struct{}, key int) bool { - _, ok := m[key] - return ok -} diff --git a/vendor/github.com/gogf/gf/v2/os/gcron/gcron_schedule_fix.go b/vendor/github.com/gogf/gf/v2/os/gcron/gcron_schedule_fix.go deleted file mode 100644 index fac3da1c..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gcron/gcron_schedule_fix.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gcron - -import ( - "context" - "time" - - "github.com/gogf/gf/v2/internal/intlog" -) - -// getAndUpdateLastTimestamp checks fixes and returns the last timestamp that have delay fix in some seconds. -func (s *cronSchedule) getAndUpdateLastTimestamp(ctx context.Context, t time.Time) int64 { - var ( - currentTimestamp = t.Unix() - lastTimestamp = s.lastTimestamp.Val() - ) - switch { - case - lastTimestamp == currentTimestamp: - lastTimestamp += 1 - - case - lastTimestamp == currentTimestamp-1: - lastTimestamp = currentTimestamp - - case - lastTimestamp == currentTimestamp-2, - lastTimestamp == currentTimestamp-3: - lastTimestamp += 1 - - default: - // Too much delay, let's update the last timestamp to current one. 
- intlog.Printf( - ctx, - `too much delay, last timestamp "%d", current "%d"`, - lastTimestamp, currentTimestamp, - ) - lastTimestamp = currentTimestamp - } - s.lastTimestamp.Set(lastTimestamp) - return lastTimestamp -} diff --git a/vendor/github.com/gogf/gf/v2/os/gctx/gctx.go b/vendor/github.com/gogf/gf/v2/os/gctx/gctx.go deleted file mode 100644 index e0bcb666..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gctx/gctx.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -// Package gctx wraps context.Context and provides extra context features. -package gctx - -import ( - "context" - "os" - "strings" - - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/propagation" - - "github.com/gogf/gf/v2/net/gtrace" -) - -type ( - Ctx = context.Context // Ctx is short name alias for context.Context. - StrKey string // StrKey is a type for warps basic type string as context key. -) - -var ( - // initCtx is the context initialized from process environment. - initCtx context.Context -) - -func init() { - // All environment key-value pairs. - m := make(map[string]string) - i := 0 - for _, s := range os.Environ() { - i = strings.IndexByte(s, '=') - if i == -1 { - continue - } - m[s[0:i]] = s[i+1:] - } - // OpenTelemetry from environments. - initCtx = otel.GetTextMapPropagator().Extract( - context.Background(), - propagation.MapCarrier(m), - ) - initCtx = WithCtx(initCtx) -} - -// New creates and returns a context which contains context id. -func New() context.Context { - return WithCtx(context.Background()) -} - -// WithCtx creates and returns a context containing context id upon given parent context `ctx`. 
-func WithCtx(ctx context.Context) context.Context { - if CtxId(ctx) != "" { - return ctx - } - var span *gtrace.Span - ctx, span = gtrace.NewSpan(ctx, "gctx.WithCtx") - defer span.End() - return ctx -} - -// CtxId retrieves and returns the context id from context. -func CtxId(ctx context.Context) string { - return gtrace.GetTraceID(ctx) -} - -// SetInitCtx sets custom initialization context. -// Note that this function cannot be called in multiple goroutines. -func SetInitCtx(ctx context.Context) { - initCtx = ctx -} - -// GetInitCtx returns the initialization context. -// Initialization context is used in `main` or `init` functions. -func GetInitCtx() context.Context { - return initCtx -} diff --git a/vendor/github.com/gogf/gf/v2/os/gctx/gctx_never_done.go b/vendor/github.com/gogf/gf/v2/os/gctx/gctx_never_done.go deleted file mode 100644 index 8d08e53a..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gctx/gctx_never_done.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gctx - -import ( - "context" - "time" -) - -// neverDoneCtx never done. -type neverDoneCtx struct { - context.Context -} - -// Done forbids the context done from parent context. -func (*neverDoneCtx) Done() <-chan struct{} { - return nil -} - -// Deadline forbids the context deadline from parent context. -func (*neverDoneCtx) Deadline() (deadline time.Time, ok bool) { - return time.Time{}, false -} - -// Err forbids the context done from parent context. -func (c *neverDoneCtx) Err() error { - return nil -} - -// NeverDone wraps and returns a new context object that will be never done, -// which forbids the context manually done, to make the context can be propagated to asynchronous goroutines. 
-func NeverDone(ctx context.Context) context.Context { - return &neverDoneCtx{ctx} -} diff --git a/vendor/github.com/gogf/gf/v2/os/gfile/gfile.go b/vendor/github.com/gogf/gf/v2/os/gfile/gfile.go deleted file mode 100644 index bdb5aeac..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gfile/gfile.go +++ /dev/null @@ -1,449 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -// Package gfile provides easy-to-use operations for file system. -package gfile - -import ( - "os" - "os/exec" - "path/filepath" - "strings" - "time" - - "github.com/gogf/gf/v2/container/gtype" - "github.com/gogf/gf/v2/errors/gerror" - "github.com/gogf/gf/v2/text/gstr" - "github.com/gogf/gf/v2/util/gconv" -) - -const ( - // Separator for file system. - // It here defines the separator as variable - // to allow it modified by developer if necessary. - Separator = string(filepath.Separator) - - // DefaultPermOpen is the default perm for file opening. - DefaultPermOpen = os.FileMode(0666) - - // DefaultPermCopy is the default perm for file/folder copy. - DefaultPermCopy = os.FileMode(0777) -) - -var ( - // The absolute file path for main package. - // It can be only checked and set once. - mainPkgPath = gtype.NewString() - - // selfPath is the current running binary path. - // As it is most commonly used, it is so defined as an internal package variable. - selfPath = "" -) - -func init() { - // Initialize internal package variable: selfPath. - selfPath, _ = exec.LookPath(os.Args[0]) - if selfPath != "" { - selfPath, _ = filepath.Abs(selfPath) - } - if selfPath == "" { - selfPath, _ = filepath.Abs(os.Args[0]) - } -} - -// Mkdir creates directories recursively with given `path`. -// The parameter `path` is suggested to be an absolute path instead of relative one. 
-func Mkdir(path string) (err error) { - if err = os.MkdirAll(path, os.ModePerm); err != nil { - err = gerror.Wrapf(err, `os.MkdirAll failed for path "%s" with perm "%d"`, path, os.ModePerm) - return err - } - return nil -} - -// Create creates file with given `path` recursively. -// The parameter `path` is suggested to be absolute path. -func Create(path string) (*os.File, error) { - dir := Dir(path) - if !Exists(dir) { - if err := Mkdir(dir); err != nil { - return nil, err - } - } - file, err := os.Create(path) - if err != nil { - err = gerror.Wrapf(err, `os.Create failed for name "%s"`, path) - } - return file, err -} - -// Open opens file/directory READONLY. -func Open(path string) (*os.File, error) { - file, err := os.Open(path) - if err != nil { - err = gerror.Wrapf(err, `os.Open failed for name "%s"`, path) - } - return file, err -} - -// OpenFile opens file/directory with custom `flag` and `perm`. -// The parameter `flag` is like: O_RDONLY, O_RDWR, O_RDWR|O_CREATE|O_TRUNC, etc. -func OpenFile(path string, flag int, perm os.FileMode) (*os.File, error) { - file, err := os.OpenFile(path, flag, perm) - if err != nil { - err = gerror.Wrapf(err, `os.OpenFile failed with name "%s", flag "%d", perm "%d"`, path, flag, perm) - } - return file, err -} - -// OpenWithFlag opens file/directory with default perm and custom `flag`. -// The default `perm` is 0666. -// The parameter `flag` is like: O_RDONLY, O_RDWR, O_RDWR|O_CREATE|O_TRUNC, etc. -func OpenWithFlag(path string, flag int) (*os.File, error) { - file, err := OpenFile(path, flag, DefaultPermOpen) - if err != nil { - return nil, err - } - return file, nil -} - -// OpenWithFlagPerm opens file/directory with custom `flag` and `perm`. -// The parameter `flag` is like: O_RDONLY, O_RDWR, O_RDWR|O_CREATE|O_TRUNC, etc. -// The parameter `perm` is like: 0600, 0666, 0777, etc. 
-func OpenWithFlagPerm(path string, flag int, perm os.FileMode) (*os.File, error) { - file, err := OpenFile(path, flag, perm) - if err != nil { - return nil, err - } - return file, nil -} - -// Join joins string array paths with file separator of current system. -func Join(paths ...string) string { - var s string - for _, path := range paths { - if s != "" { - s += Separator - } - s += gstr.TrimRight(path, Separator) - } - return s -} - -// Exists checks whether given `path` exist. -func Exists(path string) bool { - if stat, err := os.Stat(path); stat != nil && !os.IsNotExist(err) { - return true - } - return false -} - -// IsDir checks whether given `path` a directory. -// Note that it returns false if the `path` does not exist. -func IsDir(path string) bool { - s, err := os.Stat(path) - if err != nil { - return false - } - return s.IsDir() -} - -// Pwd returns absolute path of current working directory. -// Note that it returns an empty string if retrieving current -// working directory failed. -func Pwd() string { - path, err := os.Getwd() - if err != nil { - return "" - } - return path -} - -// Chdir changes the current working directory to the named directory. -// If there is an error, it will be of type *PathError. -func Chdir(dir string) (err error) { - err = os.Chdir(dir) - if err != nil { - err = gerror.Wrapf(err, `os.Chdir failed with dir "%s"`, dir) - } - return -} - -// IsFile checks whether given `path` a file, which means it's not a directory. -// Note that it returns false if the `path` does not exist. -func IsFile(path string) bool { - s, err := Stat(path) - if err != nil { - return false - } - return !s.IsDir() -} - -// Stat returns a FileInfo describing the named file. -// If there is an error, it will be of type *PathError. 
-func Stat(path string) (os.FileInfo, error) { - info, err := os.Stat(path) - if err != nil { - err = gerror.Wrapf(err, `os.Stat failed for file "%s"`, path) - } - return info, err -} - -// Move renames (moves) `src` to `dst` path. -// If `dst` already exists and is not a directory, it'll be replaced. -func Move(src string, dst string) (err error) { - err = os.Rename(src, dst) - if err != nil { - err = gerror.Wrapf(err, `os.Rename failed from "%s" to "%s"`, src, dst) - } - return -} - -// Rename is alias of Move. -// See Move. -func Rename(src string, dst string) error { - return Move(src, dst) -} - -// DirNames returns sub-file names of given directory `path`. -// Note that the returned names are NOT absolute paths. -func DirNames(path string) ([]string, error) { - f, err := Open(path) - if err != nil { - return nil, err - } - list, err := f.Readdirnames(-1) - _ = f.Close() - if err != nil { - err = gerror.Wrapf(err, `Read dir files failed from path "%s"`, path) - return nil, err - } - return list, nil -} - -// Glob returns the names of all files matching pattern or nil -// if there is no matching file. The syntax of patterns is the same -// as in Match. The pattern may describe hierarchical names such as -// /usr/*/bin/ed (assuming the Separator is '/'). -// -// Glob ignores file system errors such as I/O errors reading directories. -// The only possible returned error is ErrBadPattern, when pattern -// is malformed. -func Glob(pattern string, onlyNames ...bool) ([]string, error) { - list, err := filepath.Glob(pattern) - if err != nil { - err = gerror.Wrapf(err, `filepath.Glob failed for pattern "%s"`, pattern) - return nil, err - } - if len(onlyNames) > 0 && onlyNames[0] && len(list) > 0 { - array := make([]string, len(list)) - for k, v := range list { - array[k] = Basename(v) - } - return array, nil - } - return list, nil -} - -// Remove deletes all file/directory with `path` parameter. -// If parameter `path` is directory, it deletes it recursively. 
-// -// It does nothing if given `path` does not exist or is empty. -func Remove(path string) (err error) { - // It does nothing if `path` is empty. - if path == "" { - return nil - } - if err = os.RemoveAll(path); err != nil { - err = gerror.Wrapf(err, `os.RemoveAll failed for path "%s"`, path) - } - return -} - -// IsReadable checks whether given `path` is readable. -func IsReadable(path string) bool { - result := true - file, err := os.OpenFile(path, os.O_RDONLY, DefaultPermOpen) - if err != nil { - result = false - } - file.Close() - return result -} - -// IsWritable checks whether given `path` is writable. -// -// TODO improve performance; use golang.org/x/sys to cross-plat-form -func IsWritable(path string) bool { - result := true - if IsDir(path) { - // If it's a directory, create a temporary file to test whether it's writable. - tmpFile := strings.TrimRight(path, Separator) + Separator + gconv.String(time.Now().UnixNano()) - if f, err := Create(tmpFile); err != nil || !Exists(tmpFile) { - result = false - } else { - _ = f.Close() - _ = Remove(tmpFile) - } - } else { - // If it's a file, check if it can open it. - file, err := os.OpenFile(path, os.O_WRONLY, DefaultPermOpen) - if err != nil { - result = false - } - _ = file.Close() - } - return result -} - -// Chmod is alias of os.Chmod. -// See os.Chmod. -func Chmod(path string, mode os.FileMode) (err error) { - err = os.Chmod(path, mode) - if err != nil { - err = gerror.Wrapf(err, `os.Chmod failed with path "%s" and mode "%s"`, path, mode) - } - return -} - -// Abs returns an absolute representation of path. -// If the path is not absolute it will be joined with the current -// working directory to turn it into an absolute path. The absolute -// path name for a given file is not guaranteed to be unique. -// Abs calls Clean on the result. 
-func Abs(path string) string { - p, _ := filepath.Abs(path) - return p -} - -// RealPath converts the given `path` to its absolute path -// and checks if the file path exists. -// If the file does not exist, return an empty string. -func RealPath(path string) string { - p, err := filepath.Abs(path) - if err != nil { - return "" - } - if !Exists(p) { - return "" - } - return p -} - -// SelfPath returns absolute file path of current running process(binary). -func SelfPath() string { - return selfPath -} - -// SelfName returns file name of current running process(binary). -func SelfName() string { - return Basename(SelfPath()) -} - -// SelfDir returns absolute directory path of current running process(binary). -func SelfDir() string { - return filepath.Dir(SelfPath()) -} - -// Basename returns the last element of path, which contains file extension. -// Trailing path separators are removed before extracting the last element. -// If the path is empty, Base returns ".". -// If the path consists entirely of separators, Basename returns a single separator. -// Example: -// /var/www/file.js -> file.js -// file.js -> file.js -func Basename(path string) string { - return filepath.Base(path) -} - -// Name returns the last element of path without file extension. -// Example: -// /var/www/file.js -> file -// file.js -> file -func Name(path string) string { - base := filepath.Base(path) - if i := strings.LastIndexByte(base, '.'); i != -1 { - return base[:i] - } - return base -} - -// Dir returns all but the last element of path, typically the path's directory. -// After dropping the final element, Dir calls Clean on the path and trailing -// slashes are removed. -// If the `path` is empty, Dir returns ".". -// If the `path` is ".", Dir treats the path as current working directory. -// If the `path` consists entirely of separators, Dir returns a single separator. -// The returned path does not end in a separator unless it is the root directory. 
-func Dir(path string) string { - if path == "." { - return filepath.Dir(RealPath(path)) - } - return filepath.Dir(path) -} - -// IsEmpty checks whether the given `path` is empty. -// If `path` is a folder, it checks if there's any file under it. -// If `path` is a file, it checks if the file size is zero. -// -// Note that it returns true if `path` does not exist. -func IsEmpty(path string) bool { - stat, err := Stat(path) - if err != nil { - return true - } - if stat.IsDir() { - file, err := os.Open(path) - if err != nil { - return true - } - defer file.Close() - names, err := file.Readdirnames(-1) - if err != nil { - return true - } - return len(names) == 0 - } else { - return stat.Size() == 0 - } -} - -// Ext returns the file name extension used by path. -// The extension is the suffix beginning at the final dot -// in the final element of path; it is empty if there is -// no dot. -// Note: the result contains symbol '.'. -// Eg: -// main.go => .go -// api.json => .json -func Ext(path string) string { - ext := filepath.Ext(path) - if p := strings.IndexByte(ext, '?'); p != -1 { - ext = ext[0:p] - } - return ext -} - -// ExtName is like function Ext, which returns the file name extension used by path, -// but the result does not contain symbol '.'. -// Eg: -// main.go => go -// api.json => json -func ExtName(path string) string { - return strings.TrimLeft(Ext(path), ".") -} - -// Temp retrieves and returns the temporary directory of current system. -// -// The optional parameter `names` specifies the sub-folders/sub-files, -// which will be joined with current system separator and returned with the path. 
-func Temp(names ...string) string { - path := os.TempDir() - for _, name := range names { - path = Join(path, name) - } - return path -} diff --git a/vendor/github.com/gogf/gf/v2/os/gfile/gfile_cache.go b/vendor/github.com/gogf/gf/v2/os/gfile/gfile_cache.go deleted file mode 100644 index 731cb1ce..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gfile/gfile_cache.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gfile - -import ( - "context" - "time" - - "github.com/gogf/gf/v2/errors/gcode" - "github.com/gogf/gf/v2/errors/gerror" - "github.com/gogf/gf/v2/internal/command" - "github.com/gogf/gf/v2/internal/intlog" - "github.com/gogf/gf/v2/os/gcache" - "github.com/gogf/gf/v2/os/gfsnotify" -) - -const ( - defaultCacheDuration = "1m" // defaultCacheExpire is the expire time for file content caching in seconds. - commandEnvKeyForCache = "gf.gfile.cache" // commandEnvKeyForCache is the configuration key for command argument or environment configuring cache expire duration. -) - -var ( - // Default expire time for file content caching. - cacheDuration = getCacheDuration() - - // internalCache is the memory cache for internal usage. - internalCache = gcache.New() -) - -func getCacheDuration() time.Duration { - cacheDurationConfigured := command.GetOptWithEnv(commandEnvKeyForCache, defaultCacheDuration) - d, err := time.ParseDuration(cacheDurationConfigured) - if err != nil { - panic(gerror.WrapCodef( - gcode.CodeInvalidConfiguration, - err, - `error parsing string "%s" to time duration`, - cacheDurationConfigured, - )) - } - return d -} - -// GetContentsWithCache returns string content of given file by `path` from cache. -// If there's no content in the cache, it will read it from disk file specified by `path`. 
-// The parameter `expire` specifies the caching time for this file content in seconds. -func GetContentsWithCache(path string, duration ...time.Duration) string { - return string(GetBytesWithCache(path, duration...)) -} - -// GetBytesWithCache returns []byte content of given file by `path` from cache. -// If there's no content in the cache, it will read it from disk file specified by `path`. -// The parameter `expire` specifies the caching time for this file content in seconds. -func GetBytesWithCache(path string, duration ...time.Duration) []byte { - var ( - ctx = context.Background() - expire = cacheDuration - cacheKey = commandEnvKeyForCache + path - ) - - if len(duration) > 0 { - expire = duration[0] - } - r, _ := internalCache.GetOrSetFuncLock(ctx, cacheKey, func(ctx context.Context) (interface{}, error) { - b := GetBytes(path) - if b != nil { - // Adding this `path` to gfsnotify, - // it will clear its cache if there's any changes of the file. - _, _ = gfsnotify.Add(path, func(event *gfsnotify.Event) { - _, err := internalCache.Remove(ctx, cacheKey) - if err != nil { - intlog.Errorf(ctx, `%+v`, err) - } - gfsnotify.Exit() - }) - } - return b, nil - }, expire) - if r != nil { - return r.Bytes() - } - return nil -} diff --git a/vendor/github.com/gogf/gf/v2/os/gfile/gfile_contents.go b/vendor/github.com/gogf/gf/v2/os/gfile/gfile_contents.go deleted file mode 100644 index 048136f7..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gfile/gfile_contents.go +++ /dev/null @@ -1,213 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gfile - -import ( - "bufio" - "io" - "os" - - "github.com/gogf/gf/v2/errors/gerror" -) - -var ( - // DefaultReadBuffer is the buffer size for reading file content. 
- DefaultReadBuffer = 1024 -) - -// GetContents returns the file content of `path` as string. -// It returns en empty string if it fails reading. -func GetContents(path string) string { - return string(GetBytes(path)) -} - -// GetBytes returns the file content of `path` as []byte. -// It returns nil if it fails reading. -func GetBytes(path string) []byte { - data, err := os.ReadFile(path) - if err != nil { - return nil - } - return data -} - -// putContents puts binary content to file of `path`. -func putContents(path string, data []byte, flag int, perm os.FileMode) error { - // It supports creating file of `path` recursively. - dir := Dir(path) - if !Exists(dir) { - if err := Mkdir(dir); err != nil { - return err - } - } - // Opening file with given `flag` and `perm`. - f, err := OpenWithFlagPerm(path, flag, perm) - if err != nil { - return err - } - defer f.Close() - // Write data. - var n int - if n, err = f.Write(data); err != nil { - err = gerror.Wrapf(err, `Write data to file "%s" failed`, path) - return err - } else if n < len(data) { - return io.ErrShortWrite - } - return nil -} - -// Truncate truncates file of `path` to given size by `size`. -func Truncate(path string, size int) (err error) { - err = os.Truncate(path, int64(size)) - if err != nil { - err = gerror.Wrapf(err, `os.Truncate failed for file "%s", size "%d"`, path, size) - } - return -} - -// PutContents puts string `content` to file of `path`. -// It creates file of `path` recursively if it does not exist. -func PutContents(path string, content string) error { - return putContents(path, []byte(content), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, DefaultPermOpen) -} - -// PutContentsAppend appends string `content` to file of `path`. -// It creates file of `path` recursively if it does not exist. 
-func PutContentsAppend(path string, content string) error { - return putContents(path, []byte(content), os.O_WRONLY|os.O_CREATE|os.O_APPEND, DefaultPermOpen) -} - -// PutBytes puts binary `content` to file of `path`. -// It creates file of `path` recursively if it does not exist. -func PutBytes(path string, content []byte) error { - return putContents(path, content, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, DefaultPermOpen) -} - -// PutBytesAppend appends binary `content` to file of `path`. -// It creates file of `path` recursively if it does not exist. -func PutBytesAppend(path string, content []byte) error { - return putContents(path, content, os.O_WRONLY|os.O_CREATE|os.O_APPEND, DefaultPermOpen) -} - -// GetNextCharOffset returns the file offset for given `char` starting from `start`. -func GetNextCharOffset(reader io.ReaderAt, char byte, start int64) int64 { - buffer := make([]byte, DefaultReadBuffer) - offset := start - for { - if n, err := reader.ReadAt(buffer, offset); n > 0 { - for i := 0; i < n; i++ { - if buffer[i] == char { - return int64(i) + offset - } - } - offset += int64(n) - } else if err != nil { - break - } - } - return -1 -} - -// GetNextCharOffsetByPath returns the file offset for given `char` starting from `start`. -// It opens file of `path` for reading with os.O_RDONLY flag and default perm. -func GetNextCharOffsetByPath(path string, char byte, start int64) int64 { - if f, err := OpenWithFlagPerm(path, os.O_RDONLY, DefaultPermOpen); err == nil { - defer f.Close() - return GetNextCharOffset(f, char, start) - } - return -1 -} - -// GetBytesTilChar returns the contents of the file as []byte -// until the next specified byte `char` position. -// -// Note: Returned value contains the character of the last position. 
-func GetBytesTilChar(reader io.ReaderAt, char byte, start int64) ([]byte, int64) { - if offset := GetNextCharOffset(reader, char, start); offset != -1 { - return GetBytesByTwoOffsets(reader, start, offset+1), offset - } - return nil, -1 -} - -// GetBytesTilCharByPath returns the contents of the file given by `path` as []byte -// until the next specified byte `char` position. -// It opens file of `path` for reading with os.O_RDONLY flag and default perm. -// -// Note: Returned value contains the character of the last position. -func GetBytesTilCharByPath(path string, char byte, start int64) ([]byte, int64) { - if f, err := OpenWithFlagPerm(path, os.O_RDONLY, DefaultPermOpen); err == nil { - defer f.Close() - return GetBytesTilChar(f, char, start) - } - return nil, -1 -} - -// GetBytesByTwoOffsets returns the binary content as []byte from `start` to `end`. -// Note: Returned value does not contain the character of the last position, which means -// it returns content range as [start, end). -func GetBytesByTwoOffsets(reader io.ReaderAt, start int64, end int64) []byte { - buffer := make([]byte, end-start) - if _, err := reader.ReadAt(buffer, start); err != nil { - return nil - } - return buffer -} - -// GetBytesByTwoOffsetsByPath returns the binary content as []byte from `start` to `end`. -// Note: Returned value does not contain the character of the last position, which means -// it returns content range as [start, end). -// It opens file of `path` for reading with os.O_RDONLY flag and default perm. -func GetBytesByTwoOffsetsByPath(path string, start int64, end int64) []byte { - if f, err := OpenWithFlagPerm(path, os.O_RDONLY, DefaultPermOpen); err == nil { - defer f.Close() - return GetBytesByTwoOffsets(f, start, end) - } - return nil -} - -// ReadLines reads file content line by line, which is passed to the callback function `callback` as string. -// It matches each line of text, separated by chars '\r' or '\n', stripped any trailing end-of-line marker. 
-// -// Note that the parameter passed to callback function might be an empty value, and the last non-empty line -// will be passed to callback function `callback` even if it has no newline marker. -func ReadLines(file string, callback func(line string) error) error { - f, err := Open(file) - if err != nil { - return err - } - defer f.Close() - - scanner := bufio.NewScanner(f) - for scanner.Scan() { - if err = callback(scanner.Text()); err != nil { - return err - } - } - return nil -} - -// ReadLinesBytes reads file content line by line, which is passed to the callback function `callback` as []byte. -// It matches each line of text, separated by chars '\r' or '\n', stripped any trailing end-of-line marker. -// -// Note that the parameter passed to callback function might be an empty value, and the last non-empty line -// will be passed to callback function `callback` even if it has no newline marker. -func ReadLinesBytes(file string, callback func(bytes []byte) error) error { - f, err := Open(file) - if err != nil { - return err - } - defer f.Close() - - scanner := bufio.NewScanner(f) - for scanner.Scan() { - if err = callback(scanner.Bytes()); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/gogf/gf/v2/os/gfile/gfile_copy.go b/vendor/github.com/gogf/gf/v2/os/gfile/gfile_copy.go deleted file mode 100644 index c7f6dabd..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gfile/gfile_copy.go +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gfile - -import ( - "io" - "os" - "path/filepath" - - "github.com/gogf/gf/v2/errors/gcode" - "github.com/gogf/gf/v2/errors/gerror" -) - -// Copy file/directory from `src` to `dst`. 
-// -// If `src` is file, it calls CopyFile to implements copy feature, -// or else it calls CopyDir. -func Copy(src string, dst string) error { - if src == "" { - return gerror.NewCode(gcode.CodeInvalidParameter, "source path cannot be empty") - } - if dst == "" { - return gerror.NewCode(gcode.CodeInvalidParameter, "destination path cannot be empty") - } - if IsFile(src) { - return CopyFile(src, dst) - } - return CopyDir(src, dst) -} - -// CopyFile copies the contents of the file named `src` to the file named -// by `dst`. The file will be created if it does not exist. If the -// destination file exists, all it's contents will be replaced by the contents -// of the source file. The file mode will be copied from the source and -// the copied data is synced/flushed to stable storage. -// Thanks: https://gist.github.com/r0l1/92462b38df26839a3ca324697c8cba04 -func CopyFile(src, dst string) (err error) { - if src == "" { - return gerror.NewCode(gcode.CodeInvalidParameter, "source file cannot be empty") - } - if dst == "" { - return gerror.NewCode(gcode.CodeInvalidParameter, "destination file cannot be empty") - } - // If src and dst are the same path, it does nothing. 
- if src == dst { - return nil - } - var inFile *os.File - inFile, err = Open(src) - if err != nil { - return - } - defer func() { - if e := inFile.Close(); e != nil { - err = gerror.Wrapf(e, `file close failed for "%s"`, src) - } - }() - var outFile *os.File - outFile, err = Create(dst) - if err != nil { - return - } - defer func() { - if e := outFile.Close(); e != nil { - err = gerror.Wrapf(e, `file close failed for "%s"`, dst) - } - }() - if _, err = io.Copy(outFile, inFile); err != nil { - err = gerror.Wrapf(err, `io.Copy failed from "%s" to "%s"`, src, dst) - return - } - if err = outFile.Sync(); err != nil { - err = gerror.Wrapf(err, `file sync failed for file "%s"`, dst) - return - } - if err = Chmod(dst, DefaultPermCopy); err != nil { - return - } - return -} - -// CopyDir recursively copies a directory tree, attempting to preserve permissions. -// -// Note that, the Source directory must exist and symlinks are ignored and skipped. -func CopyDir(src string, dst string) (err error) { - if src == "" { - return gerror.NewCode(gcode.CodeInvalidParameter, "source directory cannot be empty") - } - if dst == "" { - return gerror.NewCode(gcode.CodeInvalidParameter, "destination directory cannot be empty") - } - // If src and dst are the same path, it does nothing. 
- if src == dst { - return nil - } - src = filepath.Clean(src) - dst = filepath.Clean(dst) - si, err := Stat(src) - if err != nil { - return err - } - if !si.IsDir() { - return gerror.NewCode(gcode.CodeInvalidParameter, "source is not a directory") - } - if !Exists(dst) { - if err = os.MkdirAll(dst, DefaultPermCopy); err != nil { - err = gerror.Wrapf(err, `create directory failed for path "%s", perm "%s"`, dst, DefaultPermCopy) - return - } - } - entries, err := os.ReadDir(src) - if err != nil { - err = gerror.Wrapf(err, `read directory failed for path "%s"`, src) - return - } - for _, entry := range entries { - srcPath := filepath.Join(src, entry.Name()) - dstPath := filepath.Join(dst, entry.Name()) - if entry.IsDir() { - if err = CopyDir(srcPath, dstPath); err != nil { - return - } - } else { - // Skip symlinks. - if entry.Type()&os.ModeSymlink != 0 { - continue - } - if err = CopyFile(srcPath, dstPath); err != nil { - return - } - } - } - return -} diff --git a/vendor/github.com/gogf/gf/v2/os/gfile/gfile_home.go b/vendor/github.com/gogf/gf/v2/os/gfile/gfile_home.go deleted file mode 100644 index 817e74bf..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gfile/gfile_home.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gfile - -import ( - "bytes" - "os" - "os/exec" - "os/user" - "runtime" - "strings" - - "github.com/gogf/gf/v2/errors/gerror" -) - -// Home returns absolute path of current user's home directory. -// The optional parameter `names` specifies the sub-folders/sub-files, -// which will be joined with current system separator and returned with the path. 
-func Home(names ...string) (string, error) { - path, err := getHomePath() - if err != nil { - return "", err - } - for _, name := range names { - path += Separator + name - } - return path, nil -} - -// getHomePath returns absolute path of current user's home directory. -func getHomePath() (string, error) { - u, err := user.Current() - if nil == err { - return u.HomeDir, nil - } - if runtime.GOOS == "windows" { - return homeWindows() - } - return homeUnix() -} - -// homeUnix retrieves and returns the home on unix system. -func homeUnix() (string, error) { - if home := os.Getenv("HOME"); home != "" { - return home, nil - } - var stdout bytes.Buffer - cmd := exec.Command("sh", "-c", "eval echo ~$USER") - cmd.Stdout = &stdout - if err := cmd.Run(); err != nil { - err = gerror.Wrapf(err, `retrieve home directory failed`) - return "", err - } - - result := strings.TrimSpace(stdout.String()) - if result == "" { - return "", gerror.New("blank output when reading home directory") - } - - return result, nil -} - -// homeWindows retrieves and returns the home on windows system. -func homeWindows() (string, error) { - var ( - drive = os.Getenv("HOMEDRIVE") - path = os.Getenv("HOMEPATH") - home = drive + path - ) - if drive == "" || path == "" { - home = os.Getenv("USERPROFILE") - } - if home == "" { - return "", gerror.New("environment keys HOMEDRIVE, HOMEPATH and USERPROFILE are empty") - } - - return home, nil -} diff --git a/vendor/github.com/gogf/gf/v2/os/gfile/gfile_replace.go b/vendor/github.com/gogf/gf/v2/os/gfile/gfile_replace.go deleted file mode 100644 index 3cb7b689..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gfile/gfile_replace.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. 
- -package gfile - -import ( - "github.com/gogf/gf/v2/text/gstr" -) - -// ReplaceFile replaces content for file `path`. -func ReplaceFile(search, replace, path string) error { - return PutContents(path, gstr.Replace(GetContents(path), search, replace)) -} - -// ReplaceFileFunc replaces content for file `path` with callback function `f`. -func ReplaceFileFunc(f func(path, content string) string, path string) error { - data := GetContents(path) - result := f(path, data) - if len(data) != len(result) && data != result { - return PutContents(path, result) - } - return nil -} - -// ReplaceDir replaces content for files under `path`. -// The parameter `pattern` specifies the file pattern which matches to be replaced. -// It does replacement recursively if given parameter `recursive` is true. -func ReplaceDir(search, replace, path, pattern string, recursive ...bool) error { - files, err := ScanDirFile(path, pattern, recursive...) - if err != nil { - return err - } - for _, file := range files { - if err = ReplaceFile(search, replace, file); err != nil { - return err - } - } - return err -} - -// ReplaceDirFunc replaces content for files under `path` with callback function `f`. -// The parameter `pattern` specifies the file pattern which matches to be replaced. -// It does replacement recursively if given parameter `recursive` is true. -func ReplaceDirFunc(f func(path, content string) string, path, pattern string, recursive ...bool) error { - files, err := ScanDirFile(path, pattern, recursive...) - if err != nil { - return err - } - for _, file := range files { - if err = ReplaceFileFunc(f, file); err != nil { - return err - } - } - return err -} diff --git a/vendor/github.com/gogf/gf/v2/os/gfile/gfile_scan.go b/vendor/github.com/gogf/gf/v2/os/gfile/gfile_scan.go deleted file mode 100644 index deda5b6d..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gfile/gfile_scan.go +++ /dev/null @@ -1,184 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. 
-// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gfile - -import ( - "path/filepath" - "sort" - - "github.com/gogf/gf/v2/errors/gerror" - "github.com/gogf/gf/v2/text/gstr" -) - -const ( - // Max recursive depth for directory scanning. - maxScanDepth = 100000 -) - -// ScanDir returns all sub-files with absolute paths of given `path`, -// It scans directory recursively if given parameter `recursive` is true. -// -// The pattern parameter `pattern` supports multiple file name patterns, -// using the ',' symbol to separate multiple patterns. -func ScanDir(path string, pattern string, recursive ...bool) ([]string, error) { - isRecursive := false - if len(recursive) > 0 { - isRecursive = recursive[0] - } - list, err := doScanDir(0, path, pattern, isRecursive, nil) - if err != nil { - return nil, err - } - if len(list) > 0 { - sort.Strings(list) - } - return list, nil -} - -// ScanDirFunc returns all sub-files with absolute paths of given `path`, -// It scans directory recursively if given parameter `recursive` is true. -// -// The pattern parameter `pattern` supports multiple file name patterns, using the ',' -// symbol to separate multiple patterns. -// -// The parameter `recursive` specifies whether scanning the `path` recursively, which -// means it scans its sub-files and appends the files path to result array if the sub-file -// is also a folder. It is false in default. -// -// The parameter `handler` specifies the callback function handling each sub-file path of -// the `path` and its sub-folders. It ignores the sub-file path if `handler` returns an empty -// string, or else it appends the sub-file path to result slice. 
-func ScanDirFunc(path string, pattern string, recursive bool, handler func(path string) string) ([]string, error) { - list, err := doScanDir(0, path, pattern, recursive, handler) - if err != nil { - return nil, err - } - if len(list) > 0 { - sort.Strings(list) - } - return list, nil -} - -// ScanDirFile returns all sub-files with absolute paths of given `path`, -// It scans directory recursively if given parameter `recursive` is true. -// -// The pattern parameter `pattern` supports multiple file name patterns, -// using the ',' symbol to separate multiple patterns. -// -// Note that it returns only files, exclusive of directories. -func ScanDirFile(path string, pattern string, recursive ...bool) ([]string, error) { - isRecursive := false - if len(recursive) > 0 { - isRecursive = recursive[0] - } - list, err := doScanDir(0, path, pattern, isRecursive, func(path string) string { - if IsDir(path) { - return "" - } - return path - }) - if err != nil { - return nil, err - } - if len(list) > 0 { - sort.Strings(list) - } - return list, nil -} - -// ScanDirFileFunc returns all sub-files with absolute paths of given `path`, -// It scans directory recursively if given parameter `recursive` is true. -// -// The pattern parameter `pattern` supports multiple file name patterns, using the ',' -// symbol to separate multiple patterns. -// -// The parameter `recursive` specifies whether scanning the `path` recursively, which -// means it scans its sub-files and appends the file paths to result array if the sub-file -// is also a folder. It is false in default. -// -// The parameter `handler` specifies the callback function handling each sub-file path of -// the `path` and its sub-folders. It ignores the sub-file path if `handler` returns an empty -// string, or else it appends the sub-file path to result slice. -// -// Note that the parameter `path` for `handler` is not a directory but a file. -// It returns only files, exclusive of directories. 
-func ScanDirFileFunc(path string, pattern string, recursive bool, handler func(path string) string) ([]string, error) { - list, err := doScanDir(0, path, pattern, recursive, func(path string) string { - if IsDir(path) { - return "" - } - return handler(path) - }) - if err != nil { - return nil, err - } - if len(list) > 0 { - sort.Strings(list) - } - return list, nil -} - -// doScanDir is an internal method which scans directory and returns the absolute path -// list of files that are not sorted. -// -// The pattern parameter `pattern` supports multiple file name patterns, using the ',' -// symbol to separate multiple patterns. -// -// The parameter `recursive` specifies whether scanning the `path` recursively, which -// means it scans its sub-files and appends the files path to result array if the sub-file -// is also a folder. It is false in default. -// -// The parameter `handler` specifies the callback function handling each sub-file path of -// the `path` and its sub-folders. It ignores the sub-file path if `handler` returns an empty -// string, or else it appends the sub-file path to result slice. -func doScanDir(depth int, path string, pattern string, recursive bool, handler func(path string) string) ([]string, error) { - if depth >= maxScanDepth { - return nil, gerror.Newf("directory scanning exceeds max recursive depth: %d", maxScanDepth) - } - var ( - list []string - file, err = Open(path) - ) - if err != nil { - return nil, err - } - defer file.Close() - names, err := file.Readdirnames(-1) - if err != nil { - err = gerror.Wrapf(err, `read directory files failed from path "%s"`, path) - return nil, err - } - var ( - filePath string - patterns = gstr.SplitAndTrim(pattern, ",") - ) - for _, name := range names { - filePath = path + Separator + name - if IsDir(filePath) && recursive { - array, _ := doScanDir(depth+1, filePath, pattern, true, handler) - if len(array) > 0 { - list = append(list, array...) - } - } - // Handler filtering. 
- if handler != nil { - filePath = handler(filePath) - if filePath == "" { - continue - } - } - // If it meets pattern, then add it to the result list. - for _, p := range patterns { - if match, _ := filepath.Match(p, name); match { - if filePath = Abs(filePath); filePath != "" { - list = append(list, filePath) - } - } - } - } - return list, nil -} diff --git a/vendor/github.com/gogf/gf/v2/os/gfile/gfile_search.go b/vendor/github.com/gogf/gf/v2/os/gfile/gfile_search.go deleted file mode 100644 index a0d06999..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gfile/gfile_search.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gfile - -import ( - "bytes" - "fmt" - - "github.com/gogf/gf/v2/container/garray" - "github.com/gogf/gf/v2/errors/gerror" -) - -// Search searches file by name `name` in following paths with priority: -// prioritySearchPaths, Pwd()、SelfDir()、MainPkgPath(). -// It returns the absolute file path of `name` if found, or en empty string if not found. -func Search(name string, prioritySearchPaths ...string) (realPath string, err error) { - // Check if it's an absolute path. - realPath = RealPath(name) - if realPath != "" { - return - } - // Search paths array. - array := garray.NewStrArray() - array.Append(prioritySearchPaths...) - array.Append(Pwd(), SelfDir()) - if path := MainPkgPath(); path != "" { - array.Append(path) - } - // Remove repeated items. - array.Unique() - // Do the searching. - array.RLockFunc(func(array []string) { - path := "" - for _, v := range array { - path = RealPath(v + Separator + name) - if path != "" { - realPath = path - break - } - } - }) - // If it fails searching, it returns formatted error. 
- if realPath == "" { - buffer := bytes.NewBuffer(nil) - buffer.WriteString(fmt.Sprintf(`cannot find "%s" in following paths:`, name)) - array.RLockFunc(func(array []string) { - for k, v := range array { - buffer.WriteString(fmt.Sprintf("\n%d. %s", k+1, v)) - } - }) - err = gerror.New(buffer.String()) - } - return -} diff --git a/vendor/github.com/gogf/gf/v2/os/gfile/gfile_size.go b/vendor/github.com/gogf/gf/v2/os/gfile/gfile_size.go deleted file mode 100644 index fb8fca76..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gfile/gfile_size.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gfile - -import ( - "fmt" - "os" - "strconv" - "strings" -) - -// Size returns the size of file specified by `path` in byte. -func Size(path string) int64 { - s, e := os.Stat(path) - if e != nil { - return 0 - } - return s.Size() -} - -// SizeFormat returns the size of file specified by `path` in format string. -func SizeFormat(path string) string { - return FormatSize(Size(path)) -} - -// ReadableSize formats size of file given by `path`, for more human readable. -func ReadableSize(path string) string { - return FormatSize(Size(path)) -} - -// StrToSize converts formatted size string to its size in bytes. -func StrToSize(sizeStr string) int64 { - i := 0 - for ; i < len(sizeStr); i++ { - if sizeStr[i] == '.' 
|| (sizeStr[i] >= '0' && sizeStr[i] <= '9') { - continue - } else { - break - } - } - var ( - unit = sizeStr[i:] - number, _ = strconv.ParseFloat(sizeStr[:i], 64) - ) - if unit == "" { - return int64(number) - } - switch strings.ToLower(unit) { - case "b", "bytes": - return int64(number) - case "k", "kb", "ki", "kib", "kilobyte": - return int64(number * 1024) - case "m", "mb", "mi", "mib", "mebibyte": - return int64(number * 1024 * 1024) - case "g", "gb", "gi", "gib", "gigabyte": - return int64(number * 1024 * 1024 * 1024) - case "t", "tb", "ti", "tib", "terabyte": - return int64(number * 1024 * 1024 * 1024 * 1024) - case "p", "pb", "pi", "pib", "petabyte": - return int64(number * 1024 * 1024 * 1024 * 1024 * 1024) - case "e", "eb", "ei", "eib", "exabyte": - return int64(number * 1024 * 1024 * 1024 * 1024 * 1024 * 1024) - case "z", "zb", "zi", "zib", "zettabyte": - return int64(number * 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024) - case "y", "yb", "yi", "yib", "yottabyte": - return int64(number * 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024) - case "bb", "brontobyte": - return int64(number * 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024) - } - return -1 -} - -// FormatSize formats size `raw` for more manually readable. 
-func FormatSize(raw int64) string { - var r float64 = float64(raw) - var t float64 = 1024 - var d float64 = 1 - if r < t { - return fmt.Sprintf("%.2fB", r/d) - } - d *= 1024 - t *= 1024 - if r < t { - return fmt.Sprintf("%.2fK", r/d) - } - d *= 1024 - t *= 1024 - if r < t { - return fmt.Sprintf("%.2fM", r/d) - } - d *= 1024 - t *= 1024 - if r < t { - return fmt.Sprintf("%.2fG", r/d) - } - d *= 1024 - t *= 1024 - if r < t { - return fmt.Sprintf("%.2fT", r/d) - } - d *= 1024 - t *= 1024 - if r < t { - return fmt.Sprintf("%.2fP", r/d) - } - d *= 1024 - t *= 1024 - if r < t { - return fmt.Sprintf("%.2fE", r/d) - } - d *= 1024 - t *= 1024 - if r < t { - return fmt.Sprintf("%.2fZ", r/d) - } - d *= 1024 - t *= 1024 - if r < t { - return fmt.Sprintf("%.2fY", r/d) - } - d *= 1024 - t *= 1024 - if r < t { - return fmt.Sprintf("%.2fBB", r/d) - } - return "TooLarge" -} diff --git a/vendor/github.com/gogf/gf/v2/os/gfile/gfile_sort.go b/vendor/github.com/gogf/gf/v2/os/gfile/gfile_sort.go deleted file mode 100644 index 3772cb07..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gfile/gfile_sort.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gfile - -import ( - "strings" - - "github.com/gogf/gf/v2/container/garray" -) - -// fileSortFunc is the comparison function for files. -// It sorts the array in order of: directory -> file. -// If `path1` and `path2` are the same type, it then sorts them as strings. 
-func fileSortFunc(path1, path2 string) int { - isDirPath1 := IsDir(path1) - isDirPath2 := IsDir(path2) - if isDirPath1 && !isDirPath2 { - return -1 - } - if !isDirPath1 && isDirPath2 { - return 1 - } - if n := strings.Compare(path1, path2); n != 0 { - return n - } else { - return -1 - } -} - -// SortFiles sorts the `files` in order of: directory -> file. -// Note that the item of `files` should be absolute path. -func SortFiles(files []string) []string { - array := garray.NewSortedStrArrayComparator(fileSortFunc) - array.Add(files...) - return array.Slice() -} diff --git a/vendor/github.com/gogf/gf/v2/os/gfile/gfile_source.go b/vendor/github.com/gogf/gf/v2/os/gfile/gfile_source.go deleted file mode 100644 index d77f0b9b..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gfile/gfile_source.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gfile - -import ( - "os" - "runtime" - "strings" - - "github.com/gogf/gf/v2/text/gregex" - "github.com/gogf/gf/v2/text/gstr" -) - -var ( - // goRootForFilter is used for stack filtering purpose. - goRootForFilter = runtime.GOROOT() -) - -func init() { - if goRootForFilter != "" { - goRootForFilter = strings.ReplaceAll(goRootForFilter, "\\", "/") - } -} - -// MainPkgPath returns absolute file path of package main, -// which contains the entrance function main. -// -// It's only available in develop environment. -// -// Note1: Only valid for source development environments, -// IE only valid for systems that generate this executable. -// -// Note2: When the method is called for the first time, if it is in an asynchronous goroutine, -// the method may not get the main package path. -func MainPkgPath() string { - // It is only for source development environments. 
- if goRootForFilter == "" { - return "" - } - path := mainPkgPath.Val() - if path != "" { - return path - } - var lastFile string - for i := 1; i < 10000; i++ { - if pc, file, _, ok := runtime.Caller(i); ok { - if goRootForFilter != "" && len(file) >= len(goRootForFilter) && file[0:len(goRootForFilter)] == goRootForFilter { - continue - } - if Ext(file) != ".go" { - continue - } - lastFile = file - // Check if it is called in package initialization function, - // in which it here cannot retrieve main package path, - // it so just returns that can make next check. - if fn := runtime.FuncForPC(pc); fn != nil { - array := gstr.Split(fn.Name(), ".") - if array[0] != "main" { - continue - } - } - if gregex.IsMatchString(`package\s+main\s+`, GetContents(file)) { - mainPkgPath.Set(Dir(file)) - return Dir(file) - } - } else { - break - } - } - // If it still cannot find the path of the package main, - // it recursively searches the directory and its parents directory of the last go file. - // It's usually necessary for uint testing cases of business project. - if lastFile != "" { - for path = Dir(lastFile); len(path) > 1 && Exists(path) && path[len(path)-1] != os.PathSeparator; { - files, _ := ScanDir(path, "*.go") - for _, v := range files { - if gregex.IsMatchString(`package\s+main\s+`, GetContents(v)) { - mainPkgPath.Set(path) - return path - } - } - path = Dir(path) - } - } - return "" -} diff --git a/vendor/github.com/gogf/gf/v2/os/gfile/gfile_time.go b/vendor/github.com/gogf/gf/v2/os/gfile/gfile_time.go deleted file mode 100644 index 21053b73..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gfile/gfile_time.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. 
- -package gfile - -import ( - "os" - "time" -) - -// MTime returns the modification time of file given by `path` in second. -func MTime(path string) time.Time { - s, e := os.Stat(path) - if e != nil { - return time.Time{} - } - return s.ModTime() -} - -// MTimestamp returns the modification time of file given by `path` in second. -func MTimestamp(path string) int64 { - mtime := MTime(path) - if mtime.IsZero() { - return -1 - } - return mtime.Unix() -} - -// MTimestampMilli returns the modification time of file given by `path` in millisecond. -func MTimestampMilli(path string) int64 { - mtime := MTime(path) - if mtime.IsZero() { - return -1 - } - return mtime.UnixNano() / 1000000 -} diff --git a/vendor/github.com/gogf/gf/v2/os/gfpool/gfpool.go b/vendor/github.com/gogf/gf/v2/os/gfpool/gfpool.go deleted file mode 100644 index 5eec01b2..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gfpool/gfpool.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -// Package gfpool provides io-reusable pool for file pointer. -package gfpool - -import ( - "os" - "time" - - "github.com/gogf/gf/v2/container/gmap" - "github.com/gogf/gf/v2/container/gpool" - "github.com/gogf/gf/v2/container/gtype" -) - -// Pool pointer pool. -type Pool struct { - id *gtype.Int // Pool id, which is used to mark this pool whether recreated. - pool *gpool.Pool // Underlying pool. - init *gtype.Bool // Whether initialized, used for marking this file added to fsnotify, and it can only be added just once. - ttl time.Duration // Time to live for file pointer items. -} - -// File is an item in the pool. -type File struct { - *os.File // Underlying file pointer. - stat os.FileInfo // State of current file pointer. 
- pid int // Belonging pool id, which is set when file pointer created. It's used to check whether the pool is recreated. - pool *Pool // Belonging ool. - flag int // Flash for opening file. - perm os.FileMode // Permission for opening file. - path string // Absolute path of the file. -} - -var ( - // Global file pointer pool. - pools = gmap.NewStrAnyMap(true) -) diff --git a/vendor/github.com/gogf/gf/v2/os/gfpool/gfpool_file.go b/vendor/github.com/gogf/gf/v2/os/gfpool/gfpool_file.go deleted file mode 100644 index 052866d6..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gfpool/gfpool_file.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gfpool - -import ( - "fmt" - "os" - "time" - - "github.com/gogf/gf/v2/errors/gerror" -) - -// Open creates and returns a file item with given file path, flag and opening permission. -// It automatically creates an associated file pointer pool internally when it's called first time. -// It retrieves a file item from the file pointer pool after then. -func Open(path string, flag int, perm os.FileMode, ttl ...time.Duration) (file *File, err error) { - var fpTTL time.Duration - if len(ttl) > 0 { - fpTTL = ttl[0] - } - // DO NOT search the path here wasting performance! - // Leave following codes just for warning you. - // - // path, err = gfile.Search(path) - // if err != nil { - // return nil, err - // } - pool := pools.GetOrSetFuncLock( - fmt.Sprintf("%s&%d&%d&%d", path, flag, fpTTL, perm), - func() interface{} { - return New(path, flag, perm, fpTTL) - }, - ).(*Pool) - - return pool.File() -} - -// Get returns a file item with given file path, flag and opening permission. -// It retrieves a file item from the file pointer pool after then. 
-func Get(path string, flag int, perm os.FileMode, ttl ...time.Duration) (file *File) { - var fpTTL time.Duration - if len(ttl) > 0 { - fpTTL = ttl[0] - } - - f, found := pools.Search(fmt.Sprintf("%s&%d&%d&%d", path, flag, fpTTL, perm)) - if !found { - return nil - } - - fp, _ := f.(*Pool).pool.Get() - return fp.(*File) -} - -// Stat returns the FileInfo structure describing file. -func (f *File) Stat() (os.FileInfo, error) { - if f.stat == nil { - return nil, gerror.New("file stat is empty") - } - return f.stat, nil -} - -// Close puts the file pointer back to the file pointer pool. -func (f *File) Close(close ...bool) error { - if len(close) > 0 && close[0] { - f.File.Close() - } - - if f.pid == f.pool.id.Val() { - return f.pool.pool.Put(f) - } - return nil -} diff --git a/vendor/github.com/gogf/gf/v2/os/gfpool/gfpool_pool.go b/vendor/github.com/gogf/gf/v2/os/gfpool/gfpool_pool.go deleted file mode 100644 index c8d9942a..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gfpool/gfpool_pool.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gfpool - -import ( - "os" - "time" - - "github.com/gogf/gf/v2/container/gpool" - "github.com/gogf/gf/v2/container/gtype" - "github.com/gogf/gf/v2/errors/gerror" - "github.com/gogf/gf/v2/os/gfsnotify" -) - -// New creates and returns a file pointer pool with given file path, flag and opening permission. -// -// Note the expiration logic: -// ttl = 0 : not expired; -// ttl < 0 : immediate expired after use; -// ttl > 0 : timeout expired; -// It is not expired in default. 
-func New(path string, flag int, perm os.FileMode, ttl ...time.Duration) *Pool { - var fpTTL time.Duration - if len(ttl) > 0 { - fpTTL = ttl[0] - } - p := &Pool{ - id: gtype.NewInt(), - ttl: fpTTL, - init: gtype.NewBool(), - } - p.pool = newFilePool(p, path, flag, perm, fpTTL) - return p -} - -// newFilePool creates and returns a file pointer pool with given file path, flag and opening permission. -func newFilePool(p *Pool, path string, flag int, perm os.FileMode, ttl time.Duration) *gpool.Pool { - pool := gpool.New(ttl, func() (interface{}, error) { - file, err := os.OpenFile(path, flag, perm) - if err != nil { - err = gerror.Wrapf(err, `os.OpenFile failed for file "%s", flag "%d", perm "%s"`, path, flag, perm) - return nil, err - } - return &File{ - File: file, - pid: p.id.Val(), - pool: p, - flag: flag, - perm: perm, - path: path, - }, nil - }, func(i interface{}) { - _ = i.(*File).File.Close() - }) - return pool -} - -// File retrieves file item from the file pointer pool and returns it. It creates one if -// the file pointer pool is empty. -// Note that it should be closed when it will never be used. When it's closed, it is not -// really closed the underlying file pointer but put back to the file pointer pool. -func (p *Pool) File() (*File, error) { - if v, err := p.pool.Get(); err != nil { - return nil, err - } else { - f := v.(*File) - f.stat, err = os.Stat(f.path) - if f.flag&os.O_CREATE > 0 { - if os.IsNotExist(err) { - if f.File, err = os.OpenFile(f.path, f.flag, f.perm); err != nil { - return nil, err - } else { - // Retrieve the state of the new created file. 
- if f.stat, err = f.File.Stat(); err != nil { - return nil, err - } - } - } - } - if f.flag&os.O_TRUNC > 0 { - if f.stat.Size() > 0 { - if err = f.Truncate(0); err != nil { - return nil, err - } - } - } - if f.flag&os.O_APPEND > 0 { - if _, err = f.Seek(0, 2); err != nil { - return nil, err - } - } else { - if _, err = f.Seek(0, 0); err != nil { - return nil, err - } - } - // It firstly checks using !p.init.Val() for performance purpose. - if !p.init.Val() && p.init.Cas(false, true) { - _, _ = gfsnotify.Add(f.path, func(event *gfsnotify.Event) { - // If the file is removed or renamed, recreates the pool by increasing the pool id. - if event.IsRemove() || event.IsRename() { - // It drops the old pool. - p.id.Add(1) - // Clears the pool items staying in the pool. - p.pool.Clear() - // It uses another adding to drop the file items between the two adding. - // Whenever the pool id changes, the pool will be recreated. - p.id.Add(1) - } - }, false) - } - return f, nil - } -} - -// Close closes current file pointer pool. -func (p *Pool) Close() { - p.pool.Close() -} diff --git a/vendor/github.com/gogf/gf/v2/os/gfsnotify/gfsnotify.go b/vendor/github.com/gogf/gf/v2/os/gfsnotify/gfsnotify.go deleted file mode 100644 index be58e2c3..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gfsnotify/gfsnotify.go +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -// Package gfsnotify provides a platform-independent interface for file system notifications. 
-package gfsnotify - -import ( - "context" - "sync" - "time" - - "github.com/fsnotify/fsnotify" - - "github.com/gogf/gf/v2/container/glist" - "github.com/gogf/gf/v2/container/gmap" - "github.com/gogf/gf/v2/container/gqueue" - "github.com/gogf/gf/v2/container/gset" - "github.com/gogf/gf/v2/container/gtype" - "github.com/gogf/gf/v2/errors/gcode" - "github.com/gogf/gf/v2/errors/gerror" - "github.com/gogf/gf/v2/internal/intlog" - "github.com/gogf/gf/v2/os/gcache" -) - -// Watcher is the monitor for file changes. -type Watcher struct { - watcher *fsnotify.Watcher // Underlying fsnotify object. - events *gqueue.Queue // Used for internal event management. - cache *gcache.Cache // Used for repeated event filter. - nameSet *gset.StrSet // Used for AddOnce feature. - callbacks *gmap.StrAnyMap // Path(file/folder) to callbacks mapping. - closeChan chan struct{} // Used for watcher closing notification. -} - -// Callback is the callback function for Watcher. -type Callback struct { - Id int // Unique id for callback object. - Func func(event *Event) // Callback function. - Path string // Bound file path (absolute). - name string // Registered name for AddOnce. - elem *glist.Element // Element in the callbacks of watcher. - recursive bool // Is bound to path recursively or not. -} - -// Event is the event produced by underlying fsnotify. -type Event struct { - event fsnotify.Event // Underlying event. - Path string // Absolute file path. - Op Op // File operation. - Watcher *Watcher // Parent watcher. -} - -// Op is the bits union for file operations. -type Op uint32 - -// internalPanic is the custom panic for internal usage. -type internalPanic string - -const ( - CREATE Op = 1 << iota - WRITE - REMOVE - RENAME - CHMOD -) - -const ( - repeatEventFilterDuration = time.Millisecond // Duration for repeated event filter. - callbackExitEventPanicStr internalPanic = "exit" // Custom exit event for internal usage. 
-) - -var ( - mu sync.Mutex // Mutex for concurrent safety of defaultWatcher. - defaultWatcher *Watcher // Default watcher. - callbackIdMap = gmap.NewIntAnyMap(true) // Id to callback mapping. - callbackIdGenerator = gtype.NewInt() // Atomic id generator for callback. -) - -// New creates and returns a new watcher. -// Note that the watcher number is limited by the file handle setting of the system. -// Eg: fs.inotify.max_user_instances system variable in linux systems. -func New() (*Watcher, error) { - w := &Watcher{ - cache: gcache.New(), - events: gqueue.New(), - nameSet: gset.NewStrSet(true), - closeChan: make(chan struct{}), - callbacks: gmap.NewStrAnyMap(true), - } - if watcher, err := fsnotify.NewWatcher(); err == nil { - w.watcher = watcher - } else { - intlog.Printf(context.TODO(), "New watcher failed: %v", err) - return nil, err - } - w.watchLoop() - w.eventLoop() - return w, nil -} - -// Add monitors `path` using default watcher with callback function `callbackFunc`. -// The optional parameter `recursive` specifies whether monitoring the `path` recursively, which is true in default. -func Add(path string, callbackFunc func(event *Event), recursive ...bool) (callback *Callback, err error) { - w, err := getDefaultWatcher() - if err != nil { - return nil, err - } - return w.Add(path, callbackFunc, recursive...) -} - -// AddOnce monitors `path` using default watcher with callback function `callbackFunc` only once using unique name `name`. -// If AddOnce is called multiple times with the same `name` parameter, `path` is only added to monitor once. It returns error -// if it's called twice with the same `name`. -// -// The optional parameter `recursive` specifies whether monitoring the `path` recursively, which is true in default. 
-func AddOnce(name, path string, callbackFunc func(event *Event), recursive ...bool) (callback *Callback, err error) { - w, err := getDefaultWatcher() - if err != nil { - return nil, err - } - return w.AddOnce(name, path, callbackFunc, recursive...) -} - -// Remove removes all monitoring callbacks of given `path` from watcher recursively. -func Remove(path string) error { - w, err := getDefaultWatcher() - if err != nil { - return err - } - return w.Remove(path) -} - -// RemoveCallback removes specified callback with given id from watcher. -func RemoveCallback(callbackId int) error { - w, err := getDefaultWatcher() - if err != nil { - return err - } - callback := (*Callback)(nil) - if r := callbackIdMap.Get(callbackId); r != nil { - callback = r.(*Callback) - } - if callback == nil { - return gerror.NewCodef(gcode.CodeInvalidParameter, `callback for id %d not found`, callbackId) - } - w.RemoveCallback(callbackId) - return nil -} - -// Exit is only used in the callback function, which can be used to remove current callback -// of itself from the watcher. -func Exit() { - panic(callbackExitEventPanicStr) -} - -// getDefaultWatcher creates and returns the default watcher. -// This is used for lazy initialization purpose. -func getDefaultWatcher() (*Watcher, error) { - mu.Lock() - defer mu.Unlock() - if defaultWatcher != nil { - return defaultWatcher, nil - } - var err error - defaultWatcher, err = New() - return defaultWatcher, err -} diff --git a/vendor/github.com/gogf/gf/v2/os/gfsnotify/gfsnotify_event.go b/vendor/github.com/gogf/gf/v2/os/gfsnotify/gfsnotify_event.go deleted file mode 100644 index f91638ca..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gfsnotify/gfsnotify_event.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// ThIs Source Code Form Is subject to the terms of the MIT License. 
-// If a copy of the MIT was not dIstributed with thIs file, -// You can obtain one at https://github.com/gogf/gf. - -package gfsnotify - -// String returns current event as string. -func (e *Event) String() string { - return e.event.String() -} - -// IsCreate checks whether current event contains file/folder create event. -func (e *Event) IsCreate() bool { - return e.Op == 1 || e.Op&CREATE == CREATE -} - -// IsWrite checks whether current event contains file/folder write event. -func (e *Event) IsWrite() bool { - return e.Op&WRITE == WRITE -} - -// IsRemove checks whether current event contains file/folder remove event. -func (e *Event) IsRemove() bool { - return e.Op&REMOVE == REMOVE -} - -// IsRename checks whether current event contains file/folder rename event. -func (e *Event) IsRename() bool { - return e.Op&RENAME == RENAME -} - -// IsChmod checks whether current event contains file/folder chmod event. -func (e *Event) IsChmod() bool { - return e.Op&CHMOD == CHMOD -} diff --git a/vendor/github.com/gogf/gf/v2/os/gfsnotify/gfsnotify_filefunc.go b/vendor/github.com/gogf/gf/v2/os/gfsnotify/gfsnotify_filefunc.go deleted file mode 100644 index c8f4c7b0..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gfsnotify/gfsnotify_filefunc.go +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// ThIs Source Code Form Is subject to the terms of the MIT License. -// If a copy of the MIT was not dIstributed with thIs file, -// You can obtain one at https://github.com/gogf/gf. - -package gfsnotify - -import ( - "fmt" - "os" - "path/filepath" - "sort" - "strings" - - "github.com/gogf/gf/v2/errors/gerror" -) - -// fileDir returns all but the last element of path, typically the path's directory. -// After dropping the final element, Dir calls Clean on the path and trailing -// slashes are removed. -// If the path is empty, Dir returns ".". -// If the path consists entirely of separators, Dir returns a single separator. 
-// The returned path does not end in a separator unless it is the root directory. -func fileDir(path string) string { - return filepath.Dir(path) -} - -// fileRealPath converts the given `path` to its absolute path -// and checks if the file path exists. -// If the file does not exist, return an empty string. -func fileRealPath(path string) string { - p, err := filepath.Abs(path) - if err != nil { - return "" - } - if !fileExists(p) { - return "" - } - return p -} - -// fileExists checks whether given `path` exist. -func fileExists(path string) bool { - if stat, err := os.Stat(path); stat != nil && !os.IsNotExist(err) { - return true - } - return false -} - -// fileIsDir checks whether given `path` a directory. -func fileIsDir(path string) bool { - s, err := os.Stat(path) - if err != nil { - return false - } - return s.IsDir() -} - -// fileAllDirs returns all sub-folders including itself of given `path` recursively. -func fileAllDirs(path string) (list []string) { - list = []string{path} - file, err := os.Open(path) - if err != nil { - return list - } - defer file.Close() - names, err := file.Readdirnames(-1) - if err != nil { - return list - } - for _, name := range names { - tempPath := fmt.Sprintf("%s%s%s", path, string(filepath.Separator), name) - if fileIsDir(tempPath) { - if array := fileAllDirs(tempPath); len(array) > 0 { - list = append(list, array...) - } - } - } - return -} - -// fileScanDir returns all sub-files with absolute paths of given `path`, -// It scans directory recursively if given parameter `recursive` is true. -func fileScanDir(path string, pattern string, recursive ...bool) ([]string, error) { - list, err := doFileScanDir(path, pattern, recursive...) - if err != nil { - return nil, err - } - if len(list) > 0 { - sort.Strings(list) - } - return list, nil -} - -// doFileScanDir is an internal method which scans directory -// and returns the absolute path list of files that are not sorted. 
-// -// The pattern parameter `pattern` supports multiple file name patterns, -// using the ',' symbol to separate multiple patterns. -// -// It scans directory recursively if given parameter `recursive` is true. -func doFileScanDir(path string, pattern string, recursive ...bool) ([]string, error) { - var ( - list []string - file, err = os.Open(path) - ) - if err != nil { - err = gerror.Wrapf(err, `os.Open failed for path "%s"`, path) - return nil, err - } - defer file.Close() - names, err := file.Readdirnames(-1) - if err != nil { - err = gerror.Wrapf(err, `read directory files failed for path "%s"`, path) - return nil, err - } - filePath := "" - for _, name := range names { - filePath = fmt.Sprintf("%s%s%s", path, string(filepath.Separator), name) - if fileIsDir(filePath) && len(recursive) > 0 && recursive[0] { - array, _ := doFileScanDir(filePath, pattern, true) - if len(array) > 0 { - list = append(list, array...) - } - } - for _, p := range strings.Split(pattern, ",") { - if match, _ := filepath.Match(strings.TrimSpace(p), name); match { - list = append(list, filePath) - } - } - } - return list, nil -} diff --git a/vendor/github.com/gogf/gf/v2/os/gfsnotify/gfsnotify_watcher.go b/vendor/github.com/gogf/gf/v2/os/gfsnotify/gfsnotify_watcher.go deleted file mode 100644 index 80da7638..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gfsnotify/gfsnotify_watcher.go +++ /dev/null @@ -1,198 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gfsnotify - -import ( - "context" - - "github.com/gogf/gf/v2/container/glist" - "github.com/gogf/gf/v2/errors/gcode" - "github.com/gogf/gf/v2/errors/gerror" - "github.com/gogf/gf/v2/internal/intlog" -) - -// Add monitors `path` with callback function `callbackFunc` to the watcher. 
-// The optional parameter `recursive` specifies whether monitoring the `path` recursively, -// which is true in default. -func (w *Watcher) Add(path string, callbackFunc func(event *Event), recursive ...bool) (callback *Callback, err error) { - return w.AddOnce("", path, callbackFunc, recursive...) -} - -// AddOnce monitors `path` with callback function `callbackFunc` only once using unique name -// `name` to the watcher. If AddOnce is called multiple times with the same `name` parameter, -// `path` is only added to monitor once. -// -// It returns error if it's called twice with the same `name`. -// -// The optional parameter `recursive` specifies whether monitoring the `path` recursively, -// which is true in default. -func (w *Watcher) AddOnce(name, path string, callbackFunc func(event *Event), recursive ...bool) (callback *Callback, err error) { - w.nameSet.AddIfNotExistFuncLock(name, func() bool { - // Firstly add the path to watcher. - callback, err = w.addWithCallbackFunc(name, path, callbackFunc, recursive...) - if err != nil { - return false - } - // If it's recursive adding, it then adds all sub-folders to the monitor. - // NOTE: - // 1. It only recursively adds **folders** to the monitor, NOT files, - // because if the folders are monitored and their sub-files are also monitored. - // 2. It bounds no callbacks to the folders, because it will search the callbacks - // from its parent recursively if any event produced. - if fileIsDir(path) && (len(recursive) == 0 || recursive[0]) { - for _, subPath := range fileAllDirs(path) { - if fileIsDir(subPath) { - if err = w.watcher.Add(subPath); err != nil { - err = gerror.Wrapf(err, `add watch failed for path "%s"`, subPath) - } else { - intlog.Printf(context.TODO(), "watcher adds monitor for: %s", subPath) - } - } - } - } - if name == "" { - return false - } - return true - }) - return -} - -// addWithCallbackFunc adds the path to underlying monitor, creates and returns a callback object. 
-// Very note that if it calls multiple times with the same `path`, the latest one will overwrite the previous one. -func (w *Watcher) addWithCallbackFunc(name, path string, callbackFunc func(event *Event), recursive ...bool) (callback *Callback, err error) { - // Check and convert the given path to absolute path. - if t := fileRealPath(path); t == "" { - return nil, gerror.NewCodef(gcode.CodeInvalidParameter, `"%s" does not exist`, path) - } else { - path = t - } - // Create callback object. - callback = &Callback{ - Id: callbackIdGenerator.Add(1), - Func: callbackFunc, - Path: path, - name: name, - recursive: true, - } - if len(recursive) > 0 { - callback.recursive = recursive[0] - } - // Register the callback to watcher. - w.callbacks.LockFunc(func(m map[string]interface{}) { - list := (*glist.List)(nil) - if v, ok := m[path]; !ok { - list = glist.New(true) - m[path] = list - } else { - list = v.(*glist.List) - } - callback.elem = list.PushBack(callback) - }) - // Add the path to underlying monitor. - if err = w.watcher.Add(path); err != nil { - err = gerror.Wrapf(err, `add watch failed for path "%s"`, path) - } else { - intlog.Printf(context.TODO(), "watcher adds monitor for: %s", path) - } - // Add the callback to global callback map. - callbackIdMap.Set(callback.Id, callback) - return -} - -// Close closes the watcher. -func (w *Watcher) Close() { - w.events.Close() - if err := w.watcher.Close(); err != nil { - intlog.Errorf(context.TODO(), `%+v`, err) - } - close(w.closeChan) -} - -// Remove removes monitor and all callbacks associated with the `path` recursively. -func (w *Watcher) Remove(path string) error { - // Firstly remove the callbacks of the path. - if value := w.callbacks.Remove(path); value != nil { - list := value.(*glist.List) - for { - if item := list.PopFront(); item != nil { - callbackIdMap.Remove(item.(*Callback).Id) - } else { - break - } - } - } - // Secondly remove monitor of all sub-files which have no callbacks. 
- if subPaths, err := fileScanDir(path, "*", true); err == nil && len(subPaths) > 0 { - for _, subPath := range subPaths { - if w.checkPathCanBeRemoved(subPath) { - if internalErr := w.watcher.Remove(subPath); internalErr != nil { - intlog.Errorf(context.TODO(), `%+v`, internalErr) - } - } - } - } - // Lastly remove the monitor of the path from underlying monitor. - err := w.watcher.Remove(path) - if err != nil { - err = gerror.Wrapf(err, `remove watch failed for path "%s"`, path) - } - return err -} - -// checkPathCanBeRemoved checks whether the given path have no callbacks bound. -func (w *Watcher) checkPathCanBeRemoved(path string) bool { - // Firstly check the callbacks in the watcher directly. - if v := w.callbacks.Get(path); v != nil { - return false - } - // Secondly check its parent whether has callbacks. - dirPath := fileDir(path) - if v := w.callbacks.Get(dirPath); v != nil { - for _, c := range v.(*glist.List).FrontAll() { - if c.(*Callback).recursive { - return false - } - } - return false - } - // Recursively check its parent. - parentDirPath := "" - for { - parentDirPath = fileDir(dirPath) - if parentDirPath == dirPath { - break - } - if v := w.callbacks.Get(parentDirPath); v != nil { - for _, c := range v.(*glist.List).FrontAll() { - if c.(*Callback).recursive { - return false - } - } - return false - } - dirPath = parentDirPath - } - return true -} - -// RemoveCallback removes callback with given callback id from watcher. 
-func (w *Watcher) RemoveCallback(callbackId int) { - callback := (*Callback)(nil) - if r := callbackIdMap.Get(callbackId); r != nil { - callback = r.(*Callback) - } - if callback != nil { - if r := w.callbacks.Get(callback.Path); r != nil { - r.(*glist.List).Remove(callback.elem) - } - callbackIdMap.Remove(callbackId) - if callback.name != "" { - w.nameSet.Remove(callback.name) - } - } -} diff --git a/vendor/github.com/gogf/gf/v2/os/gfsnotify/gfsnotify_watcher_loop.go b/vendor/github.com/gogf/gf/v2/os/gfsnotify/gfsnotify_watcher_loop.go deleted file mode 100644 index cfd340c5..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gfsnotify/gfsnotify_watcher_loop.go +++ /dev/null @@ -1,186 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gfsnotify - -import ( - "context" - "github.com/gogf/gf/v2/errors/gcode" - "github.com/gogf/gf/v2/errors/gerror" - - "github.com/gogf/gf/v2/container/glist" - "github.com/gogf/gf/v2/internal/intlog" -) - -// watchLoop starts the loop for event listening from underlying inotify monitor. -func (w *Watcher) watchLoop() { - go func() { - for { - select { - // Close event. - case <-w.closeChan: - return - - // Event listening. - case ev := <-w.watcher.Events: - // Filter the repeated event in custom duration. - _, err := w.cache.SetIfNotExist( - context.Background(), - ev.String(), - func(ctx context.Context) (value interface{}, err error) { - w.events.Push(&Event{ - event: ev, - Path: ev.Name, - Op: Op(ev.Op), - Watcher: w, - }) - return struct{}{}, nil - }, repeatEventFilterDuration, - ) - if err != nil { - intlog.Errorf(context.TODO(), `%+v`, err) - } - - case err := <-w.watcher.Errors: - intlog.Errorf(context.TODO(), `%+v`, err) - } - } - }() -} - -// eventLoop is the core event handler. 
-func (w *Watcher) eventLoop() { - go func() { - for { - if v := w.events.Pop(); v != nil { - event := v.(*Event) - // If there's no any callback of this path, it removes it from monitor. - callbacks := w.getCallbacks(event.Path) - if len(callbacks) == 0 { - _ = w.watcher.Remove(event.Path) - continue - } - switch { - case event.IsRemove(): - // It should check again the existence of the path. - // It adds it back to the monitor if it still exists. - if fileExists(event.Path) { - // It adds the path back to monitor. - // We need no worry about the repeat adding. - if err := w.watcher.Add(event.Path); err != nil { - intlog.Errorf(context.TODO(), `%+v`, err) - } else { - intlog.Printf(context.TODO(), "fake remove event, watcher re-adds monitor for: %s", event.Path) - } - // Change the event to RENAME, which means it renames itself to its origin name. - event.Op = RENAME - } - - case event.IsRename(): - // It should check again the existence of the path. - // It adds it back to the monitor if it still exists. - // Especially Some editors might do RENAME and then CHMOD when it's editing file. - if fileExists(event.Path) { - // It might lost the monitoring for the path, so we add the path back to monitor. - // We need no worry about the repeat adding. - if err := w.watcher.Add(event.Path); err != nil { - intlog.Errorf(context.TODO(), `%+v`, err) - } else { - intlog.Printf(context.TODO(), "fake rename event, watcher re-adds monitor for: %s", event.Path) - } - // Change the event to CHMOD. - event.Op = CHMOD - } - - case event.IsCreate(): - // ========================================= - // Note that it here just adds the path to monitor without any callback registering, - // because its parent already has the callbacks. - // ========================================= - if fileIsDir(event.Path) { - // If it's a folder, it then does adding recursively to monitor. 
- for _, subPath := range fileAllDirs(event.Path) { - if fileIsDir(subPath) { - if err := w.watcher.Add(subPath); err != nil { - intlog.Errorf(context.TODO(), `%+v`, err) - } else { - intlog.Printf(context.TODO(), "folder creation event, watcher adds monitor for: %s", subPath) - } - } - } - } else { - // If it's a file, it directly adds it to monitor. - if err := w.watcher.Add(event.Path); err != nil { - intlog.Errorf(context.TODO(), `%+v`, err) - } else { - intlog.Printf(context.TODO(), "file creation event, watcher adds monitor for: %s", event.Path) - } - } - } - // Calling the callbacks in order. - for _, callback := range callbacks { - go func(callback *Callback) { - defer func() { - if err := recover(); err != nil { - switch err { - case callbackExitEventPanicStr: - w.RemoveCallback(callback.Id) - default: - if e, ok := err.(error); ok { - panic(gerror.WrapCode(gcode.CodeInternalPanic, e)) - } - panic(err) - } - } - }() - callback.Func(event) - }(callback) - } - } else { - break - } - } - }() -} - -// getCallbacks searches and returns all callbacks with given `path`. -// It also searches its parents for callbacks if they're recursive. -func (w *Watcher) getCallbacks(path string) (callbacks []*Callback) { - // Firstly add the callbacks of itself. - if v := w.callbacks.Get(path); v != nil { - for _, v := range v.(*glist.List).FrontAll() { - callback := v.(*Callback) - callbacks = append(callbacks, callback) - } - } - // Secondly searches its direct parent for callbacks. - // It is special handling here, which is the different between `recursive` and `not recursive` logic - // for direct parent folder of `path` that events are from. - dirPath := fileDir(path) - if v := w.callbacks.Get(dirPath); v != nil { - for _, v := range v.(*glist.List).FrontAll() { - callback := v.(*Callback) - callbacks = append(callbacks, callback) - } - } - // Lastly searches all the parents of directory of `path` recursively for callbacks. 
- for { - parentDirPath := fileDir(dirPath) - if parentDirPath == dirPath { - break - } - if v := w.callbacks.Get(parentDirPath); v != nil { - for _, v := range v.(*glist.List).FrontAll() { - callback := v.(*Callback) - if callback.recursive { - callbacks = append(callbacks, callback) - } - } - } - dirPath = parentDirPath - } - return -} diff --git a/vendor/github.com/gogf/gf/v2/os/glog/glog.go b/vendor/github.com/gogf/gf/v2/os/glog/glog.go deleted file mode 100644 index 9a5a0dd6..00000000 --- a/vendor/github.com/gogf/gf/v2/os/glog/glog.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -// Package glog implements powerful and easy-to-use leveled logging functionality. -package glog - -import ( - "context" - - "github.com/gogf/gf/v2/internal/command" - "github.com/gogf/gf/v2/os/grpool" - "github.com/gogf/gf/v2/util/gconv" -) - -// ILogger is the API interface for logger. 
-type ILogger interface { - Print(ctx context.Context, v ...interface{}) - Printf(ctx context.Context, format string, v ...interface{}) - Debug(ctx context.Context, v ...interface{}) - Debugf(ctx context.Context, format string, v ...interface{}) - Info(ctx context.Context, v ...interface{}) - Infof(ctx context.Context, format string, v ...interface{}) - Notice(ctx context.Context, v ...interface{}) - Noticef(ctx context.Context, format string, v ...interface{}) - Warning(ctx context.Context, v ...interface{}) - Warningf(ctx context.Context, format string, v ...interface{}) - Error(ctx context.Context, v ...interface{}) - Errorf(ctx context.Context, format string, v ...interface{}) - Critical(ctx context.Context, v ...interface{}) - Criticalf(ctx context.Context, format string, v ...interface{}) - Panic(ctx context.Context, v ...interface{}) - Panicf(ctx context.Context, format string, v ...interface{}) - Fatal(ctx context.Context, v ...interface{}) - Fatalf(ctx context.Context, format string, v ...interface{}) -} - -const ( - commandEnvKeyForDebug = "gf.glog.debug" -) - -var ( - // Ensure Logger implements ILogger. - _ ILogger = &Logger{} - - // Default logger object, for package method usage. - defaultLogger = New() - - // Goroutine pool for async logging output. - // It uses only one asynchronous worker to ensure log sequence. - asyncPool = grpool.New(1) - - // defaultDebug enables debug level or not in default, - // which can be configured using command option or system environment. - defaultDebug = true -) - -func init() { - defaultDebug = gconv.Bool(command.GetOptWithEnv(commandEnvKeyForDebug, "true")) - SetDebug(defaultDebug) -} - -// DefaultLogger returns the default logger. -func DefaultLogger() *Logger { - return defaultLogger -} - -// SetDefaultLogger sets the default logger for package glog. -// Note that there might be concurrent safety issue if calls this function -// in different goroutines. 
-func SetDefaultLogger(l *Logger) { - defaultLogger = l -} diff --git a/vendor/github.com/gogf/gf/v2/os/glog/glog_api.go b/vendor/github.com/gogf/gf/v2/os/glog/glog_api.go deleted file mode 100644 index 3529973e..00000000 --- a/vendor/github.com/gogf/gf/v2/os/glog/glog_api.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package glog - -import "context" - -// Print prints `v` with newline using fmt.Sprintln. -// The parameter `v` can be multiple variables. -func Print(ctx context.Context, v ...interface{}) { - defaultLogger.Print(ctx, v...) -} - -// Printf prints `v` with format `format` using fmt.Sprintf. -// The parameter `v` can be multiple variables. -func Printf(ctx context.Context, format string, v ...interface{}) { - defaultLogger.Printf(ctx, format, v...) -} - -// Fatal prints the logging content with [FATA] header and newline, then exit the current process. -func Fatal(ctx context.Context, v ...interface{}) { - defaultLogger.Fatal(ctx, v...) -} - -// Fatalf prints the logging content with [FATA] header, custom format and newline, then exit the current process. -func Fatalf(ctx context.Context, format string, v ...interface{}) { - defaultLogger.Fatalf(ctx, format, v...) -} - -// Panic prints the logging content with [PANI] header and newline, then panics. -func Panic(ctx context.Context, v ...interface{}) { - defaultLogger.Panic(ctx, v...) -} - -// Panicf prints the logging content with [PANI] header, custom format and newline, then panics. -func Panicf(ctx context.Context, format string, v ...interface{}) { - defaultLogger.Panicf(ctx, format, v...) -} - -// Info prints the logging content with [INFO] header and newline. -func Info(ctx context.Context, v ...interface{}) { - defaultLogger.Info(ctx, v...) 
-} - -// Infof prints the logging content with [INFO] header, custom format and newline. -func Infof(ctx context.Context, format string, v ...interface{}) { - defaultLogger.Infof(ctx, format, v...) -} - -// Debug prints the logging content with [DEBU] header and newline. -func Debug(ctx context.Context, v ...interface{}) { - defaultLogger.Debug(ctx, v...) -} - -// Debugf prints the logging content with [DEBU] header, custom format and newline. -func Debugf(ctx context.Context, format string, v ...interface{}) { - defaultLogger.Debugf(ctx, format, v...) -} - -// Notice prints the logging content with [NOTI] header and newline. -// It also prints caller stack info if stack feature is enabled. -func Notice(ctx context.Context, v ...interface{}) { - defaultLogger.Notice(ctx, v...) -} - -// Noticef prints the logging content with [NOTI] header, custom format and newline. -// It also prints caller stack info if stack feature is enabled. -func Noticef(ctx context.Context, format string, v ...interface{}) { - defaultLogger.Noticef(ctx, format, v...) -} - -// Warning prints the logging content with [WARN] header and newline. -// It also prints caller stack info if stack feature is enabled. -func Warning(ctx context.Context, v ...interface{}) { - defaultLogger.Warning(ctx, v...) -} - -// Warningf prints the logging content with [WARN] header, custom format and newline. -// It also prints caller stack info if stack feature is enabled. -func Warningf(ctx context.Context, format string, v ...interface{}) { - defaultLogger.Warningf(ctx, format, v...) -} - -// Error prints the logging content with [ERRO] header and newline. -// It also prints caller stack info if stack feature is enabled. -func Error(ctx context.Context, v ...interface{}) { - defaultLogger.Error(ctx, v...) -} - -// Errorf prints the logging content with [ERRO] header, custom format and newline. -// It also prints caller stack info if stack feature is enabled. 
-func Errorf(ctx context.Context, format string, v ...interface{}) { - defaultLogger.Errorf(ctx, format, v...) -} - -// Critical prints the logging content with [CRIT] header and newline. -// It also prints caller stack info if stack feature is enabled. -func Critical(ctx context.Context, v ...interface{}) { - defaultLogger.Critical(ctx, v...) -} - -// Criticalf prints the logging content with [CRIT] header, custom format and newline. -// It also prints caller stack info if stack feature is enabled. -func Criticalf(ctx context.Context, format string, v ...interface{}) { - defaultLogger.Criticalf(ctx, format, v...) -} diff --git a/vendor/github.com/gogf/gf/v2/os/glog/glog_chaining.go b/vendor/github.com/gogf/gf/v2/os/glog/glog_chaining.go deleted file mode 100644 index 391def3e..00000000 --- a/vendor/github.com/gogf/gf/v2/os/glog/glog_chaining.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package glog - -import ( - "io" -) - -// Expose returns the default logger of package glog. -func Expose() *Logger { - return defaultLogger -} - -// To is a chaining function, -// which redirects current logging content output to the sepecified `writer`. -func To(writer io.Writer) *Logger { - return defaultLogger.To(writer) -} - -// Path is a chaining function, -// which sets the directory path to `path` for current logging content output. -func Path(path string) *Logger { - return defaultLogger.Path(path) -} - -// Cat is a chaining function, -// which sets the category to `category` for current logging content output. -func Cat(category string) *Logger { - return defaultLogger.Cat(category) -} - -// File is a chaining function, -// which sets file name `pattern` for the current logging content output. 
-func File(pattern string) *Logger { - return defaultLogger.File(pattern) -} - -// Level is a chaining function, -// which sets logging level for the current logging content output. -func Level(level int) *Logger { - return defaultLogger.Level(level) -} - -// LevelStr is a chaining function, -// which sets logging level for the current logging content output using level string. -func LevelStr(levelStr string) *Logger { - return defaultLogger.LevelStr(levelStr) -} - -// Skip is a chaining function, -// which sets stack skip for the current logging content output. -// It also affects the caller file path checks when line number printing enabled. -func Skip(skip int) *Logger { - return defaultLogger.Skip(skip) -} - -// Stack is a chaining function, -// which sets stack options for the current logging content output . -func Stack(enabled bool, skip ...int) *Logger { - return defaultLogger.Stack(enabled, skip...) -} - -// StackWithFilter is a chaining function, -// which sets stack filter for the current logging content output . -func StackWithFilter(filter string) *Logger { - return defaultLogger.StackWithFilter(filter) -} - -// Stdout is a chaining function, -// which enables/disables stdout for the current logging content output. -// It's enabled in default. -func Stdout(enabled ...bool) *Logger { - return defaultLogger.Stdout(enabled...) -} - -// Header is a chaining function, -// which enables/disables log header for the current logging content output. -// It's enabled in default. -func Header(enabled ...bool) *Logger { - return defaultLogger.Header(enabled...) -} - -// Line is a chaining function, -// which enables/disables printing its caller file along with its line number. -// The parameter `long` specified whether print the long absolute file path, eg: /a/b/c/d.go:23. -func Line(long ...bool) *Logger { - return defaultLogger.Line(long...) -} - -// Async is a chaining function, -// which enables/disables async logging output feature. 
-func Async(enabled ...bool) *Logger { - return defaultLogger.Async(enabled...) -} diff --git a/vendor/github.com/gogf/gf/v2/os/glog/glog_config.go b/vendor/github.com/gogf/gf/v2/os/glog/glog_config.go deleted file mode 100644 index 615caa68..00000000 --- a/vendor/github.com/gogf/gf/v2/os/glog/glog_config.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package glog - -import ( - "context" - "io" -) - -// SetConfig set configurations for the defaultLogger. -func SetConfig(config Config) error { - return defaultLogger.SetConfig(config) -} - -// SetConfigWithMap set configurations with map for the defaultLogger. -func SetConfigWithMap(m map[string]interface{}) error { - return defaultLogger.SetConfigWithMap(m) -} - -// SetPath sets the directory path for file logging. -func SetPath(path string) error { - return defaultLogger.SetPath(path) -} - -// GetPath returns the logging directory path for file logging. -// It returns empty string if no directory path set. -func GetPath() string { - return defaultLogger.GetPath() -} - -// SetFile sets the file name `pattern` for file logging. -// Datetime pattern can be used in `pattern`, eg: access-{Ymd}.log. -// The default file name pattern is: Y-m-d.log, eg: 2018-01-01.log -func SetFile(pattern string) { - defaultLogger.SetFile(pattern) -} - -// SetLevel sets the default logging level. -func SetLevel(level int) { - defaultLogger.SetLevel(level) -} - -// GetLevel returns the default logging level value. -func GetLevel() int { - return defaultLogger.GetLevel() -} - -// SetWriter sets the customized logging `writer` for logging. -// The `writer` object should implements the io.Writer interface. 
-// Developer can use customized logging `writer` to redirect logging output to another service, -// eg: kafka, mysql, mongodb, etc. -func SetWriter(writer io.Writer) { - defaultLogger.SetWriter(writer) -} - -// GetWriter returns the customized writer object, which implements the io.Writer interface. -// It returns nil if no customized writer set. -func GetWriter() io.Writer { - return defaultLogger.GetWriter() -} - -// SetDebug enables/disables the debug level for default defaultLogger. -// The debug level is enabled in default. -func SetDebug(debug bool) { - defaultLogger.SetDebug(debug) -} - -// SetAsync enables/disables async logging output feature for default defaultLogger. -func SetAsync(enabled bool) { - defaultLogger.SetAsync(enabled) -} - -// SetStdoutPrint sets whether ouptput the logging contents to stdout, which is true in default. -func SetStdoutPrint(enabled bool) { - defaultLogger.SetStdoutPrint(enabled) -} - -// SetHeaderPrint sets whether output header of the logging contents, which is true in default. -func SetHeaderPrint(enabled bool) { - defaultLogger.SetHeaderPrint(enabled) -} - -// SetPrefix sets prefix string for every logging content. -// Prefix is part of header, which means if header output is shut, no prefix will be output. -func SetPrefix(prefix string) { - defaultLogger.SetPrefix(prefix) -} - -// SetFlags sets extra flags for logging output features. -func SetFlags(flags int) { - defaultLogger.SetFlags(flags) -} - -// GetFlags returns the flags of defaultLogger. -func GetFlags() int { - return defaultLogger.GetFlags() -} - -// SetCtxKeys sets the context keys for defaultLogger. The keys is used for retrieving values -// from context and printing them to logging content. -// -// Note that multiple calls of this function will overwrite the previous set context keys. -func SetCtxKeys(keys ...interface{}) { - defaultLogger.SetCtxKeys(keys...) -} - -// GetCtxKeys retrieves and returns the context keys for logging. 
-func GetCtxKeys() []interface{} { - return defaultLogger.GetCtxKeys() -} - -// PrintStack prints the caller stack, -// the optional parameter `skip` specify the skipped stack offset from the end point. -func PrintStack(ctx context.Context, skip ...int) { - defaultLogger.PrintStack(ctx, skip...) -} - -// GetStack returns the caller stack content, -// the optional parameter `skip` specify the skipped stack offset from the end point. -func GetStack(skip ...int) string { - return defaultLogger.GetStack(skip...) -} - -// SetStack enables/disables the stack feature in failure logging outputs. -func SetStack(enabled bool) { - defaultLogger.SetStack(enabled) -} - -// SetLevelStr sets the logging level by level string. -func SetLevelStr(levelStr string) error { - return defaultLogger.SetLevelStr(levelStr) -} - -// SetLevelPrefix sets the prefix string for specified level. -func SetLevelPrefix(level int, prefix string) { - defaultLogger.SetLevelPrefix(level, prefix) -} - -// SetLevelPrefixes sets the level to prefix string mapping for the defaultLogger. -func SetLevelPrefixes(prefixes map[int]string) { - defaultLogger.SetLevelPrefixes(prefixes) -} - -// GetLevelPrefix returns the prefix string for specified level. -func GetLevelPrefix(level int) string { - return defaultLogger.GetLevelPrefix(level) -} - -// SetHandlers sets the logging handlers for default defaultLogger. -func SetHandlers(handlers ...Handler) { - defaultLogger.SetHandlers(handlers...) -} - -// SetWriterColorEnable sets the file logging with color -func SetWriterColorEnable(enabled bool) { - defaultLogger.SetWriterColorEnable(enabled) -} diff --git a/vendor/github.com/gogf/gf/v2/os/glog/glog_instance.go b/vendor/github.com/gogf/gf/v2/os/glog/glog_instance.go deleted file mode 100644 index 3b732606..00000000 --- a/vendor/github.com/gogf/gf/v2/os/glog/glog_instance.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. 
-// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package glog - -import "github.com/gogf/gf/v2/container/gmap" - -const ( - // DefaultName is the default group name for instance usage. - DefaultName = "default" -) - -var ( - // Instances map. - instances = gmap.NewStrAnyMap(true) -) - -// Instance returns an instance of Logger with default settings. -// The parameter `name` is the name for the instance. -func Instance(name ...string) *Logger { - key := DefaultName - if len(name) > 0 && name[0] != "" { - key = name[0] - } - return instances.GetOrSetFuncLock(key, func() interface{} { - return New() - }).(*Logger) -} diff --git a/vendor/github.com/gogf/gf/v2/os/glog/glog_logger.go b/vendor/github.com/gogf/gf/v2/os/glog/glog_logger.go deleted file mode 100644 index 8455fa9d..00000000 --- a/vendor/github.com/gogf/gf/v2/os/glog/glog_logger.go +++ /dev/null @@ -1,415 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package glog - -import ( - "bytes" - "context" - "fmt" - "io" - "os" - "runtime" - "strings" - "time" - - "github.com/fatih/color" - "go.opentelemetry.io/otel/trace" - - "github.com/gogf/gf/v2/debug/gdebug" - "github.com/gogf/gf/v2/internal/consts" - "github.com/gogf/gf/v2/internal/intlog" - "github.com/gogf/gf/v2/os/gctx" - "github.com/gogf/gf/v2/os/gfile" - "github.com/gogf/gf/v2/os/gfpool" - "github.com/gogf/gf/v2/os/gmlock" - "github.com/gogf/gf/v2/os/gtime" - "github.com/gogf/gf/v2/text/gregex" - "github.com/gogf/gf/v2/util/gconv" -) - -// Logger is the struct for logging management. 
-type Logger struct { - parent *Logger // Parent logger, if it is not empty, it means the logger is used in chaining function. - config Config // Logger configuration. -} - -const ( - defaultFileFormat = `{Y-m-d}.log` - defaultFileFlags = os.O_CREATE | os.O_WRONLY | os.O_APPEND - defaultFilePerm = os.FileMode(0666) - defaultFileExpire = time.Minute - pathFilterKey = "/os/glog/glog" - memoryLockPrefixForPrintingToFile = "glog.printToFile:" -) - -const ( - F_ASYNC = 1 << iota // Print logging content asynchronously。 - F_FILE_LONG // Print full file name and line number: /a/b/c/d.go:23. - F_FILE_SHORT // Print final file name element and line number: d.go:23. overrides F_FILE_LONG. - F_TIME_DATE // Print the date in the local time zone: 2009-01-23. - F_TIME_TIME // Print the time in the local time zone: 01:23:23. - F_TIME_MILLI // Print the time with milliseconds in the local time zone: 01:23:23.675. - F_CALLER_FN // Print Caller function name and package: main.main - F_TIME_STD = F_TIME_DATE | F_TIME_MILLI -) - -// New creates and returns a custom logger. -func New() *Logger { - return &Logger{ - config: DefaultConfig(), - } -} - -// NewWithWriter creates and returns a custom logger with io.Writer. -func NewWithWriter(writer io.Writer) *Logger { - l := New() - l.SetWriter(writer) - return l -} - -// Clone returns a new logger, which a `shallow copy` of the current logger. -// Note that the attribute `config` of the cloned one is the shallow copy of current one. -func (l *Logger) Clone() *Logger { - return &Logger{ - config: l.config, - parent: l, - } -} - -// getFilePath returns the logging file path. -// The logging file name must have extension name of "log". -func (l *Logger) getFilePath(now time.Time) string { - // Content containing "{}" in the file name is formatted using gtime. 
- file, _ := gregex.ReplaceStringFunc(`{.+?}`, l.config.File, func(s string) string { - return gtime.New(now).Format(strings.Trim(s, "{}")) - }) - file = gfile.Join(l.config.Path, file) - return file -} - -// print prints `s` to defined writer, logging file or passed `std`. -func (l *Logger) print(ctx context.Context, level int, stack string, values ...interface{}) { - // Lazy initialize for rotation feature. - // It uses atomic reading operation to enhance the performance checking. - // It here uses CAP for performance and concurrent safety. - // It just initializes once for each logger. - if l.config.RotateSize > 0 || l.config.RotateExpire > 0 { - if !l.config.rotatedHandlerInitialized.Val() && l.config.rotatedHandlerInitialized.Cas(false, true) { - l.rotateChecksTimely(ctx) - intlog.Printf(ctx, "logger rotation initialized: every %s", l.config.RotateCheckInterval.String()) - } - } - - var ( - now = time.Now() - input = &HandlerInput{ - internalHandlerInfo: internalHandlerInfo{ - index: -1, - }, - Logger: l, - Buffer: bytes.NewBuffer(nil), - Time: now, - Color: defaultLevelColor[level], - Level: level, - Stack: stack, - } - ) - - // Logging handlers. - if len(l.config.Handlers) > 0 { - input.handlers = append(input.handlers, l.config.Handlers...) - } else if defaultHandler != nil { - input.handlers = []Handler{defaultHandler} - } - input.handlers = append(input.handlers, defaultPrintHandler) - - // Time. - timeFormat := "" - if l.config.TimeFormat != "" { - timeFormat = l.config.TimeFormat - } else { - if l.config.Flags&F_TIME_DATE > 0 { - timeFormat += "2006-01-02" - } - if l.config.Flags&F_TIME_TIME > 0 { - if timeFormat != "" { - timeFormat += " " - } - timeFormat += "15:04:05" - } - if l.config.Flags&F_TIME_MILLI > 0 { - if timeFormat != "" { - timeFormat += " " - } - timeFormat += "15:04:05.000" - } - } - - if len(timeFormat) > 0 { - input.TimeFormat = now.Format(timeFormat) - } - - // Level string. 
- input.LevelFormat = l.GetLevelPrefix(level) - - // Caller path and Fn name. - if l.config.Flags&(F_FILE_LONG|F_FILE_SHORT|F_CALLER_FN) > 0 { - callerFnName, path, line := gdebug.CallerWithFilter( - []string{consts.StackFilterKeyForGoFrame}, - l.config.StSkip, - ) - if l.config.Flags&F_CALLER_FN > 0 { - if len(callerFnName) > 2 { - input.CallerFunc = fmt.Sprintf(`[%s]`, callerFnName) - } - } - if line >= 0 && len(path) > 1 { - if l.config.Flags&F_FILE_LONG > 0 { - input.CallerPath = fmt.Sprintf(`%s:%d:`, path, line) - } - if l.config.Flags&F_FILE_SHORT > 0 { - input.CallerPath = fmt.Sprintf(`%s:%d:`, gfile.Basename(path), line) - } - } - } - // Prefix. - if len(l.config.Prefix) > 0 { - input.Prefix = l.config.Prefix - } - - // Convert value to string. - if ctx != nil { - // Tracing values. - spanCtx := trace.SpanContextFromContext(ctx) - if traceId := spanCtx.TraceID(); traceId.IsValid() { - input.TraceId = traceId.String() - } - // Context values. - if len(l.config.CtxKeys) > 0 { - for _, ctxKey := range l.config.CtxKeys { - var ctxValue interface{} - if ctxValue = ctx.Value(ctxKey); ctxValue == nil { - ctxValue = ctx.Value(gctx.StrKey(gconv.String(ctxKey))) - } - if ctxValue != nil { - if input.CtxStr != "" { - input.CtxStr += ", " - } - input.CtxStr += gconv.String(ctxValue) - } - } - } - } - var tempStr string - for _, v := range values { - tempStr = gconv.String(v) - if len(input.Content) > 0 { - if input.Content[len(input.Content)-1] == '\n' { - // Remove one blank line(\n\n). 
- if len(tempStr) > 0 && tempStr[0] == '\n' { - input.Content += tempStr[1:] - } else { - input.Content += tempStr - } - } else { - input.Content += " " + tempStr - } - } else { - input.Content = tempStr - } - } - if l.config.Flags&F_ASYNC > 0 { - input.IsAsync = true - err := asyncPool.Add(ctx, func(ctx context.Context) { - input.Next(ctx) - }) - if err != nil { - intlog.Errorf(ctx, `%+v`, err) - } - } else { - input.Next(ctx) - } -} - -// doDefaultPrint outputs the logging content according configuration. -func (l *Logger) doDefaultPrint(ctx context.Context, input *HandlerInput) *bytes.Buffer { - var buffer *bytes.Buffer - if l.config.Writer == nil { - // Allow output to stdout? - if l.config.StdoutPrint { - if buf := l.printToStdout(ctx, input); buf != nil { - buffer = buf - } - } - - // Output content to disk file. - if l.config.Path != "" { - if buf := l.printToFile(ctx, input.Time, input); buf != nil { - buffer = buf - } - } - } else { - // Output to custom writer. - if buf := l.printToWriter(ctx, input); buf != nil { - buffer = buf - } - } - return buffer -} - -// printToWriter writes buffer to writer. -func (l *Logger) printToWriter(ctx context.Context, input *HandlerInput) *bytes.Buffer { - if l.config.Writer != nil { - var ( - buffer = input.getRealBuffer(l.config.WriterColorEnable) - ) - if _, err := l.config.Writer.Write(buffer.Bytes()); err != nil { - intlog.Errorf(ctx, `%+v`, err) - } - return buffer - } - return nil -} - -// printToStdout outputs logging content to stdout. -func (l *Logger) printToStdout(ctx context.Context, input *HandlerInput) *bytes.Buffer { - if l.config.StdoutPrint { - var ( - err error - buffer = input.getRealBuffer(!l.config.StdoutColorDisabled) - ) - // This will lose color in Windows os system. - // if _, err := os.Stdout.Write(input.getRealBuffer(true).Bytes()); err != nil { - - // This will print color in Windows os system. 
- if _, err = fmt.Fprint(color.Output, buffer.String()); err != nil { - intlog.Errorf(ctx, `%+v`, err) - } - return buffer - } - return nil -} - -// printToFile outputs logging content to disk file. -func (l *Logger) printToFile(ctx context.Context, t time.Time, in *HandlerInput) *bytes.Buffer { - var ( - buffer = in.getRealBuffer(l.config.WriterColorEnable) - logFilePath = l.getFilePath(t) - memoryLockKey = memoryLockPrefixForPrintingToFile + logFilePath - ) - gmlock.Lock(memoryLockKey) - defer gmlock.Unlock(memoryLockKey) - - // Rotation file size checks. - if l.config.RotateSize > 0 && gfile.Size(logFilePath) > l.config.RotateSize { - if runtime.GOOS == "windows" { - file := l.getFilePointer(ctx, logFilePath) - if file == nil { - intlog.Errorf(ctx, `got nil file pointer for: %s`, logFilePath) - return buffer - } - - if _, err := file.Write(buffer.Bytes()); err != nil { - intlog.Errorf(ctx, `%+v`, err) - } - - if err := file.Close(true); err != nil { - intlog.Errorf(ctx, `%+v`, err) - } - l.rotateFileBySize(ctx, t) - - return buffer - } - - l.rotateFileBySize(ctx, t) - } - // Logging content outputting to disk file. - if file := l.getFilePointer(ctx, logFilePath); file == nil { - intlog.Errorf(ctx, `got nil file pointer for: %s`, logFilePath) - } else { - if _, err := file.Write(buffer.Bytes()); err != nil { - intlog.Errorf(ctx, `%+v`, err) - } - if err := file.Close(); err != nil { - intlog.Errorf(ctx, `%+v`, err) - } - } - return buffer -} - -// getFilePointer retrieves and returns a file pointer from file pool. -func (l *Logger) getFilePointer(ctx context.Context, path string) *gfpool.File { - file, err := gfpool.Open( - path, - defaultFileFlags, - defaultFilePerm, - defaultFileExpire, - ) - if err != nil { - // panic(err) - intlog.Errorf(ctx, `%+v`, err) - } - return file -} - -// getOpenedFilePointer retrieves and returns a file pointer from file pool. 
-func (l *Logger) getOpenedFilePointer(ctx context.Context, path string) *gfpool.File { - file := gfpool.Get( - path, - defaultFileFlags, - defaultFilePerm, - defaultFileExpire, - ) - if file == nil { - intlog.Errorf(ctx, `can not find the file, path:%s`, path) - } - return file -} - -// printStd prints content `s` without stack. -func (l *Logger) printStd(ctx context.Context, level int, value ...interface{}) { - l.print(ctx, level, "", value...) -} - -// printStd prints content `s` with stack check. -func (l *Logger) printErr(ctx context.Context, level int, value ...interface{}) { - var stack string - if l.config.StStatus == 1 { - stack = l.GetStack() - } - // In matter of sequence, do not use stderr here, but use the same stdout. - l.print(ctx, level, stack, value...) -} - -// format formats `values` using fmt.Sprintf. -func (l *Logger) format(format string, value ...interface{}) string { - return fmt.Sprintf(format, value...) -} - -// PrintStack prints the caller stack, -// the optional parameter `skip` specify the skipped stack offset from the end point. -func (l *Logger) PrintStack(ctx context.Context, skip ...int) { - if s := l.GetStack(skip...); s != "" { - l.Print(ctx, "Stack:\n"+s) - } else { - l.Print(ctx) - } -} - -// GetStack returns the caller stack content, -// the optional parameter `skip` specify the skipped stack offset from the end point. 
-func (l *Logger) GetStack(skip ...int) string { - stackSkip := l.config.StSkip - if len(skip) > 0 { - stackSkip += skip[0] - } - filters := []string{pathFilterKey} - if l.config.StFilter != "" { - filters = append(filters, l.config.StFilter) - } - return gdebug.StackWithFilters(filters, stackSkip) -} diff --git a/vendor/github.com/gogf/gf/v2/os/glog/glog_logger_api.go b/vendor/github.com/gogf/gf/v2/os/glog/glog_logger_api.go deleted file mode 100644 index d485d6fc..00000000 --- a/vendor/github.com/gogf/gf/v2/os/glog/glog_logger_api.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package glog - -import ( - "context" - "fmt" - "os" -) - -// Print prints `v` with newline using fmt.Sprintln. -// The parameter `v` can be multiple variables. -func (l *Logger) Print(ctx context.Context, v ...interface{}) { - l.printStd(ctx, LEVEL_NONE, v...) -} - -// Printf prints `v` with format `format` using fmt.Sprintf. -// The parameter `v` can be multiple variables. -func (l *Logger) Printf(ctx context.Context, format string, v ...interface{}) { - l.printStd(ctx, LEVEL_NONE, l.format(format, v...)) -} - -// Fatal prints the logging content with [FATA] header and newline, then exit the current process. -func (l *Logger) Fatal(ctx context.Context, v ...interface{}) { - l.printErr(ctx, LEVEL_FATA, v...) - os.Exit(1) -} - -// Fatalf prints the logging content with [FATA] header, custom format and newline, then exit the current process. -func (l *Logger) Fatalf(ctx context.Context, format string, v ...interface{}) { - l.printErr(ctx, LEVEL_FATA, l.format(format, v...)) - os.Exit(1) -} - -// Panic prints the logging content with [PANI] header and newline, then panics. 
-func (l *Logger) Panic(ctx context.Context, v ...interface{}) { - l.printErr(ctx, LEVEL_PANI, v...) - panic(fmt.Sprint(v...)) -} - -// Panicf prints the logging content with [PANI] header, custom format and newline, then panics. -func (l *Logger) Panicf(ctx context.Context, format string, v ...interface{}) { - l.printErr(ctx, LEVEL_PANI, l.format(format, v...)) - panic(l.format(format, v...)) -} - -// Info prints the logging content with [INFO] header and newline. -func (l *Logger) Info(ctx context.Context, v ...interface{}) { - if l.checkLevel(LEVEL_INFO) { - l.printStd(ctx, LEVEL_INFO, v...) - } -} - -// Infof prints the logging content with [INFO] header, custom format and newline. -func (l *Logger) Infof(ctx context.Context, format string, v ...interface{}) { - if l.checkLevel(LEVEL_INFO) { - l.printStd(ctx, LEVEL_INFO, l.format(format, v...)) - } -} - -// Debug prints the logging content with [DEBU] header and newline. -func (l *Logger) Debug(ctx context.Context, v ...interface{}) { - if l.checkLevel(LEVEL_DEBU) { - l.printStd(ctx, LEVEL_DEBU, v...) - } -} - -// Debugf prints the logging content with [DEBU] header, custom format and newline. -func (l *Logger) Debugf(ctx context.Context, format string, v ...interface{}) { - if l.checkLevel(LEVEL_DEBU) { - l.printStd(ctx, LEVEL_DEBU, l.format(format, v...)) - } -} - -// Notice prints the logging content with [NOTI] header and newline. -// It also prints caller stack info if stack feature is enabled. -func (l *Logger) Notice(ctx context.Context, v ...interface{}) { - if l.checkLevel(LEVEL_NOTI) { - l.printStd(ctx, LEVEL_NOTI, v...) - } -} - -// Noticef prints the logging content with [NOTI] header, custom format and newline. -// It also prints caller stack info if stack feature is enabled. 
-func (l *Logger) Noticef(ctx context.Context, format string, v ...interface{}) { - if l.checkLevel(LEVEL_NOTI) { - l.printStd(ctx, LEVEL_NOTI, l.format(format, v...)) - } -} - -// Warning prints the logging content with [WARN] header and newline. -// It also prints caller stack info if stack feature is enabled. -func (l *Logger) Warning(ctx context.Context, v ...interface{}) { - if l.checkLevel(LEVEL_WARN) { - l.printStd(ctx, LEVEL_WARN, v...) - } -} - -// Warningf prints the logging content with [WARN] header, custom format and newline. -// It also prints caller stack info if stack feature is enabled. -func (l *Logger) Warningf(ctx context.Context, format string, v ...interface{}) { - if l.checkLevel(LEVEL_WARN) { - l.printStd(ctx, LEVEL_WARN, l.format(format, v...)) - } -} - -// Error prints the logging content with [ERRO] header and newline. -// It also prints caller stack info if stack feature is enabled. -func (l *Logger) Error(ctx context.Context, v ...interface{}) { - if l.checkLevel(LEVEL_ERRO) { - l.printErr(ctx, LEVEL_ERRO, v...) - } -} - -// Errorf prints the logging content with [ERRO] header, custom format and newline. -// It also prints caller stack info if stack feature is enabled. -func (l *Logger) Errorf(ctx context.Context, format string, v ...interface{}) { - if l.checkLevel(LEVEL_ERRO) { - l.printErr(ctx, LEVEL_ERRO, l.format(format, v...)) - } -} - -// Critical prints the logging content with [CRIT] header and newline. -// It also prints caller stack info if stack feature is enabled. -func (l *Logger) Critical(ctx context.Context, v ...interface{}) { - if l.checkLevel(LEVEL_CRIT) { - l.printErr(ctx, LEVEL_CRIT, v...) - } -} - -// Criticalf prints the logging content with [CRIT] header, custom format and newline. -// It also prints caller stack info if stack feature is enabled. 
-func (l *Logger) Criticalf(ctx context.Context, format string, v ...interface{}) { - if l.checkLevel(LEVEL_CRIT) { - l.printErr(ctx, LEVEL_CRIT, l.format(format, v...)) - } -} - -// checkLevel checks whether the given `level` could be output. -func (l *Logger) checkLevel(level int) bool { - return l.config.Level&level > 0 -} diff --git a/vendor/github.com/gogf/gf/v2/os/glog/glog_logger_chaining.go b/vendor/github.com/gogf/gf/v2/os/glog/glog_logger_chaining.go deleted file mode 100644 index 29ebade2..00000000 --- a/vendor/github.com/gogf/gf/v2/os/glog/glog_logger_chaining.go +++ /dev/null @@ -1,223 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package glog - -import ( - "io" - - "github.com/gogf/gf/v2/os/gfile" -) - -// To is a chaining function, -// which redirects current logging content output to the specified `writer`. -func (l *Logger) To(writer io.Writer) *Logger { - logger := (*Logger)(nil) - if l.parent == nil { - logger = l.Clone() - } else { - logger = l - } - logger.SetWriter(writer) - return logger -} - -// Path is a chaining function, -// which sets the directory path to `path` for current logging content output. -// -// Note that the parameter `path` is a directory path, not a file path. -func (l *Logger) Path(path string) *Logger { - logger := (*Logger)(nil) - if l.parent == nil { - logger = l.Clone() - } else { - logger = l - } - if path != "" { - if err := logger.SetPath(path); err != nil { - panic(err) - } - } - return logger -} - -// Cat is a chaining function, -// which sets the category to `category` for current logging content output. -// Param `category` can be hierarchical, eg: module/user. 
-func (l *Logger) Cat(category string) *Logger { - logger := (*Logger)(nil) - if l.parent == nil { - logger = l.Clone() - } else { - logger = l - } - if logger.config.Path != "" { - if err := logger.SetPath(gfile.Join(logger.config.Path, category)); err != nil { - panic(err) - } - } - return logger -} - -// File is a chaining function, -// which sets file name `pattern` for the current logging content output. -func (l *Logger) File(file string) *Logger { - logger := (*Logger)(nil) - if l.parent == nil { - logger = l.Clone() - } else { - logger = l - } - logger.SetFile(file) - return logger -} - -// Level is a chaining function, -// which sets logging level for the current logging content output. -func (l *Logger) Level(level int) *Logger { - logger := (*Logger)(nil) - if l.parent == nil { - logger = l.Clone() - } else { - logger = l - } - logger.SetLevel(level) - return logger -} - -// LevelStr is a chaining function, -// which sets logging level for the current logging content output using level string. -func (l *Logger) LevelStr(levelStr string) *Logger { - logger := (*Logger)(nil) - if l.parent == nil { - logger = l.Clone() - } else { - logger = l - } - if err := logger.SetLevelStr(levelStr); err != nil { - panic(err) - } - return logger -} - -// Skip is a chaining function, -// which sets stack skip for the current logging content output. -// It also affects the caller file path checks when line number printing enabled. -func (l *Logger) Skip(skip int) *Logger { - logger := (*Logger)(nil) - if l.parent == nil { - logger = l.Clone() - } else { - logger = l - } - logger.SetStackSkip(skip) - return logger -} - -// Stack is a chaining function, -// which sets stack options for the current logging content output . 
-func (l *Logger) Stack(enabled bool, skip ...int) *Logger { - logger := (*Logger)(nil) - if l.parent == nil { - logger = l.Clone() - } else { - logger = l - } - logger.SetStack(enabled) - if len(skip) > 0 { - logger.SetStackSkip(skip[0]) - } - return logger -} - -// StackWithFilter is a chaining function, -// which sets stack filter for the current logging content output . -func (l *Logger) StackWithFilter(filter string) *Logger { - logger := (*Logger)(nil) - if l.parent == nil { - logger = l.Clone() - } else { - logger = l - } - logger.SetStack(true) - logger.SetStackFilter(filter) - return logger -} - -// Stdout is a chaining function, -// which enables/disables stdout for the current logging content output. -// It's enabled in default. -func (l *Logger) Stdout(enabled ...bool) *Logger { - logger := (*Logger)(nil) - if l.parent == nil { - logger = l.Clone() - } else { - logger = l - } - // stdout printing is enabled if `enabled` is not passed. - if len(enabled) > 0 && !enabled[0] { - logger.config.StdoutPrint = false - } else { - logger.config.StdoutPrint = true - } - return logger -} - -// Header is a chaining function, -// which enables/disables log header for the current logging content output. -// It's enabled in default. -func (l *Logger) Header(enabled ...bool) *Logger { - logger := (*Logger)(nil) - if l.parent == nil { - logger = l.Clone() - } else { - logger = l - } - // header is enabled if `enabled` is not passed. - if len(enabled) > 0 && !enabled[0] { - logger.SetHeaderPrint(false) - } else { - logger.SetHeaderPrint(true) - } - return logger -} - -// Line is a chaining function, -// which enables/disables printing its caller file path along with its line number. -// The parameter `long` specified whether print the long absolute file path, eg: /a/b/c/d.go:23, -// or else short one: d.go:23. 
-func (l *Logger) Line(long ...bool) *Logger { - logger := (*Logger)(nil) - if l.parent == nil { - logger = l.Clone() - } else { - logger = l - } - if len(long) > 0 && long[0] { - logger.config.Flags |= F_FILE_LONG - } else { - logger.config.Flags |= F_FILE_SHORT - } - return logger -} - -// Async is a chaining function, -// which enables/disables async logging output feature. -func (l *Logger) Async(enabled ...bool) *Logger { - logger := (*Logger)(nil) - if l.parent == nil { - logger = l.Clone() - } else { - logger = l - } - // async feature is enabled if `enabled` is not passed. - if len(enabled) > 0 && !enabled[0] { - logger.SetAsync(false) - } else { - logger.SetAsync(true) - } - return logger -} diff --git a/vendor/github.com/gogf/gf/v2/os/glog/glog_logger_color.go b/vendor/github.com/gogf/gf/v2/os/glog/glog_logger_color.go deleted file mode 100644 index 98bf966c..00000000 --- a/vendor/github.com/gogf/gf/v2/os/glog/glog_logger_color.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package glog - -import "github.com/fatih/color" - -const ( - COLOR_BLACK = 30 + iota - COLOR_RED - COLOR_GREEN - COLOR_YELLOW - COLOR_BLUE - COLOR_MAGENTA - COLOR_CYAN - COLOR_WHITE -) - -// Foreground Hi-Intensity text colors -const ( - COLOR_HI_BLACK = 90 + iota - COLOR_HI_RED - COLOR_HI_GREEN - COLOR_HI_YELLOW - COLOR_HI_BLUE - COLOR_HI_MAGENTA - COLOR_HI_CYAN - COLOR_HI_WHITE -) - -// defaultLevelColor defines the default level and its mapping prefix string. 
-var defaultLevelColor = map[int]int{ - LEVEL_DEBU: COLOR_YELLOW, - LEVEL_INFO: COLOR_GREEN, - LEVEL_NOTI: COLOR_CYAN, - LEVEL_WARN: COLOR_MAGENTA, - LEVEL_ERRO: COLOR_RED, - LEVEL_CRIT: COLOR_HI_RED, - LEVEL_PANI: COLOR_HI_RED, - LEVEL_FATA: COLOR_HI_RED, -} - -// getColoredStr returns a string that is colored by given color. -func (l *Logger) getColoredStr(c int, s string) string { - return color.New(color.Attribute(c)).Sprint(s) -} - -func (l *Logger) getColorByLevel(level int) int { - return defaultLevelColor[level] -} diff --git a/vendor/github.com/gogf/gf/v2/os/glog/glog_logger_config.go b/vendor/github.com/gogf/gf/v2/os/glog/glog_logger_config.go deleted file mode 100644 index 916665e2..00000000 --- a/vendor/github.com/gogf/gf/v2/os/glog/glog_logger_config.go +++ /dev/null @@ -1,293 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package glog - -import ( - "context" - "io" - "strings" - "time" - - "github.com/gogf/gf/v2/container/gtype" - "github.com/gogf/gf/v2/errors/gcode" - "github.com/gogf/gf/v2/errors/gerror" - "github.com/gogf/gf/v2/internal/intlog" - "github.com/gogf/gf/v2/os/gfile" - "github.com/gogf/gf/v2/util/gconv" - "github.com/gogf/gf/v2/util/gutil" -) - -// Config is the configuration object for logger. -type Config struct { - Handlers []Handler `json:"-"` // Logger handlers which implement feature similar as middleware. - Writer io.Writer `json:"-"` // Customized io.Writer. - Flags int `json:"flags"` // Extra flags for logging output features. - TimeFormat string `json:"timeFormat"` // Logging time format - Path string `json:"path"` // Logging directory path. - File string `json:"file"` // Format pattern for logging file. - Level int `json:"level"` // Output level. 
- Prefix string `json:"prefix"` // Prefix string for every logging content. - StSkip int `json:"stSkip"` // Skipping count for stack. - StStatus int `json:"stStatus"` // Stack status(1: enabled - default; 0: disabled) - StFilter string `json:"stFilter"` // Stack string filter. - CtxKeys []interface{} `json:"ctxKeys"` // Context keys for logging, which is used for value retrieving from context. - HeaderPrint bool `json:"header"` // Print header or not(true in default). - StdoutPrint bool `json:"stdout"` // Output to stdout or not(true in default). - LevelPrint bool `json:"levelPrint"` // Print level format string or not(true in default). - LevelPrefixes map[int]string `json:"levelPrefixes"` // Logging level to its prefix string mapping. - RotateSize int64 `json:"rotateSize"` // Rotate the logging file if its size > 0 in bytes. - RotateExpire time.Duration `json:"rotateExpire"` // Rotate the logging file if its mtime exceeds this duration. - RotateBackupLimit int `json:"rotateBackupLimit"` // Max backup for rotated files, default is 0, means no backups. - RotateBackupExpire time.Duration `json:"rotateBackupExpire"` // Max expires for rotated files, which is 0 in default, means no expiration. - RotateBackupCompress int `json:"rotateBackupCompress"` // Compress level for rotated files using gzip algorithm. It's 0 in default, means no compression. - RotateCheckInterval time.Duration `json:"rotateCheckInterval"` // Asynchronously checks the backups and expiration at intervals. It's 1 hour in default. - StdoutColorDisabled bool `json:"stdoutColorDisabled"` // Logging level prefix with color to writer or not (false in default). - WriterColorEnable bool `json:"writerColorEnable"` // Logging level prefix with color to writer or not (false in default). - internalConfig -} - -type internalConfig struct { - rotatedHandlerInitialized *gtype.Bool // Whether the rotation feature initialized. -} - -// DefaultConfig returns the default configuration for logger. 
-func DefaultConfig() Config { - c := Config{ - File: defaultFileFormat, - Flags: F_TIME_STD, - TimeFormat: "", - Level: LEVEL_ALL, - CtxKeys: []interface{}{}, - StStatus: 1, - HeaderPrint: true, - StdoutPrint: true, - LevelPrint: true, - LevelPrefixes: make(map[int]string, len(defaultLevelPrefixes)), - RotateCheckInterval: time.Hour, - internalConfig: internalConfig{ - rotatedHandlerInitialized: gtype.NewBool(), - }, - } - for k, v := range defaultLevelPrefixes { - c.LevelPrefixes[k] = v - } - if !defaultDebug { - c.Level = c.Level & ^LEVEL_DEBU - } - return c -} - -// GetConfig returns the configuration of current Logger. -func (l *Logger) GetConfig() Config { - return l.config -} - -// SetConfig set configurations for the logger. -func (l *Logger) SetConfig(config Config) error { - l.config = config - // Necessary validation. - if config.Path != "" { - if err := l.SetPath(config.Path); err != nil { - intlog.Errorf(context.TODO(), `%+v`, err) - return err - } - } - intlog.Printf(context.TODO(), "SetConfig: %+v", l.config) - return nil -} - -// SetConfigWithMap set configurations with map for the logger. -func (l *Logger) SetConfigWithMap(m map[string]interface{}) error { - if len(m) == 0 { - return gerror.NewCode(gcode.CodeInvalidParameter, "configuration cannot be empty") - } - // The m now is a shallow copy of m. - // A little tricky, isn't it? - m = gutil.MapCopy(m) - // Change string configuration to int value for level. - levelKey, levelValue := gutil.MapPossibleItemByKey(m, "Level") - if levelValue != nil { - if level, ok := levelStringMap[strings.ToUpper(gconv.String(levelValue))]; ok { - m[levelKey] = level - } else { - return gerror.NewCodef(gcode.CodeInvalidParameter, `invalid level string: %v`, levelValue) - } - } - // Change string configuration to int value for file rotation size. 
- rotateSizeKey, rotateSizeValue := gutil.MapPossibleItemByKey(m, "RotateSize") - if rotateSizeValue != nil { - m[rotateSizeKey] = gfile.StrToSize(gconv.String(rotateSizeValue)) - if m[rotateSizeKey] == -1 { - return gerror.NewCodef(gcode.CodeInvalidConfiguration, `invalid rotate size: %v`, rotateSizeValue) - } - } - if err := gconv.Struct(m, &l.config); err != nil { - return err - } - return l.SetConfig(l.config) -} - -// SetDebug enables/disables the debug level for logger. -// The debug level is enabled in default. -func (l *Logger) SetDebug(debug bool) { - if debug { - l.config.Level = l.config.Level | LEVEL_DEBU - } else { - l.config.Level = l.config.Level & ^LEVEL_DEBU - } -} - -// SetAsync enables/disables async logging output feature. -func (l *Logger) SetAsync(enabled bool) { - if enabled { - l.config.Flags = l.config.Flags | F_ASYNC - } else { - l.config.Flags = l.config.Flags & ^F_ASYNC - } -} - -// SetFlags sets extra flags for logging output features. -func (l *Logger) SetFlags(flags int) { - l.config.Flags = flags -} - -// GetFlags returns the flags of logger. -func (l *Logger) GetFlags() int { - return l.config.Flags -} - -// SetStack enables/disables the stack feature in failure logging outputs. -func (l *Logger) SetStack(enabled bool) { - if enabled { - l.config.StStatus = 1 - } else { - l.config.StStatus = 0 - } -} - -// SetStackSkip sets the stack offset from the end point. -func (l *Logger) SetStackSkip(skip int) { - l.config.StSkip = skip -} - -// SetStackFilter sets the stack filter from the end point. -func (l *Logger) SetStackFilter(filter string) { - l.config.StFilter = filter -} - -// SetCtxKeys sets the context keys for logger. The keys is used for retrieving values -// from context and printing them to logging content. -// -// Note that multiple calls of this function will overwrite the previous set context keys. 
-func (l *Logger) SetCtxKeys(keys ...interface{}) { - l.config.CtxKeys = keys -} - -// AppendCtxKeys appends extra keys to logger. -// It ignores the key if it is already appended to the logger previously. -func (l *Logger) AppendCtxKeys(keys ...interface{}) { - var isExist bool - for _, key := range keys { - isExist = false - for _, ctxKey := range l.config.CtxKeys { - if ctxKey == key { - isExist = true - break - } - } - if !isExist { - l.config.CtxKeys = append(l.config.CtxKeys, key) - } - } -} - -// GetCtxKeys retrieves and returns the context keys for logging. -func (l *Logger) GetCtxKeys() []interface{} { - return l.config.CtxKeys -} - -// SetWriter sets the customized logging `writer` for logging. -// The `writer` object should implement the io.Writer interface. -// Developer can use customized logging `writer` to redirect logging output to another service, -// eg: kafka, mysql, mongodb, etc. -func (l *Logger) SetWriter(writer io.Writer) { - l.config.Writer = writer -} - -// GetWriter returns the customized writer object, which implements the io.Writer interface. -// It returns nil if no writer previously set. -func (l *Logger) GetWriter() io.Writer { - return l.config.Writer -} - -// SetPath sets the directory path for file logging. -func (l *Logger) SetPath(path string) error { - if path == "" { - return gerror.NewCode(gcode.CodeInvalidParameter, "logging path is empty") - } - if !gfile.Exists(path) { - if err := gfile.Mkdir(path); err != nil { - return gerror.Wrapf(err, `Mkdir "%s" failed in PWD "%s"`, path, gfile.Pwd()) - } - } - l.config.Path = strings.TrimRight(path, gfile.Separator) - return nil -} - -// GetPath returns the logging directory path for file logging. -// It returns empty string if no directory path set. -func (l *Logger) GetPath() string { - return l.config.Path -} - -// SetFile sets the file name `pattern` for file logging. -// Datetime pattern can be used in `pattern`, eg: access-{Ymd}.log. 
-// The default file name pattern is: Y-m-d.log, eg: 2018-01-01.log -func (l *Logger) SetFile(pattern string) { - l.config.File = pattern -} - -// SetTimeFormat sets the time format for the logging time. -func (l *Logger) SetTimeFormat(timeFormat string) { - l.config.TimeFormat = timeFormat -} - -// SetStdoutPrint sets whether output the logging contents to stdout, which is true in default. -func (l *Logger) SetStdoutPrint(enabled bool) { - l.config.StdoutPrint = enabled -} - -// SetHeaderPrint sets whether output header of the logging contents, which is true in default. -func (l *Logger) SetHeaderPrint(enabled bool) { - l.config.HeaderPrint = enabled -} - -// SetLevelPrint sets whether output level string of the logging contents, which is true in default. -func (l *Logger) SetLevelPrint(enabled bool) { - l.config.LevelPrint = enabled -} - -// SetPrefix sets prefix string for every logging content. -// Prefix is part of header, which means if header output is shut, no prefix will be output. -func (l *Logger) SetPrefix(prefix string) { - l.config.Prefix = prefix -} - -// SetHandlers sets the logging handlers for current logger. -func (l *Logger) SetHandlers(handlers ...Handler) { - l.config.Handlers = handlers -} - -// SetWriterColorEnable enables file/writer logging with color. -func (l *Logger) SetWriterColorEnable(enabled bool) { - l.config.WriterColorEnable = enabled -} - -// SetStdoutColorDisabled disables stdout logging with color. -func (l *Logger) SetStdoutColorDisabled(disabled bool) { - l.config.StdoutColorDisabled = disabled -} diff --git a/vendor/github.com/gogf/gf/v2/os/glog/glog_logger_handler.go b/vendor/github.com/gogf/gf/v2/os/glog/glog_logger_handler.go deleted file mode 100644 index 4929fb51..00000000 --- a/vendor/github.com/gogf/gf/v2/os/glog/glog_logger_handler.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. 
-// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package glog - -import ( - "bytes" - "context" - "time" -) - -// Handler is function handler for custom logging content outputs. -type Handler func(ctx context.Context, in *HandlerInput) - -// HandlerInput is the input parameter struct for logging Handler. -type HandlerInput struct { - internalHandlerInfo - Logger *Logger // Current Logger object. - Buffer *bytes.Buffer // Buffer for logging content outputs. - Time time.Time // Logging time, which is the time that logging triggers. - TimeFormat string // Formatted time string, like "2016-01-09 12:00:00". - Color int // Using color, like COLOR_RED, COLOR_BLUE, etc. Eg: 34 - Level int // Using level, like LEVEL_INFO, LEVEL_ERRO, etc. Eg: 256 - LevelFormat string // Formatted level string, like "DEBU", "ERRO", etc. Eg: ERRO - CallerFunc string // The source function name that calls logging, only available if F_CALLER_FN set. - CallerPath string // The source file path and its line number that calls logging, only available if F_FILE_SHORT or F_FILE_LONG set. - CtxStr string // The retrieved context value string from context, only available if Config.CtxKeys configured. - TraceId string // Trace id, only available if OpenTelemetry is enabled. - Prefix string // Custom prefix string for logging content. - Content string // Content is the main logging content without error stack string produced by logger. - Stack string // Stack string produced by logger, only available if Config.StStatus configured. - IsAsync bool // IsAsync marks it is in asynchronous logging. -} - -type internalHandlerInfo struct { - index int // Middleware handling index for internal usage. - handlers []Handler // Handler array calling bu index. -} - -// defaultHandler is the default handler for package. -var defaultHandler Handler - -// defaultPrintHandler is a handler for logging content printing. 
-// This handler outputs logging content to file/stdout/write if any of them configured. -func defaultPrintHandler(ctx context.Context, in *HandlerInput) { - buffer := in.Logger.doDefaultPrint(ctx, in) - if in.Buffer.Len() == 0 { - in.Buffer = buffer - } -} - -// SetDefaultHandler sets default handler for package. -func SetDefaultHandler(handler Handler) { - defaultHandler = handler -} - -// GetDefaultHandler returns the default handler of package. -func GetDefaultHandler() Handler { - return defaultHandler -} - -// Next calls the next logging handler in middleware way. -func (in *HandlerInput) Next(ctx context.Context) { - in.index++ - if in.index < len(in.handlers) { - in.handlers[in.index](ctx, in) - } -} - -// String returns the logging content formatted by default logging handler. -func (in *HandlerInput) String(withColor ...bool) string { - formatWithColor := false - if len(withColor) > 0 { - formatWithColor = withColor[0] - } - return in.getDefaultBuffer(formatWithColor).String() -} - -func (in *HandlerInput) getDefaultBuffer(withColor bool) *bytes.Buffer { - buffer := bytes.NewBuffer(nil) - if in.Logger.config.HeaderPrint { - if in.TimeFormat != "" { - buffer.WriteString(in.TimeFormat) - } - if in.Logger.config.LevelPrint && in.LevelFormat != "" { - var levelStr = "[" + in.LevelFormat + "]" - if withColor { - in.addStringToBuffer(buffer, in.Logger.getColoredStr( - in.Logger.getColorByLevel(in.Level), levelStr, - )) - } else { - in.addStringToBuffer(buffer, levelStr) - } - } - } - if in.TraceId != "" { - in.addStringToBuffer(buffer, "{"+in.TraceId+"}") - } - if in.CtxStr != "" { - in.addStringToBuffer(buffer, "{"+in.CtxStr+"}") - } - if in.Logger.config.HeaderPrint { - if in.Prefix != "" { - in.addStringToBuffer(buffer, in.Prefix) - } - if in.CallerFunc != "" { - in.addStringToBuffer(buffer, in.CallerFunc) - } - if in.CallerPath != "" { - in.addStringToBuffer(buffer, in.CallerPath) - } - } - if in.Content != "" { - if in.Stack != "" { - 
in.addStringToBuffer(buffer, in.Content+"\nStack:\n"+in.Stack) - } else { - in.addStringToBuffer(buffer, in.Content) - } - } - // avoid a single space at the end of a line. - buffer.WriteString("\n") - return buffer -} - -func (in *HandlerInput) getRealBuffer(withColor bool) *bytes.Buffer { - if in.Buffer.Len() > 0 { - return in.Buffer - } - return in.getDefaultBuffer(withColor) -} - -func (in *HandlerInput) addStringToBuffer(buffer *bytes.Buffer, strings ...string) { - for _, s := range strings { - if buffer.Len() > 0 { - buffer.WriteByte(' ') - } - buffer.WriteString(s) - } -} diff --git a/vendor/github.com/gogf/gf/v2/os/glog/glog_logger_handler_json.go b/vendor/github.com/gogf/gf/v2/os/glog/glog_logger_handler_json.go deleted file mode 100644 index 20b82c74..00000000 --- a/vendor/github.com/gogf/gf/v2/os/glog/glog_logger_handler_json.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package glog - -import ( - "context" - - "github.com/gogf/gf/v2/internal/json" -) - -// HandlerOutputJson is the structure outputting logging content as single json. -type HandlerOutputJson struct { - Time string `json:""` // Formatted time string, like "2016-01-09 12:00:00". - TraceId string `json:",omitempty"` // Trace id, only available if tracing is enabled. - CtxStr string `json:",omitempty"` // The retrieved context value string from context, only available if Config.CtxKeys configured. - Level string `json:""` // Formatted level string, like "DEBU", "ERRO", etc. Eg: ERRO - CallerFunc string `json:",omitempty"` // The source function name that calls logging, only available if F_CALLER_FN set. 
- CallerPath string `json:",omitempty"` // The source file path and its line number that calls logging, only available if F_FILE_SHORT or F_FILE_LONG set. - Prefix string `json:",omitempty"` // Custom prefix string for logging content. - Content string `json:""` // Content is the main logging content, containing error stack string produced by logger. - Stack string `json:",omitempty"` // Stack string produced by logger, only available if Config.StStatus configured. -} - -// HandlerJson is a handler for output logging content as a single json string. -func HandlerJson(ctx context.Context, in *HandlerInput) { - output := HandlerOutputJson{ - Time: in.TimeFormat, - TraceId: in.TraceId, - CtxStr: in.CtxStr, - Level: in.LevelFormat, - CallerFunc: in.CallerFunc, - CallerPath: in.CallerPath, - Prefix: in.Prefix, - Content: in.Content, - Stack: in.Stack, - } - jsonBytes, err := json.Marshal(output) - if err != nil { - panic(err) - } - in.Buffer.Write(jsonBytes) - in.Buffer.Write([]byte("\n")) - in.Next(ctx) -} diff --git a/vendor/github.com/gogf/gf/v2/os/glog/glog_logger_level.go b/vendor/github.com/gogf/gf/v2/os/glog/glog_logger_level.go deleted file mode 100644 index 5b4ecc88..00000000 --- a/vendor/github.com/gogf/gf/v2/os/glog/glog_logger_level.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package glog - -import ( - "strings" - - "github.com/gogf/gf/v2/errors/gcode" - "github.com/gogf/gf/v2/errors/gerror" -) - -// Note that the LEVEL_PANI and LEVEL_FATA levels are not used for logging output, -// but for prefix configurations. 
-const ( - LEVEL_ALL = LEVEL_DEBU | LEVEL_INFO | LEVEL_NOTI | LEVEL_WARN | LEVEL_ERRO | LEVEL_CRIT - LEVEL_DEV = LEVEL_ALL - LEVEL_PROD = LEVEL_WARN | LEVEL_ERRO | LEVEL_CRIT - LEVEL_NONE = 0 - LEVEL_DEBU = 1 << iota // 16 - LEVEL_INFO // 32 - LEVEL_NOTI // 64 - LEVEL_WARN // 128 - LEVEL_ERRO // 256 - LEVEL_CRIT // 512 - LEVEL_PANI // 1024 - LEVEL_FATA // 2048 -) - -// defaultLevelPrefixes defines the default level and its mapping prefix string. -var defaultLevelPrefixes = map[int]string{ - LEVEL_DEBU: "DEBU", - LEVEL_INFO: "INFO", - LEVEL_NOTI: "NOTI", - LEVEL_WARN: "WARN", - LEVEL_ERRO: "ERRO", - LEVEL_CRIT: "CRIT", - LEVEL_PANI: "PANI", - LEVEL_FATA: "FATA", -} - -// levelStringMap defines level string name to its level mapping. -var levelStringMap = map[string]int{ - "ALL": LEVEL_DEBU | LEVEL_INFO | LEVEL_NOTI | LEVEL_WARN | LEVEL_ERRO | LEVEL_CRIT, - "DEV": LEVEL_DEBU | LEVEL_INFO | LEVEL_NOTI | LEVEL_WARN | LEVEL_ERRO | LEVEL_CRIT, - "DEVELOP": LEVEL_DEBU | LEVEL_INFO | LEVEL_NOTI | LEVEL_WARN | LEVEL_ERRO | LEVEL_CRIT, - "PROD": LEVEL_WARN | LEVEL_ERRO | LEVEL_CRIT, - "PRODUCT": LEVEL_WARN | LEVEL_ERRO | LEVEL_CRIT, - "DEBU": LEVEL_DEBU | LEVEL_INFO | LEVEL_NOTI | LEVEL_WARN | LEVEL_ERRO | LEVEL_CRIT, - "DEBUG": LEVEL_DEBU | LEVEL_INFO | LEVEL_NOTI | LEVEL_WARN | LEVEL_ERRO | LEVEL_CRIT, - "INFO": LEVEL_INFO | LEVEL_NOTI | LEVEL_WARN | LEVEL_ERRO | LEVEL_CRIT, - "NOTI": LEVEL_NOTI | LEVEL_WARN | LEVEL_ERRO | LEVEL_CRIT, - "NOTICE": LEVEL_NOTI | LEVEL_WARN | LEVEL_ERRO | LEVEL_CRIT, - "WARN": LEVEL_WARN | LEVEL_ERRO | LEVEL_CRIT, - "WARNING": LEVEL_WARN | LEVEL_ERRO | LEVEL_CRIT, - "ERRO": LEVEL_ERRO | LEVEL_CRIT, - "ERROR": LEVEL_ERRO | LEVEL_CRIT, - "CRIT": LEVEL_CRIT, - "CRITICAL": LEVEL_CRIT, -} - -// SetLevel sets the logging level. -// Note that levels ` LEVEL_CRIT | LEVEL_PANI | LEVEL_FATA ` cannot be removed for logging content, -// which are automatically added to levels. 
-func (l *Logger) SetLevel(level int) { - l.config.Level = level | LEVEL_CRIT | LEVEL_PANI | LEVEL_FATA -} - -// GetLevel returns the logging level value. -func (l *Logger) GetLevel() int { - return l.config.Level -} - -// SetLevelStr sets the logging level by level string. -func (l *Logger) SetLevelStr(levelStr string) error { - if level, ok := levelStringMap[strings.ToUpper(levelStr)]; ok { - l.config.Level = level - } else { - return gerror.NewCodef(gcode.CodeInvalidParameter, `invalid level string: %s`, levelStr) - } - return nil -} - -// SetLevelPrefix sets the prefix string for specified level. -func (l *Logger) SetLevelPrefix(level int, prefix string) { - l.config.LevelPrefixes[level] = prefix -} - -// SetLevelPrefixes sets the level to prefix string mapping for the logger. -func (l *Logger) SetLevelPrefixes(prefixes map[int]string) { - for k, v := range prefixes { - l.config.LevelPrefixes[k] = v - } -} - -// GetLevelPrefix returns the prefix string for specified level. -func (l *Logger) GetLevelPrefix(level int) string { - return l.config.LevelPrefixes[level] -} - -// getLevelPrefixWithBrackets returns the prefix string with brackets for specified level. -func (l *Logger) getLevelPrefixWithBrackets(level int) string { - levelStr := "" - if s, ok := l.config.LevelPrefixes[level]; ok { - levelStr = "[" + s + "]" - } - return levelStr -} diff --git a/vendor/github.com/gogf/gf/v2/os/glog/glog_logger_rotate.go b/vendor/github.com/gogf/gf/v2/os/glog/glog_logger_rotate.go deleted file mode 100644 index 6090e517..00000000 --- a/vendor/github.com/gogf/gf/v2/os/glog/glog_logger_rotate.go +++ /dev/null @@ -1,308 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. 
- -package glog - -import ( - "context" - "fmt" - "runtime" - "strings" - "time" - - "github.com/gogf/gf/v2/container/garray" - "github.com/gogf/gf/v2/encoding/gcompress" - "github.com/gogf/gf/v2/internal/intlog" - "github.com/gogf/gf/v2/os/gfile" - "github.com/gogf/gf/v2/os/gmlock" - "github.com/gogf/gf/v2/os/gtime" - "github.com/gogf/gf/v2/os/gtimer" - "github.com/gogf/gf/v2/text/gregex" -) - -const ( - memoryLockPrefixForRotating = "glog.rotateChecksTimely:" -) - -// rotateFileBySize rotates the current logging file according to the -// configured rotation size. -func (l *Logger) rotateFileBySize(ctx context.Context, now time.Time) { - if l.config.RotateSize <= 0 { - return - } - if err := l.doRotateFile(ctx, l.getFilePath(now)); err != nil { - // panic(err) - intlog.Errorf(ctx, `%+v`, err) - } -} - -// doRotateFile rotates the given logging file. -func (l *Logger) doRotateFile(ctx context.Context, filePath string) error { - memoryLockKey := "glog.doRotateFile:" + filePath - if !gmlock.TryLock(memoryLockKey) { - return nil - } - defer gmlock.Unlock(memoryLockKey) - - intlog.PrintFunc(ctx, func() string { - return fmt.Sprintf(`start rotating file by size: %s, file: %s`, gfile.SizeFormat(filePath), filePath) - }) - defer intlog.PrintFunc(ctx, func() string { - return fmt.Sprintf(`done rotating file by size: %s, size: %s`, gfile.SizeFormat(filePath), filePath) - }) - - // No backups, it then just removes the current logging file. - if l.config.RotateBackupLimit == 0 { - if err := gfile.Remove(filePath); err != nil { - return err - } - intlog.Printf( - ctx, - `%d size exceeds, no backups set, remove original logging file: %s`, - l.config.RotateSize, filePath, - ) - return nil - } - // Else it creates new backup files. 
- var ( - dirPath = gfile.Dir(filePath) - fileName = gfile.Name(filePath) - fileExtName = gfile.ExtName(filePath) - newFilePath = "" - ) - // Rename the logging file by adding extra datetime information to microseconds, like: - // access.log -> access.20200326101301899002.log - // access.20200326.log -> access.20200326.20200326101301899002.log - for { - var ( - now = gtime.Now() - micro = now.Microsecond() % 1000 - ) - if micro == 0 { - micro = 101 - } else { - for micro < 100 { - micro *= 10 - } - } - newFilePath = gfile.Join( - dirPath, - fmt.Sprintf( - `%s.%s%d.%s`, - fileName, now.Format("YmdHisu"), micro, fileExtName, - ), - ) - if !gfile.Exists(newFilePath) { - break - } else { - intlog.Printf(ctx, `rotation file exists, continue: %s`, newFilePath) - } - } - intlog.Printf(ctx, "rotating file by size from %s to %s", filePath, newFilePath) - if err := gfile.Rename(filePath, newFilePath); err != nil { - return err - } - return nil -} - -// rotateChecksTimely timely checks the backups expiration and the compression. -func (l *Logger) rotateChecksTimely(ctx context.Context) { - defer gtimer.AddOnce(ctx, l.config.RotateCheckInterval, l.rotateChecksTimely) - - // Checks whether file rotation not enabled. - if l.config.RotateSize <= 0 && l.config.RotateExpire == 0 { - intlog.Printf( - ctx, - "logging rotation ignore checks: RotateSize: %d, RotateExpire: %s", - l.config.RotateSize, l.config.RotateExpire.String(), - ) - return - } - - // It here uses memory lock to guarantee the concurrent safety. 
- memoryLockKey := memoryLockPrefixForRotating + l.config.Path - if !gmlock.TryLock(memoryLockKey) { - return - } - defer gmlock.Unlock(memoryLockKey) - - var ( - now = time.Now() - pattern = "*.log, *.gz" - files, err = gfile.ScanDirFile(l.config.Path, pattern, true) - ) - if err != nil { - intlog.Errorf(ctx, `%+v`, err) - } - intlog.Printf(ctx, "logging rotation start checks: %+v", files) - // get file name regex pattern - // access-{y-m-d}-test.log => access-$-test.log => access-\$-test\.log => access-(.+?)-test\.log - fileNameRegexPattern, _ := gregex.ReplaceString(`{.+?}`, "$", l.config.File) - fileNameRegexPattern = gregex.Quote(fileNameRegexPattern) - fileNameRegexPattern = strings.ReplaceAll(fileNameRegexPattern, "\\$", "(.+?)") - // ============================================================= - // Rotation of expired file checks. - // ============================================================= - if l.config.RotateExpire > 0 { - var ( - mtime time.Time - subDuration time.Duration - expireRotated bool - ) - for _, file := range files { - // ignore backup file - if gregex.IsMatchString(`.+\.\d{20}\.log`, gfile.Basename(file)) { - continue - } - // ignore not matching file - if !gregex.IsMatchString(fileNameRegexPattern, file) { - continue - } - mtime = gfile.MTime(file) - subDuration = now.Sub(mtime) - if subDuration > l.config.RotateExpire { - func() { - memoryLockFileKey := memoryLockPrefixForPrintingToFile + file - if !gmlock.TryLock(memoryLockFileKey) { - return - } - defer gmlock.Unlock(memoryLockFileKey) - if runtime.GOOS == "windows" { - fp := l.getOpenedFilePointer(ctx, file) - if fp == nil { - intlog.Errorf(ctx, `got nil file pointer for: %s`, file) - return - } - if err := fp.Close(true); err != nil { - intlog.Errorf(ctx, `%+v`, err) - } - } - expireRotated = true - intlog.Printf( - ctx, - `%v - %v = %v > %v, rotation expire logging file: %s`, - now, mtime, subDuration, l.config.RotateExpire, file, - ) - if err := l.doRotateFile(ctx, file); err 
!= nil { - intlog.Errorf(ctx, `%+v`, err) - } - }() - } - } - if expireRotated { - // Update the files array. - files, err = gfile.ScanDirFile(l.config.Path, pattern, true) - if err != nil { - intlog.Errorf(ctx, `%+v`, err) - } - } - } - - // ============================================================= - // Rotated file compression. - // ============================================================= - needCompressFileArray := garray.NewStrArray() - if l.config.RotateBackupCompress > 0 { - for _, file := range files { - // Eg: access.20200326101301899002.log.gz - if gfile.ExtName(file) == "gz" { - continue - } - // ignore not matching file - originalLoggingFilePath, _ := gregex.ReplaceString(`\.\d{20}`, "", file) - if !gregex.IsMatchString(fileNameRegexPattern, originalLoggingFilePath) { - continue - } - // Eg: - // access.20200326101301899002.log - if gregex.IsMatchString(`.+\.\d{20}\.log`, gfile.Basename(file)) { - needCompressFileArray.Append(file) - } - } - if needCompressFileArray.Len() > 0 { - needCompressFileArray.Iterator(func(_ int, path string) bool { - err := gcompress.GzipFile(path, path+".gz") - if err == nil { - intlog.Printf(ctx, `compressed done, remove original logging file: %s`, path) - if err = gfile.Remove(path); err != nil { - intlog.Print(ctx, err) - } - } else { - intlog.Print(ctx, err) - } - return true - }) - // Update the files array. - files, err = gfile.ScanDirFile(l.config.Path, pattern, true) - if err != nil { - intlog.Errorf(ctx, `%+v`, err) - } - } - } - - // ============================================================= - // Backups count limitation and expiration checks. - // ============================================================= - backupFiles := garray.NewSortedArray(func(a, b interface{}) int { - // Sorted by rotated/backup file mtime. - // The older rotated/backup file is put in the head of array. 
- var ( - file1 = a.(string) - file2 = b.(string) - result = gfile.MTimestampMilli(file1) - gfile.MTimestampMilli(file2) - ) - if result <= 0 { - return -1 - } - return 1 - }) - if l.config.RotateBackupLimit > 0 || l.config.RotateBackupExpire > 0 { - for _, file := range files { - // ignore not matching file - originalLoggingFilePath, _ := gregex.ReplaceString(`\.\d{20}`, "", file) - if !gregex.IsMatchString(fileNameRegexPattern, originalLoggingFilePath) { - continue - } - if gregex.IsMatchString(`.+\.\d{20}\.log`, gfile.Basename(file)) { - backupFiles.Add(file) - } - } - intlog.Printf(ctx, `calculated backup files array: %+v`, backupFiles) - diff := backupFiles.Len() - l.config.RotateBackupLimit - for i := 0; i < diff; i++ { - path, _ := backupFiles.PopLeft() - intlog.Printf(ctx, `remove exceeded backup limit file: %s`, path) - if err := gfile.Remove(path.(string)); err != nil { - intlog.Errorf(ctx, `%+v`, err) - } - } - // Backups expiration checking. - if l.config.RotateBackupExpire > 0 { - var ( - mtime time.Time - subDuration time.Duration - ) - backupFiles.Iterator(func(_ int, v interface{}) bool { - path := v.(string) - mtime = gfile.MTime(path) - subDuration = now.Sub(mtime) - if subDuration > l.config.RotateBackupExpire { - intlog.Printf( - ctx, - `%v - %v = %v > %v, remove expired backup file: %s`, - now, mtime, subDuration, l.config.RotateBackupExpire, path, - ) - if err := gfile.Remove(path); err != nil { - intlog.Errorf(ctx, `%+v`, err) - } - return true - } else { - return false - } - }) - } - } -} diff --git a/vendor/github.com/gogf/gf/v2/os/glog/glog_logger_writer.go b/vendor/github.com/gogf/gf/v2/os/glog/glog_logger_writer.go deleted file mode 100644 index 610e5e53..00000000 --- a/vendor/github.com/gogf/gf/v2/os/glog/glog_logger_writer.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. 
-// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package glog - -import ( - "bytes" - "context" -) - -// Write implements the io.Writer interface. -// It just prints the content using Print. -func (l *Logger) Write(p []byte) (n int, err error) { - l.Header(false).Print(context.TODO(), string(bytes.TrimRight(p, "\r\n"))) - return len(p), nil -} diff --git a/vendor/github.com/gogf/gf/v2/os/gmlock/gmlock.go b/vendor/github.com/gogf/gf/v2/os/gmlock/gmlock.go deleted file mode 100644 index 92be26f4..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gmlock/gmlock.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -// Package gmlock implements a concurrent-safe memory-based locker. -package gmlock - -var ( - // Default locker. - locker = New() -) - -// Lock locks the `key` with writing lock. -// If there's a write/reading lock the `key`, -// it will blocks until the lock is released. -func Lock(key string) { - locker.Lock(key) -} - -// TryLock tries locking the `key` with writing lock, -// it returns true if success, or if there's a write/reading lock the `key`, -// it returns false. -func TryLock(key string) bool { - return locker.TryLock(key) -} - -// Unlock unlocks the writing lock of the `key`. -func Unlock(key string) { - locker.Unlock(key) -} - -// RLock locks the `key` with reading lock. -// If there's a writing lock on `key`, -// it will blocks until the writing lock is released. -func RLock(key string) { - locker.RLock(key) -} - -// TryRLock tries locking the `key` with reading lock. -// It returns true if success, or if there's a writing lock on `key`, it returns false. 
-func TryRLock(key string) bool { - return locker.TryRLock(key) -} - -// RUnlock unlocks the reading lock of the `key`. -func RUnlock(key string) { - locker.RUnlock(key) -} - -// LockFunc locks the `key` with writing lock and callback function `f`. -// If there's a write/reading lock the `key`, -// it will blocks until the lock is released. -// -// It releases the lock after `f` is executed. -func LockFunc(key string, f func()) { - locker.LockFunc(key, f) -} - -// RLockFunc locks the `key` with reading lock and callback function `f`. -// If there's a writing lock the `key`, -// it will blocks until the lock is released. -// -// It releases the lock after `f` is executed. -func RLockFunc(key string, f func()) { - locker.RLockFunc(key, f) -} - -// TryLockFunc locks the `key` with writing lock and callback function `f`. -// It returns true if success, or else if there's a write/reading lock the `key`, it return false. -// -// It releases the lock after `f` is executed. -func TryLockFunc(key string, f func()) bool { - return locker.TryLockFunc(key, f) -} - -// TryRLockFunc locks the `key` with reading lock and callback function `f`. -// It returns true if success, or else if there's a writing lock the `key`, it returns false. -// -// It releases the lock after `f` is executed. -func TryRLockFunc(key string, f func()) bool { - return locker.TryRLockFunc(key, f) -} - -// Remove removes mutex with given `key`. -func Remove(key string) { - locker.Remove(key) -} diff --git a/vendor/github.com/gogf/gf/v2/os/gmlock/gmlock_locker.go b/vendor/github.com/gogf/gf/v2/os/gmlock/gmlock_locker.go deleted file mode 100644 index b802d76f..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gmlock/gmlock_locker.go +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. 
-// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gmlock - -import ( - "sync" - - "github.com/gogf/gf/v2/container/gmap" -) - -// Locker is a memory based locker. -// Note that there's no cache expire mechanism for mutex in locker. -// You need remove certain mutex manually when you do not want use it anymore. -type Locker struct { - m *gmap.StrAnyMap -} - -// New creates and returns a new memory locker. -// A memory locker can lock/unlock with dynamic string key. -func New() *Locker { - return &Locker{ - m: gmap.NewStrAnyMap(true), - } -} - -// Lock locks the `key` with writing lock. -// If there's a write/reading lock the `key`, -// it will block until the lock is released. -func (l *Locker) Lock(key string) { - l.getOrNewMutex(key).Lock() -} - -// TryLock tries locking the `key` with writing lock, -// it returns true if success, or it returns false if there's a writing/reading lock the `key`. -func (l *Locker) TryLock(key string) bool { - return l.getOrNewMutex(key).TryLock() -} - -// Unlock unlocks the writing lock of the `key`. -func (l *Locker) Unlock(key string) { - if v := l.m.Get(key); v != nil { - v.(*sync.RWMutex).Unlock() - } -} - -// RLock locks the `key` with reading lock. -// If there's a writing lock on `key`, -// it will blocks until the writing lock is released. -func (l *Locker) RLock(key string) { - l.getOrNewMutex(key).RLock() -} - -// TryRLock tries locking the `key` with reading lock. -// It returns true if success, or if there's a writing lock on `key`, it returns false. -func (l *Locker) TryRLock(key string) bool { - return l.getOrNewMutex(key).TryRLock() -} - -// RUnlock unlocks the reading lock of the `key`. -func (l *Locker) RUnlock(key string) { - if v := l.m.Get(key); v != nil { - v.(*sync.RWMutex).RUnlock() - } -} - -// LockFunc locks the `key` with writing lock and callback function `f`. 
-// If there's a write/reading lock the `key`, -// it will block until the lock is released. -// -// It releases the lock after `f` is executed. -func (l *Locker) LockFunc(key string, f func()) { - l.Lock(key) - defer l.Unlock(key) - f() -} - -// RLockFunc locks the `key` with reading lock and callback function `f`. -// If there's a writing lock the `key`, -// it will block until the lock is released. -// -// It releases the lock after `f` is executed. -func (l *Locker) RLockFunc(key string, f func()) { - l.RLock(key) - defer l.RUnlock(key) - f() -} - -// TryLockFunc locks the `key` with writing lock and callback function `f`. -// It returns true if success, or else if there's a write/reading lock the `key`, it return false. -// -// It releases the lock after `f` is executed. -func (l *Locker) TryLockFunc(key string, f func()) bool { - if l.TryLock(key) { - defer l.Unlock(key) - f() - return true - } - return false -} - -// TryRLockFunc locks the `key` with reading lock and callback function `f`. -// It returns true if success, or else if there's a writing lock the `key`, it returns false. -// -// It releases the lock after `f` is executed. -func (l *Locker) TryRLockFunc(key string, f func()) bool { - if l.TryRLock(key) { - defer l.RUnlock(key) - f() - return true - } - return false -} - -// Remove removes mutex with given `key` from locker. -func (l *Locker) Remove(key string) { - l.m.Remove(key) -} - -// Clear removes all mutexes from locker. -func (l *Locker) Clear() { - l.m.Clear() -} - -// getOrNewMutex returns the mutex of given `key` if it exists, -// or else creates and returns a new one. 
-func (l *Locker) getOrNewMutex(key string) *sync.RWMutex { - return l.m.GetOrSetFuncLock(key, func() interface{} { - return &sync.RWMutex{} - }).(*sync.RWMutex) -} diff --git a/vendor/github.com/gogf/gf/v2/os/grpool/grpool.go b/vendor/github.com/gogf/gf/v2/os/grpool/grpool.go deleted file mode 100644 index aac2f432..00000000 --- a/vendor/github.com/gogf/gf/v2/os/grpool/grpool.go +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -// Package grpool implements a goroutine reusable pool. -package grpool - -import ( - "context" - "time" - - "github.com/gogf/gf/v2/container/glist" - "github.com/gogf/gf/v2/container/gtype" - "github.com/gogf/gf/v2/errors/gcode" - "github.com/gogf/gf/v2/errors/gerror" - "github.com/gogf/gf/v2/os/gtimer" - "github.com/gogf/gf/v2/util/grand" -) - -// Func is the pool function which contains context parameter. -type Func func(ctx context.Context) - -// RecoverFunc is the pool runtime panic recover function which contains context parameter. -type RecoverFunc func(ctx context.Context, err error) - -// Pool manages the goroutines using pool. -type Pool struct { - limit int // Max goroutine count limit. - count *gtype.Int // Current running goroutine count. - list *glist.List // List for asynchronous job adding purpose. - closed *gtype.Bool // Is pool closed or not. -} - -type localPoolItem struct { - Ctx context.Context - Func Func -} - -const ( - minTimerDuration = 500 * time.Millisecond - maxTimerDuration = 1500 * time.Millisecond -) - -// Default goroutine pool. -var ( - pool = New() -) - -// New creates and returns a new goroutine pool object. -// The parameter `limit` is used to limit the max goroutine count, -// which is not limited in default. 
-func New(limit ...int) *Pool { - p := &Pool{ - limit: -1, - count: gtype.NewInt(), - list: glist.New(true), - closed: gtype.NewBool(), - } - if len(limit) > 0 && limit[0] > 0 { - p.limit = limit[0] - } - timerDuration := grand.D(minTimerDuration, maxTimerDuration) - gtimer.Add(context.Background(), timerDuration, p.supervisor) - return p -} - -// Add pushes a new job to the pool using default goroutine pool. -// The job will be executed asynchronously. -func Add(ctx context.Context, f Func) error { - return pool.Add(ctx, f) -} - -// AddWithRecover pushes a new job to the pool with specified recover function. -// The optional `recoverFunc` is called when any panic during executing of `userFunc`. -// If `recoverFunc` is not passed or given nil, it ignores the panic from `userFunc`. -// The job will be executed asynchronously. -func AddWithRecover(ctx context.Context, userFunc Func, recoverFunc RecoverFunc) error { - return pool.AddWithRecover(ctx, userFunc, recoverFunc) -} - -// Size returns current goroutine count of default goroutine pool. -func Size() int { - return pool.Size() -} - -// Jobs returns current job count of default goroutine pool. -func Jobs() int { - return pool.Jobs() -} - -// Add pushes a new job to the pool. -// The job will be executed asynchronously. -func (p *Pool) Add(ctx context.Context, f Func) error { - for p.closed.Val() { - return gerror.NewCode( - gcode.CodeInvalidOperation, - "goroutine pool is already closed", - ) - } - p.list.PushFront(&localPoolItem{ - Ctx: ctx, - Func: f, - }) - // Check and fork new worker. - p.checkAndFork() - return nil -} - -// checkAndFork checks and creates a new goroutine worker. -// Note that the worker dies if the job function panics and the job has no recover handling. -func (p *Pool) checkAndFork() { - // Check whether fork new goroutine or not. - var n int - for { - n = p.count.Val() - if p.limit != -1 && n >= p.limit { - // No need fork new goroutine. 
- return - } - if p.count.Cas(n, n+1) { - // Use CAS to guarantee atomicity. - break - } - } - // Create job function in goroutine. - go func() { - defer p.count.Add(-1) - - var ( - listItem interface{} - poolItem *localPoolItem - ) - for !p.closed.Val() { - listItem = p.list.PopBack() - if listItem == nil { - return - } - poolItem = listItem.(*localPoolItem) - poolItem.Func(poolItem.Ctx) - } - }() -} - -// AddWithRecover pushes a new job to the pool with specified recover function. -// The optional `recoverFunc` is called when any panic during executing of `userFunc`. -// If `recoverFunc` is not passed or given nil, it ignores the panic from `userFunc`. -// The job will be executed asynchronously. -func (p *Pool) AddWithRecover(ctx context.Context, userFunc Func, recoverFunc RecoverFunc) error { - return p.Add(ctx, func(ctx context.Context) { - defer func() { - if exception := recover(); exception != nil { - if recoverFunc != nil { - if v, ok := exception.(error); ok && gerror.HasStack(v) { - recoverFunc(ctx, v) - } else { - recoverFunc(ctx, gerror.NewCodef(gcode.CodeInternalPanic, "%+v", exception)) - } - } - } - }() - userFunc(ctx) - }) -} - -// Cap returns the capacity of the pool. -// This capacity is defined when pool is created. -// It returns -1 if there's no limit. -func (p *Pool) Cap() int { - return p.limit -} - -// Size returns current goroutine count of the pool. -func (p *Pool) Size() int { - return p.count.Val() -} - -// Jobs returns current job count of the pool. -// Note that, it does not return worker/goroutine count but the job/task count. -func (p *Pool) Jobs() int { - return p.list.Size() -} - -// IsClosed returns if pool is closed. -func (p *Pool) IsClosed() bool { - return p.closed.Val() -} - -// Close closes the goroutine pool, which makes all goroutines exit. 
-func (p *Pool) Close() { - p.closed.Set(true) -} diff --git a/vendor/github.com/gogf/gf/v2/os/grpool/grpool_supervisor.go b/vendor/github.com/gogf/gf/v2/os/grpool/grpool_supervisor.go deleted file mode 100644 index d57e9695..00000000 --- a/vendor/github.com/gogf/gf/v2/os/grpool/grpool_supervisor.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package grpool - -import ( - "context" - - "github.com/gogf/gf/v2/os/gtimer" -) - -// supervisor checks the job list and fork new worker goroutine to handle the job -// if there are jobs but no workers in pool. -func (p *Pool) supervisor(ctx context.Context) { - if p.IsClosed() { - gtimer.Exit() - } - if p.list.Size() > 0 && p.count.Val() == 0 { - var number = p.list.Size() - if p.limit > 0 { - number = p.limit - } - for i := 0; i < number; i++ { - p.checkAndFork() - } - } -} diff --git a/vendor/github.com/gogf/gf/v2/os/gstructs/gstructs.go b/vendor/github.com/gogf/gf/v2/os/gstructs/gstructs.go deleted file mode 100644 index 09f911f2..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gstructs/gstructs.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -// Package gstructs provides functions for struct information retrieving. -package gstructs - -import ( - "reflect" -) - -// Type wraps reflect.Type for additional features. -type Type struct { - reflect.Type -} - -// Field contains information of a struct field . -type Field struct { - Value reflect.Value // The underlying value of the field. 
- Field reflect.StructField // The underlying field of the field. - - // Retrieved tag name. It depends TagValue. - TagName string - - // Retrieved tag value. - // There might be more than one tags in the field, but only one can be retrieved according to calling function rules. - TagValue string -} - -// FieldsInput is the input parameter struct type for function Fields. -type FieldsInput struct { - // Pointer should be type of struct/*struct. - Pointer interface{} - - // RecursiveOption specifies the way retrieving the fields recursively if the attribute - // is an embedded struct. It is RecursiveOptionNone in default. - RecursiveOption RecursiveOption -} - -// FieldMapInput is the input parameter struct type for function FieldMap. -type FieldMapInput struct { - // Pointer should be type of struct/*struct. - Pointer interface{} - - // PriorityTagArray specifies the priority tag array for retrieving from high to low. - // If it's given `nil`, it returns map[name]Field, of which the `name` is attribute name. - PriorityTagArray []string - - // RecursiveOption specifies the way retrieving the fields recursively if the attribute - // is an embedded struct. It is RecursiveOptionNone in default. - RecursiveOption RecursiveOption -} - -type RecursiveOption int - -const ( - RecursiveOptionNone RecursiveOption = 0 // No recursively retrieving fields as map if the field is an embedded struct. - RecursiveOptionEmbedded RecursiveOption = 1 // Recursively retrieving fields as map if the field is an embedded struct. - RecursiveOptionEmbeddedNoTag RecursiveOption = 2 // Recursively retrieving fields as map if the field is an embedded struct and the field has no tag. 
-) diff --git a/vendor/github.com/gogf/gf/v2/os/gstructs/gstructs_field.go b/vendor/github.com/gogf/gf/v2/os/gstructs/gstructs_field.go deleted file mode 100644 index e1d68603..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gstructs/gstructs_field.go +++ /dev/null @@ -1,232 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gstructs - -import ( - "reflect" - - "github.com/gogf/gf/v2/internal/utils" - "github.com/gogf/gf/v2/util/gtag" -) - -// Tag returns the value associated with key in the tag string. If there is no -// such key in the tag, Tag returns the empty string. -func (f *Field) Tag(key string) string { - s := f.Field.Tag.Get(key) - if s != "" { - s = gtag.Parse(s) - } - return s -} - -// TagLookup returns the value associated with key in the tag string. -// If the key is present in the tag the value (which may be empty) -// is returned. Otherwise, the returned value will be the empty string. -// The ok return value reports whether the value was explicitly set in -// the tag string. If the tag does not have the conventional format, -// the value returned by Lookup is unspecified. -func (f *Field) TagLookup(key string) (value string, ok bool) { - value, ok = f.Field.Tag.Lookup(key) - if ok && value != "" { - value = gtag.Parse(value) - } - return -} - -// IsEmbedded returns true if the given field is an anonymous field (embedded) -func (f *Field) IsEmbedded() bool { - return f.Field.Anonymous -} - -// TagStr returns the tag string of the field. -func (f *Field) TagStr() string { - return string(f.Field.Tag) -} - -// TagMap returns all the tag of the field along with its value string as map. 
-func (f *Field) TagMap() map[string]string { - var ( - data = ParseTag(f.TagStr()) - ) - for k, v := range data { - data[k] = utils.StripSlashes(gtag.Parse(v)) - } - return data -} - -// IsExported returns true if the given field is exported. -func (f *Field) IsExported() bool { - return f.Field.PkgPath == "" -} - -// Name returns the name of the given field. -func (f *Field) Name() string { - return f.Field.Name -} - -// Type returns the type of the given field. -// Note that this Type is not reflect.Type. If you need reflect.Type, please use Field.Type().Type. -func (f *Field) Type() Type { - return Type{ - Type: f.Field.Type, - } -} - -// Kind returns the reflect.Kind for Value of Field `f`. -func (f *Field) Kind() reflect.Kind { - return f.Value.Kind() -} - -// OriginalKind retrieves and returns the original reflect.Kind for Value of Field `f`. -func (f *Field) OriginalKind() reflect.Kind { - var ( - reflectType = f.Value.Type() - reflectKind = reflectType.Kind() - ) - for reflectKind == reflect.Ptr { - reflectType = reflectType.Elem() - reflectKind = reflectType.Kind() - } - return reflectKind -} - -// Fields retrieves and returns the fields of `pointer` as slice. 
-func Fields(in FieldsInput) ([]Field, error) { - var ( - ok bool - fieldFilterMap = make(map[string]struct{}) - retrievedFields = make([]Field, 0) - currentLevelFieldMap = make(map[string]Field) - ) - rangeFields, err := getFieldValues(in.Pointer) - if err != nil { - return nil, err - } - - for index := 0; index < len(rangeFields); index++ { - field := rangeFields[index] - currentLevelFieldMap[field.Name()] = field - } - - for index := 0; index < len(rangeFields); index++ { - field := rangeFields[index] - if _, ok = fieldFilterMap[field.Name()]; ok { - continue - } - if field.IsEmbedded() { - if in.RecursiveOption != RecursiveOptionNone { - switch in.RecursiveOption { - case RecursiveOptionEmbeddedNoTag: - if field.TagStr() != "" { - break - } - fallthrough - - case RecursiveOptionEmbedded: - structFields, err := Fields(FieldsInput{ - Pointer: field.Value, - RecursiveOption: in.RecursiveOption, - }) - if err != nil { - return nil, err - } - // The current level fields can overwrite the sub-struct fields with the same name. - for i := 0; i < len(structFields); i++ { - var ( - structField = structFields[i] - fieldName = structField.Name() - ) - if _, ok = fieldFilterMap[fieldName]; ok { - continue - } - fieldFilterMap[fieldName] = struct{}{} - if v, ok := currentLevelFieldMap[fieldName]; !ok { - retrievedFields = append(retrievedFields, structField) - } else { - retrievedFields = append(retrievedFields, v) - } - } - continue - } - } - continue - } - fieldFilterMap[field.Name()] = struct{}{} - retrievedFields = append(retrievedFields, field) - } - return retrievedFields, nil -} - -// FieldMap retrieves and returns struct field as map[name/tag]Field from `pointer`. -// -// The parameter `pointer` should be type of struct/*struct. -// -// The parameter `priority` specifies the priority tag array for retrieving from high to low. -// If it's given `nil`, it returns map[name]Field, of which the `name` is attribute name. 
-// -// The parameter `recursive` specifies the whether retrieving the fields recursively if the attribute -// is an embedded struct. -// -// Note that it only retrieves the exported attributes with first letter upper-case from struct. -func FieldMap(in FieldMapInput) (map[string]Field, error) { - fields, err := getFieldValues(in.Pointer) - if err != nil { - return nil, err - } - var ( - tagValue string - mapField = make(map[string]Field) - ) - for _, field := range fields { - // Only retrieve exported attributes. - if !field.IsExported() { - continue - } - tagValue = "" - for _, p := range in.PriorityTagArray { - tagValue = field.Tag(p) - if tagValue != "" && tagValue != "-" { - break - } - } - tempField := field - tempField.TagValue = tagValue - if tagValue != "" { - mapField[tagValue] = tempField - } else { - if in.RecursiveOption != RecursiveOptionNone && field.IsEmbedded() { - switch in.RecursiveOption { - case RecursiveOptionEmbeddedNoTag: - if field.TagStr() != "" { - mapField[field.Name()] = tempField - break - } - fallthrough - - case RecursiveOptionEmbedded: - m, err := FieldMap(FieldMapInput{ - Pointer: field.Value, - PriorityTagArray: in.PriorityTagArray, - RecursiveOption: in.RecursiveOption, - }) - if err != nil { - return nil, err - } - for k, v := range m { - if _, ok := mapField[k]; !ok { - tempV := v - mapField[k] = tempV - } - } - } - } else { - mapField[field.Name()] = tempField - } - } - } - return mapField, nil -} diff --git a/vendor/github.com/gogf/gf/v2/os/gstructs/gstructs_field_tag.go b/vendor/github.com/gogf/gf/v2/os/gstructs/gstructs_field_tag.go deleted file mode 100644 index 2fd03978..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gstructs/gstructs_field_tag.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. 
-// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gstructs - -import ( - "strings" - - "github.com/gogf/gf/v2/util/gtag" -) - -// TagJsonName returns the `json` tag name string of the field. -func (f *Field) TagJsonName() string { - if jsonTag := f.Tag(gtag.Json); jsonTag != "" { - return strings.Split(jsonTag, ",")[0] - } - return "" -} - -// TagDefault returns the most commonly used tag `default/d` value of the field. -func (f *Field) TagDefault() string { - v := f.Tag(gtag.Default) - if v == "" { - v = f.Tag(gtag.DefaultShort) - } - return v -} - -// TagParam returns the most commonly used tag `param/p` value of the field. -func (f *Field) TagParam() string { - v := f.Tag(gtag.Param) - if v == "" { - v = f.Tag(gtag.ParamShort) - } - return v -} - -// TagValid returns the most commonly used tag `valid/v` value of the field. -func (f *Field) TagValid() string { - v := f.Tag(gtag.Valid) - if v == "" { - v = f.Tag(gtag.ValidShort) - } - return v -} - -// TagDescription returns the most commonly used tag `description/des/dc` value of the field. -func (f *Field) TagDescription() string { - v := f.Tag(gtag.Description) - if v == "" { - v = f.Tag(gtag.DescriptionShort) - } - if v == "" { - v = f.Tag(gtag.DescriptionShort2) - } - return v -} - -// TagSummary returns the most commonly used tag `summary/sum/sm` value of the field. -func (f *Field) TagSummary() string { - v := f.Tag(gtag.Summary) - if v == "" { - v = f.Tag(gtag.SummaryShort) - } - if v == "" { - v = f.Tag(gtag.SummaryShort2) - } - return v -} - -// TagAdditional returns the most commonly used tag `additional/ad` value of the field. -func (f *Field) TagAdditional() string { - v := f.Tag(gtag.Additional) - if v == "" { - v = f.Tag(gtag.AdditionalShort) - } - return v -} - -// TagExample returns the most commonly used tag `example/eg` value of the field. 
-func (f *Field) TagExample() string { - v := f.Tag(gtag.Example) - if v == "" { - v = f.Tag(gtag.ExampleShort) - } - return v -} diff --git a/vendor/github.com/gogf/gf/v2/os/gstructs/gstructs_tag.go b/vendor/github.com/gogf/gf/v2/os/gstructs/gstructs_tag.go deleted file mode 100644 index 72cbd081..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gstructs/gstructs_tag.go +++ /dev/null @@ -1,225 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gstructs - -import ( - "reflect" - "strconv" - - "github.com/gogf/gf/v2/errors/gcode" - "github.com/gogf/gf/v2/errors/gerror" - "github.com/gogf/gf/v2/util/gtag" -) - -// ParseTag parses tag string into map. -// For example: -// ParseTag(`v:"required" p:"id" d:"1"`) => map[v:required p:id d:1]. -func ParseTag(tag string) map[string]string { - var ( - key string - data = make(map[string]string) - ) - for tag != "" { - // Skip leading space. - i := 0 - for i < len(tag) && tag[i] == ' ' { - i++ - } - tag = tag[i:] - if tag == "" { - break - } - // Scan to colon. A space, a quote or a control character is a syntax error. - // Strictly speaking, control chars include the range [0x7f, 0x9f], not just - // [0x00, 0x1f], but in practice, we ignore the multi-byte control characters - // as it is simpler to inspect the tag's bytes than the tag's runes. - i = 0 - for i < len(tag) && tag[i] > ' ' && tag[i] != ':' && tag[i] != '"' && tag[i] != 0x7f { - i++ - } - if i == 0 || i+1 >= len(tag) || tag[i] != ':' || tag[i+1] != '"' { - break - } - key = tag[:i] - tag = tag[i+1:] - - // Scan quoted string to find value. 
- i = 1 - for i < len(tag) && tag[i] != '"' { - if tag[i] == '\\' { - i++ - } - i++ - } - if i >= len(tag) { - break - } - quotedValue := tag[:i+1] - tag = tag[i+1:] - value, err := strconv.Unquote(quotedValue) - if err != nil { - panic(gerror.WrapCodef(gcode.CodeInvalidParameter, err, `error parsing tag "%s"`, tag)) - } - data[key] = gtag.Parse(value) - } - return data -} - -// TagFields retrieves and returns struct tags as []Field from `pointer`. -// -// The parameter `pointer` should be type of struct/*struct. -// -// Note that, -// 1. It only retrieves the exported attributes with first letter upper-case from struct. -// 2. The parameter `priority` should be given, it only retrieves fields that has given tag. -func TagFields(pointer interface{}, priority []string) ([]Field, error) { - return getFieldValuesByTagPriority(pointer, priority, map[string]struct{}{}) -} - -// TagMapName retrieves and returns struct tags as map[tag]attribute from `pointer`. -// -// The parameter `pointer` should be type of struct/*struct. -// -// Note that, -// 1. It only retrieves the exported attributes with first letter upper-case from struct. -// 2. The parameter `priority` should be given, it only retrieves fields that has given tag. -// 3. If one field has no specified tag, it uses its field name as result map key. -func TagMapName(pointer interface{}, priority []string) (map[string]string, error) { - fields, err := TagFields(pointer, priority) - if err != nil { - return nil, err - } - tagMap := make(map[string]string, len(fields)) - for _, field := range fields { - tagMap[field.TagValue] = field.Name() - } - return tagMap, nil -} - -// TagMapField retrieves struct tags as map[tag]Field from `pointer`, and returns it. -// The parameter `object` should be either type of struct/*struct/[]struct/[]*struct. -// -// Note that, -// 1. It only retrieves the exported attributes with first letter upper-case from struct. -// 2. 
The parameter `priority` should be given, it only retrieves fields that has given tag. -// 3. If one field has no specified tag, it uses its field name as result map key. -func TagMapField(object interface{}, priority []string) (map[string]Field, error) { - fields, err := TagFields(object, priority) - if err != nil { - return nil, err - } - tagMap := make(map[string]Field, len(fields)) - for _, field := range fields { - tagField := field - tagMap[field.TagValue] = tagField - } - return tagMap, nil -} - -func getFieldValues(value interface{}) ([]Field, error) { - var ( - reflectValue reflect.Value - reflectKind reflect.Kind - ) - if v, ok := value.(reflect.Value); ok { - reflectValue = v - reflectKind = reflectValue.Kind() - } else { - reflectValue = reflect.ValueOf(value) - reflectKind = reflectValue.Kind() - } - for { - switch reflectKind { - case reflect.Ptr: - if !reflectValue.IsValid() || reflectValue.IsNil() { - // If pointer is type of *struct and nil, then automatically create a temporary struct. 
- reflectValue = reflect.New(reflectValue.Type().Elem()).Elem() - reflectKind = reflectValue.Kind() - } else { - reflectValue = reflectValue.Elem() - reflectKind = reflectValue.Kind() - } - case reflect.Array, reflect.Slice: - reflectValue = reflect.New(reflectValue.Type().Elem()).Elem() - reflectKind = reflectValue.Kind() - default: - goto exitLoop - } - } - -exitLoop: - for reflectKind == reflect.Ptr { - reflectValue = reflectValue.Elem() - reflectKind = reflectValue.Kind() - } - if reflectKind != reflect.Struct { - return nil, gerror.NewCode( - gcode.CodeInvalidParameter, - "given value should be either type of struct/*struct/[]struct/[]*struct", - ) - } - var ( - structType = reflectValue.Type() - length = reflectValue.NumField() - fields = make([]Field, length) - ) - for i := 0; i < length; i++ { - fields[i] = Field{ - Value: reflectValue.Field(i), - Field: structType.Field(i), - } - } - return fields, nil -} - -func getFieldValuesByTagPriority( - pointer interface{}, priority []string, repeatedTagFilteringMap map[string]struct{}, -) ([]Field, error) { - fields, err := getFieldValues(pointer) - if err != nil { - return nil, err - } - var ( - tagName string - tagValue string - tagFields = make([]Field, 0) - ) - for _, field := range fields { - // Only retrieve exported attributes. - if !field.IsExported() { - continue - } - tagValue = "" - for _, p := range priority { - tagName = p - tagValue = field.Tag(p) - if tagValue != "" && tagValue != "-" { - break - } - } - if tagValue != "" { - // Filter repeated tag. - if _, ok := repeatedTagFilteringMap[tagValue]; ok { - continue - } - tagField := field - tagField.TagName = tagName - tagField.TagValue = tagValue - tagFields = append(tagFields, tagField) - } - // If this is an embedded attribute, it retrieves the tags recursively. 
- if field.IsEmbedded() && field.OriginalKind() == reflect.Struct { - subTagFields, err := getFieldValuesByTagPriority(field.Value, priority, repeatedTagFilteringMap) - if err != nil { - return nil, err - } else { - tagFields = append(tagFields, subTagFields...) - } - } - } - return tagFields, nil -} diff --git a/vendor/github.com/gogf/gf/v2/os/gstructs/gstructs_type.go b/vendor/github.com/gogf/gf/v2/os/gstructs/gstructs_type.go deleted file mode 100644 index 82d24de6..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gstructs/gstructs_type.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gstructs - -import ( - "reflect" - - "github.com/gogf/gf/v2/errors/gerror" -) - -// StructType retrieves and returns the struct Type of specified struct/*struct. -// The parameter `object` should be either type of struct/*struct/[]struct/[]*struct. -func StructType(object interface{}) (*Type, error) { - var ( - reflectValue reflect.Value - reflectKind reflect.Kind - reflectType reflect.Type - ) - if rv, ok := object.(reflect.Value); ok { - reflectValue = rv - } else { - reflectValue = reflect.ValueOf(object) - } - reflectKind = reflectValue.Kind() - for { - switch reflectKind { - case reflect.Ptr: - if !reflectValue.IsValid() || reflectValue.IsNil() { - // If pointer is type of *struct and nil, then automatically create a temporary struct. 
- reflectValue = reflect.New(reflectValue.Type().Elem()).Elem() - reflectKind = reflectValue.Kind() - } else { - reflectValue = reflectValue.Elem() - reflectKind = reflectValue.Kind() - } - - case reflect.Array, reflect.Slice: - reflectValue = reflect.New(reflectValue.Type().Elem()).Elem() - reflectKind = reflectValue.Kind() - - default: - goto exitLoop - } - } - -exitLoop: - if reflectKind != reflect.Struct { - return nil, gerror.Newf( - `invalid object kind "%s", kind of "struct" is required`, - reflectKind, - ) - } - reflectType = reflectValue.Type() - return &Type{ - Type: reflectType, - }, nil -} - -// Signature returns a unique string as this type. -func (t Type) Signature() string { - return t.PkgPath() + "/" + t.String() -} - -// FieldKeys returns the keys of current struct/map. -func (t Type) FieldKeys() []string { - keys := make([]string, t.NumField()) - for i := 0; i < t.NumField(); i++ { - keys[i] = t.Field(i).Name - } - return keys -} diff --git a/vendor/github.com/gogf/gf/v2/os/gtime/gtime.go b/vendor/github.com/gogf/gf/v2/os/gtime/gtime.go deleted file mode 100644 index 5cbe0020..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gtime/gtime.go +++ /dev/null @@ -1,452 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -// Package gtime provides functionality for measuring and displaying time. -// -// This package should keep much less dependencies with other packages. -package gtime - -import ( - "context" - "fmt" - "regexp" - "strconv" - "strings" - "time" - - "github.com/gogf/gf/v2/errors/gcode" - "github.com/gogf/gf/v2/errors/gerror" - "github.com/gogf/gf/v2/internal/intlog" - "github.com/gogf/gf/v2/internal/utils" - "github.com/gogf/gf/v2/text/gregex" -) - -const ( - // Short writes for common usage durations. 
- - D = 24 * time.Hour - H = time.Hour - M = time.Minute - S = time.Second - MS = time.Millisecond - US = time.Microsecond - NS = time.Nanosecond - - // Regular expression1(datetime separator supports '-', '/', '.'). - // Eg: - // "2017-12-14 04:51:34 +0805 LMT", - // "2017-12-14 04:51:34 +0805 LMT", - // "2006-01-02T15:04:05Z07:00", - // "2014-01-17T01:19:15+08:00", - // "2018-02-09T20:46:17.897Z", - // "2018-02-09 20:46:17.897", - // "2018-02-09T20:46:17Z", - // "2018-02-09 20:46:17", - // "2018/10/31 - 16:38:46" - // "2018-02-09", - // "2018.02.09", - timeRegexPattern1 = `(\d{4}[-/\.]\d{1,2}[-/\.]\d{1,2})[:\sT-]*(\d{0,2}:{0,1}\d{0,2}:{0,1}\d{0,2}){0,1}\.{0,1}(\d{0,9})([\sZ]{0,1})([\+-]{0,1})([:\d]*)` - - // Regular expression2(datetime separator supports '-', '/', '.'). - // Eg: - // 01-Nov-2018 11:50:28 - // 01/Nov/2018 11:50:28 - // 01.Nov.2018 11:50:28 - // 01.Nov.2018:11:50:28 - timeRegexPattern2 = `(\d{1,2}[-/\.][A-Za-z]{3,}[-/\.]\d{4})[:\sT-]*(\d{0,2}:{0,1}\d{0,2}:{0,1}\d{0,2}){0,1}\.{0,1}(\d{0,9})([\sZ]{0,1})([\+-]{0,1})([:\d]*)` - - // Regular expression3(time). - // Eg: - // 11:50:28 - // 11:50:28.897 - timeRegexPattern3 = `(\d{2}):(\d{2}):(\d{2})\.{0,1}(\d{0,9})` -) - -var ( - // It's more high performance using regular expression - // than time.ParseInLocation to parse the datetime string. - timeRegex1, _ = regexp.Compile(timeRegexPattern1) - timeRegex2, _ = regexp.Compile(timeRegexPattern2) - timeRegex3, _ = regexp.Compile(timeRegexPattern3) - - // Month words to arabic numerals mapping. - monthMap = map[string]int{ - "jan": 1, - "feb": 2, - "mar": 3, - "apr": 4, - "may": 5, - "jun": 6, - "jul": 7, - "aug": 8, - "sep": 9, - "sept": 9, - "oct": 10, - "nov": 11, - "dec": 12, - "january": 1, - "february": 2, - "march": 3, - "april": 4, - "june": 6, - "july": 7, - "august": 8, - "september": 9, - "october": 10, - "november": 11, - "december": 12, - } -) - -// Timestamp retrieves and returns the timestamp in seconds. 
-func Timestamp() int64 { - return Now().Timestamp() -} - -// TimestampMilli retrieves and returns the timestamp in milliseconds. -func TimestampMilli() int64 { - return Now().TimestampMilli() -} - -// TimestampMicro retrieves and returns the timestamp in microseconds. -func TimestampMicro() int64 { - return Now().TimestampMicro() -} - -// TimestampNano retrieves and returns the timestamp in nanoseconds. -func TimestampNano() int64 { - return Now().TimestampNano() -} - -// TimestampStr is a convenience method which retrieves and returns -// the timestamp in seconds as string. -func TimestampStr() string { - return Now().TimestampStr() -} - -// TimestampMilliStr is a convenience method which retrieves and returns -// the timestamp in milliseconds as string. -func TimestampMilliStr() string { - return Now().TimestampMilliStr() -} - -// TimestampMicroStr is a convenience method which retrieves and returns -// the timestamp in microseconds as string. -func TimestampMicroStr() string { - return Now().TimestampMicroStr() -} - -// TimestampNanoStr is a convenience method which retrieves and returns -// the timestamp in nanoseconds as string. -func TimestampNanoStr() string { - return Now().TimestampNanoStr() -} - -// Date returns current date in string like "2006-01-02". -func Date() string { - return time.Now().Format("2006-01-02") -} - -// Datetime returns current datetime in string like "2006-01-02 15:04:05". -func Datetime() string { - return time.Now().Format("2006-01-02 15:04:05") -} - -// ISO8601 returns current datetime in ISO8601 format like "2006-01-02T15:04:05-07:00". -func ISO8601() string { - return time.Now().Format("2006-01-02T15:04:05-07:00") -} - -// RFC822 returns current datetime in RFC822 format like "Mon, 02 Jan 06 15:04 MST". -func RFC822() string { - return time.Now().Format("Mon, 02 Jan 06 15:04 MST") -} - -// parseDateStr parses the string to year, month and day numbers. 
-func parseDateStr(s string) (year, month, day int) { - array := strings.Split(s, "-") - if len(array) < 3 { - array = strings.Split(s, "/") - } - if len(array) < 3 { - array = strings.Split(s, ".") - } - // Parsing failed. - if len(array) < 3 { - return - } - // Checking the year in head or tail. - if utils.IsNumeric(array[1]) { - year, _ = strconv.Atoi(array[0]) - month, _ = strconv.Atoi(array[1]) - day, _ = strconv.Atoi(array[2]) - } else { - if v, ok := monthMap[strings.ToLower(array[1])]; ok { - month = v - } else { - return - } - year, _ = strconv.Atoi(array[2]) - day, _ = strconv.Atoi(array[0]) - } - return -} - -// StrToTime converts string to *Time object. It also supports timestamp string. -// The parameter `format` is unnecessary, which specifies the format for converting like "Y-m-d H:i:s". -// If `format` is given, it acts as same as function StrToTimeFormat. -// If `format` is not given, it converts string as a "standard" datetime string. -// Note that, it fails and returns error if there's no date string in `str`. 
-func StrToTime(str string, format ...string) (*Time, error) { - if str == "" { - return &Time{wrapper{time.Time{}}}, nil - } - if len(format) > 0 { - return StrToTimeFormat(str, format[0]) - } - if isTimestampStr(str) { - timestamp, _ := strconv.ParseInt(str, 10, 64) - return NewFromTimeStamp(timestamp), nil - } - var ( - year, month, day int - hour, min, sec, nsec int - match []string - local = time.Local - ) - if match = timeRegex1.FindStringSubmatch(str); len(match) > 0 && match[1] != "" { - year, month, day = parseDateStr(match[1]) - } else if match = timeRegex2.FindStringSubmatch(str); len(match) > 0 && match[1] != "" { - year, month, day = parseDateStr(match[1]) - } else if match = timeRegex3.FindStringSubmatch(str); len(match) > 0 && match[1] != "" { - s := strings.ReplaceAll(match[2], ":", "") - if len(s) < 6 { - s += strings.Repeat("0", 6-len(s)) - } - hour, _ = strconv.Atoi(match[1]) - min, _ = strconv.Atoi(match[2]) - sec, _ = strconv.Atoi(match[3]) - nsec, _ = strconv.Atoi(match[4]) - for i := 0; i < 9-len(match[4]); i++ { - nsec *= 10 - } - return NewFromTime(time.Date(0, time.Month(1), 1, hour, min, sec, nsec, local)), nil - } else { - return nil, gerror.NewCodef(gcode.CodeInvalidParameter, `unsupported time converting for string "%s"`, str) - } - - // Time - if len(match[2]) > 0 { - s := strings.ReplaceAll(match[2], ":", "") - if len(s) < 6 { - s += strings.Repeat("0", 6-len(s)) - } - hour, _ = strconv.Atoi(s[0:2]) - min, _ = strconv.Atoi(s[2:4]) - sec, _ = strconv.Atoi(s[4:6]) - } - // Nanoseconds, check and perform bits filling - if len(match[3]) > 0 { - nsec, _ = strconv.Atoi(match[3]) - for i := 0; i < 9-len(match[3]); i++ { - nsec *= 10 - } - } - // If there's zone information in the string, - // it then performs time zone conversion, which converts the time zone to UTC. - if match[4] != "" && match[6] == "" { - match[6] = "000000" - } - // If there's offset in the string, it then firstly processes the offset. 
- if match[6] != "" { - zone := strings.ReplaceAll(match[6], ":", "") - zone = strings.TrimLeft(zone, "+-") - if len(zone) <= 6 { - zone += strings.Repeat("0", 6-len(zone)) - h, _ := strconv.Atoi(zone[0:2]) - m, _ := strconv.Atoi(zone[2:4]) - s, _ := strconv.Atoi(zone[4:6]) - if h > 24 || m > 59 || s > 59 { - return nil, gerror.NewCodef(gcode.CodeInvalidParameter, `invalid zone string "%s"`, match[6]) - } - operation := match[5] - if operation != "+" && operation != "-" { - operation = "-" - } - // Comparing the given time zone whether equals to current time zone, - // it converts it to UTC if they do not equal. - _, localOffset := time.Now().Zone() - // Comparing in seconds. - if (h*3600+m*60+s) != localOffset || - (localOffset > 0 && operation == "-") || - (localOffset < 0 && operation == "+") { - local = time.UTC - // UTC conversion. - switch operation { - case "+": - if h > 0 { - hour -= h - } - if m > 0 { - min -= m - } - if s > 0 { - sec -= s - } - case "-": - if h > 0 { - hour += h - } - if m > 0 { - min += m - } - if s > 0 { - sec += s - } - } - } - } - } - if month <= 0 || day <= 0 { - return nil, gerror.NewCodef(gcode.CodeInvalidParameter, `invalid time string "%s"`, str) - } - return NewFromTime(time.Date(year, time.Month(month), day, hour, min, sec, nsec, local)), nil -} - -// ConvertZone converts time in string `strTime` from `fromZone` to `toZone`. -// The parameter `fromZone` is unnecessary, it is current time zone in default. 
-func ConvertZone(strTime string, toZone string, fromZone ...string) (*Time, error) { - t, err := StrToTime(strTime) - if err != nil { - return nil, err - } - var l *time.Location - if len(fromZone) > 0 { - if l, err = time.LoadLocation(fromZone[0]); err != nil { - err = gerror.WrapCodef(gcode.CodeInvalidParameter, err, `time.LoadLocation failed for name "%s"`, fromZone[0]) - return nil, err - } else { - t.Time = time.Date(t.Year(), time.Month(t.Month()), t.Day(), t.Hour(), t.Minute(), t.Time.Second(), t.Time.Nanosecond(), l) - } - } - if l, err = time.LoadLocation(toZone); err != nil { - err = gerror.WrapCodef(gcode.CodeInvalidParameter, err, `time.LoadLocation failed for name "%s"`, toZone) - return nil, err - } else { - return t.ToLocation(l), nil - } -} - -// StrToTimeFormat parses string `str` to *Time object with given format `format`. -// The parameter `format` is like "Y-m-d H:i:s". -func StrToTimeFormat(str string, format string) (*Time, error) { - return StrToTimeLayout(str, formatToStdLayout(format)) -} - -// StrToTimeLayout parses string `str` to *Time object with given format `layout`. -// The parameter `layout` is in stdlib format like "2006-01-02 15:04:05". -func StrToTimeLayout(str string, layout string) (*Time, error) { - if t, err := time.ParseInLocation(layout, str, time.Local); err == nil { - return NewFromTime(t), nil - } else { - return nil, gerror.WrapCodef( - gcode.CodeInvalidParameter, err, - `time.ParseInLocation failed for layout "%s" and value "%s"`, - layout, str, - ) - } -} - -// ParseTimeFromContent retrieves time information for content string, it then parses and returns it -// as *Time object. -// It returns the first time information if there are more than one time string in the content. -// It only retrieves and parses the time information with given `format` if it's passed. 
-func ParseTimeFromContent(content string, format ...string) *Time { - var ( - err error - match []string - ) - if len(format) > 0 { - match, err = gregex.MatchString(formatToRegexPattern(format[0]), content) - if err != nil { - intlog.Errorf(context.TODO(), `%+v`, err) - } - if len(match) > 0 { - return NewFromStrFormat(match[0], format[0]) - } - } else { - if match = timeRegex1.FindStringSubmatch(content); len(match) >= 1 { - return NewFromStr(strings.Trim(match[0], "./_- \n\r")) - } else if match = timeRegex2.FindStringSubmatch(content); len(match) >= 1 { - return NewFromStr(strings.Trim(match[0], "./_- \n\r")) - } else if match = timeRegex3.FindStringSubmatch(content); len(match) >= 1 { - return NewFromStr(strings.Trim(match[0], "./_- \n\r")) - } - } - return nil -} - -// ParseDuration parses a duration string. -// A duration string is a possibly signed sequence of -// decimal numbers, each with optional fraction and a unit suffix, -// such as "300ms", "-1.5h", "1d" or "2h45m". -// Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h", "d". -// -// Very note that it supports unit "d" more than function time.ParseDuration. 
-func ParseDuration(s string) (duration time.Duration, err error) { - var ( - num int64 - ) - if utils.IsNumeric(s) { - num, err = strconv.ParseInt(s, 10, 64) - if err != nil { - err = gerror.WrapCodef(gcode.CodeInvalidParameter, err, `strconv.ParseInt failed for string "%s"`, s) - return 0, err - } - return time.Duration(num), nil - } - match, err := gregex.MatchString(`^([\-\d]+)[dD](.*)$`, s) - if err != nil { - return 0, err - } - if len(match) == 3 { - num, err = strconv.ParseInt(match[1], 10, 64) - if err != nil { - err = gerror.WrapCodef(gcode.CodeInvalidParameter, err, `strconv.ParseInt failed for string "%s"`, match[1]) - return 0, err - } - s = fmt.Sprintf(`%dh%s`, num*24, match[2]) - duration, err = time.ParseDuration(s) - if err != nil { - err = gerror.WrapCodef(gcode.CodeInvalidParameter, err, `time.ParseDuration failed for string "%s"`, s) - } - return - } - duration, err = time.ParseDuration(s) - err = gerror.WrapCodef(gcode.CodeInvalidParameter, err, `time.ParseDuration failed for string "%s"`, s) - return -} - -// FuncCost calculates the cost time of function `f` in nanoseconds. -func FuncCost(f func()) time.Duration { - t := time.Now() - f() - return time.Since(t) -} - -// isTimestampStr checks and returns whether given string a timestamp string. -func isTimestampStr(s string) bool { - length := len(s) - if length == 0 { - return false - } - for i := 0; i < len(s); i++ { - if s[i] < '0' || s[i] > '9' { - return false - } - } - return true -} diff --git a/vendor/github.com/gogf/gf/v2/os/gtime/gtime_format.go b/vendor/github.com/gogf/gf/v2/os/gtime/gtime_format.go deleted file mode 100644 index a6287913..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gtime/gtime_format.go +++ /dev/null @@ -1,288 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. 
-// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gtime - -import ( - "bytes" - "strconv" - "strings" - - "github.com/gogf/gf/v2/text/gregex" -) - -var ( - // Refer: http://php.net/manual/en/function.date.php - formats = map[byte]string{ - 'd': "02", // Day: Day of the month, 2 digits with leading zeros. Eg: 01 to 31. - 'D': "Mon", // Day: A textual representation of a day, three letters. Eg: Mon through Sun. - 'w': "Monday", // Day: Numeric representation of the day of the week. Eg: 0 (for Sunday) through 6 (for Saturday). - 'N': "Monday", // Day: ISO-8601 numeric representation of the day of the week. Eg: 1 (for Monday) through 7 (for Sunday). - 'j': "=j=02", // Day: Day of the month without leading zeros. Eg: 1 to 31. - 'S': "02", // Day: English ordinal suffix for the day of the month, 2 characters. Eg: st, nd, rd or th. Works well with j. - 'l': "Monday", // Day: A full textual representation of the day of the week. Eg: Sunday through Saturday. - 'z': "", // Day: The day of the year (starting from 0). Eg: 0 through 365. - 'W': "", // Week: ISO-8601 week number of year, weeks starting on Monday. Eg: 42 (the 42nd week in the year). - 'F': "January", // Month: A full textual representation of a month, such as January or March. Eg: January through December. - 'm': "01", // Month: Numeric representation of a month, with leading zeros. Eg: 01 through 12. - 'M': "Jan", // Month: A short textual representation of a month, three letters. Eg: Jan through Dec. - 'n': "1", // Month: Numeric representation of a month, without leading zeros. Eg: 1 through 12. - 't': "", // Month: Number of days in the given month. Eg: 28 through 31. - 'Y': "2006", // Year: A full numeric representation of a year, 4 digits. Eg: 1999 or 2003. - 'y': "06", // Year: A two-digit representation of a year. Eg: 99 or 03. - 'a': "pm", // Time: Lowercase Ante meridiem and Post meridiem. Eg: am or pm. 
- 'A': "PM", // Time: Uppercase Ante meridiem and Post meridiem. Eg: AM or PM. - 'g': "3", // Time: 12-hour format of an hour without leading zeros. Eg: 1 through 12. - 'G': "=G=15", // Time: 24-hour format of an hour without leading zeros. Eg: 0 through 23. - 'h': "03", // Time: 12-hour format of an hour with leading zeros. Eg: 01 through 12. - 'H': "15", // Time: 24-hour format of an hour with leading zeros. Eg: 00 through 23. - 'i': "04", // Time: Minutes with leading zeros. Eg: 00 to 59. - 's': "05", // Time: Seconds with leading zeros. Eg: 00 through 59. - 'u': "=u=.000", // Time: Milliseconds. Eg: 234, 678. - 'U': "", // Time: Seconds since the Unix Epoch (January 1 1970 00:00:00 GMT). - 'O': "-0700", // Zone: Difference to Greenwich time (GMT) in hours. Eg: +0200. - 'P': "-07:00", // Zone: Difference to Greenwich time (GMT) with colon between hours and minutes. Eg: +02:00. - 'T': "MST", // Zone: Timezone abbreviation. Eg: UTC, EST, MDT ... - 'c': "2006-01-02T15:04:05-07:00", // Format: ISO 8601 date. Eg: 2004-02-12T15:19:21+00:00. - 'r': "Mon, 02 Jan 06 15:04 MST", // Format: RFC 2822 formatted date. Eg: Thu, 21 Dec 2000 16:01:07 +0200. - } - - // Week to number mapping. - weekMap = map[string]string{ - "Sunday": "0", - "Monday": "1", - "Tuesday": "2", - "Wednesday": "3", - "Thursday": "4", - "Friday": "5", - "Saturday": "6", - } - - // Day count of each month which is not in leap year. - dayOfMonth = []int{0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334} -) - -// Format formats and returns the formatted result with custom `format`. 
-func (t *Time) Format(format string) string { - if t == nil { - return "" - } - runes := []rune(format) - buffer := bytes.NewBuffer(nil) - for i := 0; i < len(runes); { - switch runes[i] { - case '\\': - if i < len(runes)-1 { - buffer.WriteRune(runes[i+1]) - i += 2 - continue - } else { - return buffer.String() - } - case 'W': - buffer.WriteString(strconv.Itoa(t.WeeksOfYear())) - case 'z': - buffer.WriteString(strconv.Itoa(t.DayOfYear())) - case 't': - buffer.WriteString(strconv.Itoa(t.DaysInMonth())) - case 'U': - buffer.WriteString(strconv.FormatInt(t.Unix(), 10)) - default: - if runes[i] > 255 { - buffer.WriteRune(runes[i]) - break - } - if f, ok := formats[byte(runes[i])]; ok { - result := t.Time.Format(f) - // Particular chars should be handled here. - switch runes[i] { - case 'j': - for _, s := range []string{"=j=0", "=j="} { - result = strings.ReplaceAll(result, s, "") - } - buffer.WriteString(result) - case 'G': - for _, s := range []string{"=G=0", "=G="} { - result = strings.ReplaceAll(result, s, "") - } - buffer.WriteString(result) - case 'u': - buffer.WriteString(strings.ReplaceAll(result, "=u=.", "")) - case 'w': - buffer.WriteString(weekMap[result]) - case 'N': - buffer.WriteString(strings.ReplaceAll(weekMap[result], "0", "7")) - case 'S': - buffer.WriteString(formatMonthDaySuffixMap(result)) - default: - buffer.WriteString(result) - } - } else { - buffer.WriteRune(runes[i]) - } - } - i++ - } - return buffer.String() -} - -// FormatNew formats and returns a new Time object with given custom `format`. -func (t *Time) FormatNew(format string) *Time { - if t == nil { - return nil - } - return NewFromStr(t.Format(format)) -} - -// FormatTo formats `t` with given custom `format`. -func (t *Time) FormatTo(format string) *Time { - if t == nil { - return nil - } - t.Time = NewFromStr(t.Format(format)).Time - return t -} - -// Layout formats the time with stdlib layout and returns the formatted result. 
-func (t *Time) Layout(layout string) string { - if t == nil { - return "" - } - return t.Time.Format(layout) -} - -// LayoutNew formats the time with stdlib layout and returns the new Time object. -func (t *Time) LayoutNew(layout string) *Time { - if t == nil { - return nil - } - newTime, err := StrToTimeLayout(t.Layout(layout), layout) - if err != nil { - panic(err) - } - return newTime -} - -// LayoutTo formats `t` with stdlib layout. -func (t *Time) LayoutTo(layout string) *Time { - if t == nil { - return nil - } - newTime, err := StrToTimeLayout(t.Layout(layout), layout) - if err != nil { - panic(err) - } - t.Time = newTime.Time - return t -} - -// IsLeapYear checks whether the time is leap year. -func (t *Time) IsLeapYear() bool { - year := t.Year() - if (year%4 == 0 && year%100 != 0) || year%400 == 0 { - return true - } - return false -} - -// DayOfYear checks and returns the position of the day for the year. -func (t *Time) DayOfYear() int { - var ( - day = t.Day() - month = t.Month() - ) - if t.IsLeapYear() { - if month > 2 { - return dayOfMonth[month-1] + day - } - return dayOfMonth[month-1] + day - 1 - } - return dayOfMonth[month-1] + day - 1 -} - -// DaysInMonth returns the day count of current month. -func (t *Time) DaysInMonth() int { - switch t.Month() { - case 1, 3, 5, 7, 8, 10, 12: - return 31 - case 4, 6, 9, 11: - return 30 - } - if t.IsLeapYear() { - return 29 - } - return 28 -} - -// WeeksOfYear returns the point of current week for the year. -func (t *Time) WeeksOfYear() int { - _, week := t.ISOWeek() - return week -} - -// formatToStdLayout converts custom format to stdlib layout. -func formatToStdLayout(format string) string { - b := bytes.NewBuffer(nil) - for i := 0; i < len(format); { - switch format[i] { - case '\\': - if i < len(format)-1 { - b.WriteByte(format[i+1]) - i += 2 - continue - } else { - return b.String() - } - - default: - if f, ok := formats[format[i]]; ok { - // Handle particular chars. 
- switch format[i] { - case 'j': - b.WriteString("2") - case 'G': - b.WriteString("15") - case 'u': - if i > 0 && format[i-1] == '.' { - b.WriteString("000") - } else { - b.WriteString(".000") - } - - default: - b.WriteString(f) - } - } else { - b.WriteByte(format[i]) - } - i++ - } - } - return b.String() -} - -// formatToRegexPattern converts the custom format to its corresponding regular expression. -func formatToRegexPattern(format string) string { - s := gregex.Quote(formatToStdLayout(format)) - s, _ = gregex.ReplaceString(`[0-9]`, `[0-9]`, s) - s, _ = gregex.ReplaceString(`[A-Za-z]`, `[A-Za-z]`, s) - s, _ = gregex.ReplaceString(`\s+`, `\s+`, s) - return s -} - -// formatMonthDaySuffixMap returns the short english word for current day. -func formatMonthDaySuffixMap(day string) string { - switch day { - case "01", "21", "31": - return "st" - case "02", "22": - return "nd" - case "03", "23": - return "rd" - default: - return "th" - } -} diff --git a/vendor/github.com/gogf/gf/v2/os/gtime/gtime_sql.go b/vendor/github.com/gogf/gf/v2/os/gtime/gtime_sql.go deleted file mode 100644 index e3c79a81..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gtime/gtime_sql.go +++ /dev/null @@ -1,28 +0,0 @@ -package gtime - -import ( - "database/sql/driver" -) - -// Scan implements interface used by Scan in package database/sql for Scanning value -// from database to local golang variable. -func (t *Time) Scan(value interface{}) error { - if t == nil { - return nil - } - newTime := New(value) - *t = *newTime - return nil -} - -// Value is the interface providing the Value method for package database/sql/driver -// for retrieving value from golang variable to database. 
-func (t *Time) Value() (driver.Value, error) { - if t == nil { - return nil, nil - } - if t.IsZero() { - return nil, nil - } - return t.Time, nil -} diff --git a/vendor/github.com/gogf/gf/v2/os/gtime/gtime_time.go b/vendor/github.com/gogf/gf/v2/os/gtime/gtime_time.go deleted file mode 100644 index ed8254d3..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gtime/gtime_time.go +++ /dev/null @@ -1,554 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gtime - -import ( - "bytes" - "strconv" - "time" - - "github.com/gogf/gf/v2/errors/gcode" - "github.com/gogf/gf/v2/errors/gerror" -) - -// Time is a wrapper for time.Time for additional features. -type Time struct { - wrapper -} - -// iUnixNano is an interface definition commonly for custom time.Time wrapper. -type iUnixNano interface { - UnixNano() int64 -} - -// New creates and returns a Time object with given parameter. -// The optional parameter can be type of: time.Time/*time.Time, string or integer. 
-func New(param ...interface{}) *Time { - if len(param) > 0 { - switch r := param[0].(type) { - case time.Time: - return NewFromTime(r) - case *time.Time: - return NewFromTime(*r) - - case Time: - return &r - - case *Time: - return r - - case string: - if len(param) > 1 { - switch t := param[1].(type) { - case string: - return NewFromStrFormat(r, t) - case []byte: - return NewFromStrFormat(r, string(t)) - } - } - return NewFromStr(r) - - case []byte: - if len(param) > 1 { - switch t := param[1].(type) { - case string: - return NewFromStrFormat(string(r), t) - case []byte: - return NewFromStrFormat(string(r), string(t)) - } - } - return NewFromStr(string(r)) - - case int: - return NewFromTimeStamp(int64(r)) - - case int64: - return NewFromTimeStamp(r) - - default: - if v, ok := r.(iUnixNano); ok { - return NewFromTimeStamp(v.UnixNano()) - } - } - } - return &Time{ - wrapper{time.Time{}}, - } -} - -// Now creates and returns a time object of now. -func Now() *Time { - return &Time{ - wrapper{time.Now()}, - } -} - -// NewFromTime creates and returns a Time object with given time.Time object. -func NewFromTime(t time.Time) *Time { - return &Time{ - wrapper{t}, - } -} - -// NewFromStr creates and returns a Time object with given string. -// Note that it returns nil if there's error occurs. -func NewFromStr(str string) *Time { - if t, err := StrToTime(str); err == nil { - return t - } - return nil -} - -// NewFromStrFormat creates and returns a Time object with given string and -// custom format like: Y-m-d H:i:s. -// Note that it returns nil if there's error occurs. -func NewFromStrFormat(str string, format string) *Time { - if t, err := StrToTimeFormat(str, format); err == nil { - return t - } - return nil -} - -// NewFromStrLayout creates and returns a Time object with given string and -// stdlib layout like: 2006-01-02 15:04:05. -// Note that it returns nil if there's error occurs. 
-func NewFromStrLayout(str string, layout string) *Time { - if t, err := StrToTimeLayout(str, layout); err == nil { - return t - } - return nil -} - -// NewFromTimeStamp creates and returns a Time object with given timestamp, -// which can be in seconds to nanoseconds. -// Eg: 1600443866 and 1600443866199266000 are both considered as valid timestamp number. -func NewFromTimeStamp(timestamp int64) *Time { - if timestamp == 0 { - return &Time{} - } - var sec, nano int64 - if timestamp > 1e9 { - for timestamp < 1e18 { - timestamp *= 10 - } - sec = timestamp / 1e9 - nano = timestamp % 1e9 - } else { - sec = timestamp - } - return &Time{ - wrapper{time.Unix(sec, nano)}, - } -} - -// Timestamp returns the timestamp in seconds. -func (t *Time) Timestamp() int64 { - if t.IsZero() { - return 0 - } - return t.UnixNano() / 1e9 -} - -// TimestampMilli returns the timestamp in milliseconds. -func (t *Time) TimestampMilli() int64 { - if t.IsZero() { - return 0 - } - return t.UnixNano() / 1e6 -} - -// TimestampMicro returns the timestamp in microseconds. -func (t *Time) TimestampMicro() int64 { - if t.IsZero() { - return 0 - } - return t.UnixNano() / 1e3 -} - -// TimestampNano returns the timestamp in nanoseconds. -func (t *Time) TimestampNano() int64 { - if t.IsZero() { - return 0 - } - return t.UnixNano() -} - -// TimestampStr is a convenience method which retrieves and returns -// the timestamp in seconds as string. -func (t *Time) TimestampStr() string { - if t.IsZero() { - return "" - } - return strconv.FormatInt(t.Timestamp(), 10) -} - -// TimestampMilliStr is a convenience method which retrieves and returns -// the timestamp in milliseconds as string. -func (t *Time) TimestampMilliStr() string { - if t.IsZero() { - return "" - } - return strconv.FormatInt(t.TimestampMilli(), 10) -} - -// TimestampMicroStr is a convenience method which retrieves and returns -// the timestamp in microseconds as string. 
-func (t *Time) TimestampMicroStr() string { - if t.IsZero() { - return "" - } - return strconv.FormatInt(t.TimestampMicro(), 10) -} - -// TimestampNanoStr is a convenience method which retrieves and returns -// the timestamp in nanoseconds as string. -func (t *Time) TimestampNanoStr() string { - if t.IsZero() { - return "" - } - return strconv.FormatInt(t.TimestampNano(), 10) -} - -// Month returns the month of the year specified by t. -func (t *Time) Month() int { - if t.IsZero() { - return 0 - } - return int(t.Time.Month()) -} - -// Second returns the second offset within the minute specified by t, -// in the range [0, 59]. -func (t *Time) Second() int { - if t.IsZero() { - return 0 - } - return t.Time.Second() -} - -// Millisecond returns the millisecond offset within the second specified by t, -// in the range [0, 999]. -func (t *Time) Millisecond() int { - if t.IsZero() { - return 0 - } - return t.Time.Nanosecond() / 1e6 -} - -// Microsecond returns the microsecond offset within the second specified by t, -// in the range [0, 999999]. -func (t *Time) Microsecond() int { - if t.IsZero() { - return 0 - } - return t.Time.Nanosecond() / 1e3 -} - -// Nanosecond returns the nanosecond offset within the second specified by t, -// in the range [0, 999999999]. -func (t *Time) Nanosecond() int { - if t.IsZero() { - return 0 - } - return t.Time.Nanosecond() -} - -// String returns current time object as string. -func (t *Time) String() string { - if t.IsZero() { - return "" - } - return t.wrapper.String() -} - -// IsZero reports whether t represents the zero time instant, -// January 1, year 1, 00:00:00 UTC. -func (t *Time) IsZero() bool { - if t == nil { - return true - } - return t.Time.IsZero() -} - -// Clone returns a new Time object which is a clone of current time object. -func (t *Time) Clone() *Time { - return New(t.Time) -} - -// Add adds the duration to current time. 
-func (t *Time) Add(d time.Duration) *Time { - newTime := t.Clone() - newTime.Time = newTime.Time.Add(d) - return newTime -} - -// AddStr parses the given duration as string and adds it to current time. -func (t *Time) AddStr(duration string) (*Time, error) { - if d, err := time.ParseDuration(duration); err != nil { - err = gerror.Wrapf(err, `time.ParseDuration failed for string "%s"`, duration) - return nil, err - } else { - return t.Add(d), nil - } -} - -// UTC converts current time to UTC timezone. -func (t *Time) UTC() *Time { - newTime := t.Clone() - newTime.Time = newTime.Time.UTC() - return newTime -} - -// ISO8601 formats the time as ISO8601 and returns it as string. -func (t *Time) ISO8601() string { - return t.Layout("2006-01-02T15:04:05-07:00") -} - -// RFC822 formats the time as RFC822 and returns it as string. -func (t *Time) RFC822() string { - return t.Layout("Mon, 02 Jan 06 15:04 MST") -} - -// AddDate adds year, month and day to the time. -func (t *Time) AddDate(years int, months int, days int) *Time { - newTime := t.Clone() - newTime.Time = newTime.Time.AddDate(years, months, days) - return newTime -} - -// Round returns the result of rounding t to the nearest multiple of d (since the zero time). -// The rounding behavior for halfway values is to round up. -// If d <= 0, Round returns t stripped of any monotonic clock reading but otherwise unchanged. -// -// Round operates on the time as an absolute duration since the -// zero time; it does not operate on the presentation form of the -// time. Thus, Round(Hour) may return a time with a non-zero -// minute, depending on the time's Location. -func (t *Time) Round(d time.Duration) *Time { - newTime := t.Clone() - newTime.Time = newTime.Time.Round(d) - return newTime -} - -// Truncate returns the result of rounding t down to a multiple of d (since the zero time). -// If d <= 0, Truncate returns t stripped of any monotonic clock reading but otherwise unchanged. 
-// -// Truncate operates on the time as an absolute duration since the -// zero time; it does not operate on the presentation form of the -// time. Thus, Truncate(Hour) may return a time with a non-zero -// minute, depending on the time's Location. -func (t *Time) Truncate(d time.Duration) *Time { - newTime := t.Clone() - newTime.Time = newTime.Time.Truncate(d) - return newTime -} - -// Equal reports whether t and u represent the same time instant. -// Two times can be equal even if they are in different locations. -// For example, 6:00 +0200 CEST and 4:00 UTC are Equal. -// See the documentation on the Time type for the pitfalls of using == with -// Time values; most code should use Equal instead. -func (t *Time) Equal(u *Time) bool { - switch { - case t == nil && u != nil: - return false - case t == nil && u == nil: - return true - case t != nil && u == nil: - return false - default: - return t.Time.Equal(u.Time) - } -} - -// Before reports whether the time instant t is before u. -func (t *Time) Before(u *Time) bool { - return t.Time.Before(u.Time) -} - -// After reports whether the time instant t is after u. -func (t *Time) After(u *Time) bool { - switch { - case t == nil: - return false - case t != nil && u == nil: - return true - default: - return t.Time.After(u.Time) - } -} - -// Sub returns the duration t-u. If the result exceeds the maximum (or minimum) -// value that can be stored in a Duration, the maximum (or minimum) duration -// will be returned. -// To compute t-d for a duration d, use t.Add(-d). -func (t *Time) Sub(u *Time) time.Duration { - if t == nil || u == nil { - return 0 - } - return t.Time.Sub(u.Time) -} - -// StartOfMinute clones and returns a new time of which the seconds is set to 0. -func (t *Time) StartOfMinute() *Time { - newTime := t.Clone() - newTime.Time = newTime.Time.Truncate(time.Minute) - return newTime -} - -// StartOfHour clones and returns a new time of which the hour, minutes and seconds are set to 0. 
-func (t *Time) StartOfHour() *Time { - y, m, d := t.Date() - newTime := t.Clone() - newTime.Time = time.Date(y, m, d, newTime.Time.Hour(), 0, 0, 0, newTime.Time.Location()) - return newTime -} - -// StartOfDay clones and returns a new time which is the start of day, its time is set to 00:00:00. -func (t *Time) StartOfDay() *Time { - y, m, d := t.Date() - newTime := t.Clone() - newTime.Time = time.Date(y, m, d, 0, 0, 0, 0, newTime.Time.Location()) - return newTime -} - -// StartOfWeek clones and returns a new time which is the first day of week and its time is set to -// 00:00:00. -func (t *Time) StartOfWeek() *Time { - weekday := int(t.Weekday()) - return t.StartOfDay().AddDate(0, 0, -weekday) -} - -// StartOfMonth clones and returns a new time which is the first day of the month and its is set to -// 00:00:00 -func (t *Time) StartOfMonth() *Time { - y, m, _ := t.Date() - newTime := t.Clone() - newTime.Time = time.Date(y, m, 1, 0, 0, 0, 0, newTime.Time.Location()) - return newTime -} - -// StartOfQuarter clones and returns a new time which is the first day of the quarter and its time is set -// to 00:00:00. -func (t *Time) StartOfQuarter() *Time { - month := t.StartOfMonth() - offset := (int(month.Month()) - 1) % 3 - return month.AddDate(0, -offset, 0) -} - -// StartOfHalf clones and returns a new time which is the first day of the half year and its time is set -// to 00:00:00. -func (t *Time) StartOfHalf() *Time { - month := t.StartOfMonth() - offset := (int(month.Month()) - 1) % 6 - return month.AddDate(0, -offset, 0) -} - -// StartOfYear clones and returns a new time which is the first day of the year and its time is set to -// 00:00:00. -func (t *Time) StartOfYear() *Time { - y, _, _ := t.Date() - newTime := t.Clone() - newTime.Time = time.Date(y, time.January, 1, 0, 0, 0, 0, newTime.Time.Location()) - return newTime -} - -// getPrecisionDelta returns the precision parameter for time calculation depending on `withNanoPrecision` option. 
-func getPrecisionDelta(withNanoPrecision ...bool) time.Duration { - if len(withNanoPrecision) > 0 && withNanoPrecision[0] { - return time.Nanosecond - } - return time.Second -} - -// EndOfMinute clones and returns a new time of which the seconds is set to 59. -func (t *Time) EndOfMinute(withNanoPrecision ...bool) *Time { - return t.StartOfMinute().Add(time.Minute - getPrecisionDelta(withNanoPrecision...)) -} - -// EndOfHour clones and returns a new time of which the minutes and seconds are both set to 59. -func (t *Time) EndOfHour(withNanoPrecision ...bool) *Time { - return t.StartOfHour().Add(time.Hour - getPrecisionDelta(withNanoPrecision...)) -} - -// EndOfDay clones and returns a new time which is the end of day the and its time is set to 23:59:59. -func (t *Time) EndOfDay(withNanoPrecision ...bool) *Time { - y, m, d := t.Date() - newTime := t.Clone() - newTime.Time = time.Date( - y, m, d, 23, 59, 59, int(time.Second-getPrecisionDelta(withNanoPrecision...)), newTime.Time.Location(), - ) - return newTime -} - -// EndOfWeek clones and returns a new time which is the end of week and its time is set to 23:59:59. -func (t *Time) EndOfWeek(withNanoPrecision ...bool) *Time { - return t.StartOfWeek().AddDate(0, 0, 7).Add(-getPrecisionDelta(withNanoPrecision...)) -} - -// EndOfMonth clones and returns a new time which is the end of the month and its time is set to 23:59:59. -func (t *Time) EndOfMonth(withNanoPrecision ...bool) *Time { - return t.StartOfMonth().AddDate(0, 1, 0).Add(-getPrecisionDelta(withNanoPrecision...)) -} - -// EndOfQuarter clones and returns a new time which is end of the quarter and its time is set to 23:59:59. -func (t *Time) EndOfQuarter(withNanoPrecision ...bool) *Time { - return t.StartOfQuarter().AddDate(0, 3, 0).Add(-getPrecisionDelta(withNanoPrecision...)) -} - -// EndOfHalf clones and returns a new time which is the end of the half year and its time is set to 23:59:59. 
-func (t *Time) EndOfHalf(withNanoPrecision ...bool) *Time { - return t.StartOfHalf().AddDate(0, 6, 0).Add(-getPrecisionDelta(withNanoPrecision...)) -} - -// EndOfYear clones and returns a new time which is the end of the year and its time is set to 23:59:59. -func (t *Time) EndOfYear(withNanoPrecision ...bool) *Time { - return t.StartOfYear().AddDate(1, 0, 0).Add(-getPrecisionDelta(withNanoPrecision...)) -} - -// MarshalJSON implements the interface MarshalJSON for json.Marshal. -// Note that, DO NOT use `(t *Time) MarshalJSON() ([]byte, error)` as it looses interface -// implement of `MarshalJSON` for struct of Time. -func (t Time) MarshalJSON() ([]byte, error) { - return []byte(`"` + t.String() + `"`), nil -} - -// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. -func (t *Time) UnmarshalJSON(b []byte) error { - if len(b) == 0 { - t.Time = time.Time{} - return nil - } - newTime, err := StrToTime(string(bytes.Trim(b, `"`))) - if err != nil { - return err - } - t.Time = newTime.Time - return nil -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface. -// Note that it overwrites the same implementer of `time.Time`. -func (t *Time) UnmarshalText(data []byte) error { - vTime := New(data) - if vTime != nil { - *t = *vTime - return nil - } - return gerror.NewCodef(gcode.CodeInvalidParameter, `invalid time value: %s`, data) -} - -// NoValidation marks this struct object will not be validated by package gvalid. -func (t *Time) NoValidation() {} - -// DeepCopy implements interface for deep copy of current type. 
-func (t *Time) DeepCopy() interface{} { - if t == nil { - return nil - } - return New(t.Time) -} diff --git a/vendor/github.com/gogf/gf/v2/os/gtime/gtime_time_wrapper.go b/vendor/github.com/gogf/gf/v2/os/gtime/gtime_time_wrapper.go deleted file mode 100644 index 28f8b9bf..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gtime/gtime_time_wrapper.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gtime - -import ( - "time" -) - -// wrapper is a wrapper for stdlib struct time.Time. -// It's used for overwriting some functions of time.Time, for example: String. -type wrapper struct { - time.Time -} - -// String overwrites the String function of time.Time. -func (t wrapper) String() string { - if t.IsZero() { - return "" - } - if t.Year() == 0 { - // Only time. - return t.Format("15:04:05") - } - return t.Format("2006-01-02 15:04:05") -} diff --git a/vendor/github.com/gogf/gf/v2/os/gtime/gtime_time_zone.go b/vendor/github.com/gogf/gf/v2/os/gtime/gtime_time_zone.go deleted file mode 100644 index ce6a8c03..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gtime/gtime_time_zone.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gtime - -import ( - "os" - "strings" - "sync" - "time" - - "github.com/gogf/gf/v2/errors/gcode" - "github.com/gogf/gf/v2/errors/gerror" -) - -var ( - setTimeZoneMu sync.Mutex - setTimeZoneName string - zoneMap = make(map[string]*time.Location) - zoneMu sync.RWMutex -) - -// SetTimeZone sets the time zone for current whole process. 
-// The parameter `zone` is an area string specifying corresponding time zone, -// eg: Asia/Shanghai. -// -// PLEASE VERY NOTE THAT: -// 1. This should be called before package "time" import. -// 2. This function should be called once. -// 3. Please refer to issue: https://github.com/golang/go/issues/34814 -func SetTimeZone(zone string) (err error) { - setTimeZoneMu.Lock() - defer setTimeZoneMu.Unlock() - if setTimeZoneName != "" && !strings.EqualFold(zone, setTimeZoneName) { - return gerror.NewCodef( - gcode.CodeInvalidOperation, - `process timezone already set using "%s"`, - setTimeZoneName, - ) - } - defer func() { - if err == nil { - setTimeZoneName = zone - } - }() - - // It is already set to time.Local. - if strings.EqualFold(zone, time.Local.String()) { - return - } - - // Load zone info from specified name. - location, err := time.LoadLocation(zone) - if err != nil { - err = gerror.WrapCodef(gcode.CodeInvalidParameter, err, `time.LoadLocation failed for zone "%s"`, zone) - return err - } - - // Update the time.Local for once. - time.Local = location - - // Update the timezone environment for *nix systems. - var ( - envKey = "TZ" - envValue = location.String() - ) - if err = os.Setenv(envKey, envValue); err != nil { - err = gerror.WrapCodef( - gcode.CodeUnknown, - err, - `set environment failed with key "%s", value "%s"`, - envKey, envValue, - ) - } - return -} - -// ToLocation converts current time to specified location. -func (t *Time) ToLocation(location *time.Location) *Time { - newTime := t.Clone() - newTime.Time = newTime.Time.In(location) - return newTime -} - -// ToZone converts current time to specified zone like: Asia/Shanghai. 
-func (t *Time) ToZone(zone string) (*Time, error) { - if location, err := t.getLocationByZoneName(zone); err == nil { - return t.ToLocation(location), nil - } else { - return nil, err - } -} - -func (t *Time) getLocationByZoneName(name string) (location *time.Location, err error) { - zoneMu.RLock() - location = zoneMap[name] - zoneMu.RUnlock() - if location == nil { - location, err = time.LoadLocation(name) - if err != nil { - err = gerror.Wrapf(err, `time.LoadLocation failed for name "%s"`, name) - } - if location != nil { - zoneMu.Lock() - zoneMap[name] = location - zoneMu.Unlock() - } - } - return -} - -// Local converts the time to local timezone. -func (t *Time) Local() *Time { - newTime := t.Clone() - newTime.Time = newTime.Time.Local() - return newTime -} diff --git a/vendor/github.com/gogf/gf/v2/os/gtimer/gtimer.go b/vendor/github.com/gogf/gf/v2/os/gtimer/gtimer.go deleted file mode 100644 index 7988d4d2..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gtimer/gtimer.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -// Package gtimer implements timer for interval/delayed jobs running and management. -// -// This package is designed for management for millions of timing jobs. The differences -// between gtimer and gcron are as follows: -// 1. package gcron is implemented based on package gtimer. -// 2. gtimer is designed for high performance and for millions of timing jobs. -// 3. gcron supports configuration pattern grammar like linux crontab, which is more manually -// readable. -// 4. gtimer's benchmark OP is measured in nanoseconds, and gcron's benchmark OP is measured -// in microseconds. 
-// -// ALSO VERY NOTE the common delay of the timer: https://github.com/golang/go/issues/14410 -package gtimer - -import ( - "context" - "strconv" - "sync" - "time" - - "github.com/gogf/gf/v2/container/gtype" - "github.com/gogf/gf/v2/errors/gcode" - "github.com/gogf/gf/v2/errors/gerror" - "github.com/gogf/gf/v2/internal/command" -) - -// Timer is the timer manager, which uses ticks to calculate the timing interval. -type Timer struct { - mu sync.RWMutex - queue *priorityQueue // queue is a priority queue based on heap structure. - status *gtype.Int // status is the current timer status. - ticks *gtype.Int64 // ticks is the proceeded interval number by the timer. - options TimerOptions // timer options is used for timer configuration. -} - -// TimerOptions is the configuration object for Timer. -type TimerOptions struct { - Interval time.Duration // Interval is the interval escaped of the timer. - Quick bool // Quick is used for quick timer, which means the timer will not wait for the first interval to be elapsed. -} - -// internalPanic is the custom panic for internal usage. -type internalPanic string - -const ( - StatusReady = 0 // Job or Timer is ready for running. - StatusRunning = 1 // Job or Timer is already running. - StatusStopped = 2 // Job or Timer is stopped. - StatusClosed = -1 // Job or Timer is closed and waiting to be deleted. - panicExit internalPanic = "exit" // panicExit is used for custom job exit with panic. - defaultTimerInterval = "100" // defaultTimerInterval is the default timer interval in milliseconds. - // commandEnvKeyForInterval is the key for command argument or environment configuring default interval duration for timer. 
- commandEnvKeyForInterval = "gf.gtimer.interval" -) - -var ( - defaultInterval = getDefaultInterval() - defaultTimer = New() -) - -func getDefaultInterval() time.Duration { - interval := command.GetOptWithEnv(commandEnvKeyForInterval, defaultTimerInterval) - n, err := strconv.Atoi(interval) - if err != nil { - panic(gerror.WrapCodef( - gcode.CodeInvalidConfiguration, err, `error converting string "%s" to int number`, - interval, - )) - } - return time.Duration(n) * time.Millisecond -} - -// DefaultOptions creates and returns a default options object for Timer creation. -func DefaultOptions() TimerOptions { - return TimerOptions{ - Interval: defaultInterval, - } -} - -// SetTimeout runs the job once after duration of `delay`. -// It is like the one in javascript. -func SetTimeout(ctx context.Context, delay time.Duration, job JobFunc) { - AddOnce(ctx, delay, job) -} - -// SetInterval runs the job every duration of `delay`. -// It is like the one in javascript. -func SetInterval(ctx context.Context, interval time.Duration, job JobFunc) { - Add(ctx, interval, job) -} - -// Add adds a timing job to the default timer, which runs in interval of `interval`. -func Add(ctx context.Context, interval time.Duration, job JobFunc) *Entry { - return defaultTimer.Add(ctx, interval, job) -} - -// AddEntry adds a timing job to the default timer with detailed parameters. -// -// The parameter `interval` specifies the running interval of the job. -// -// The parameter `singleton` specifies whether the job running in singleton mode. -// There's only one of the same job is allowed running when its a singleton mode job. -// -// The parameter `times` specifies limit for the job running times, which means the job -// exits if its run times exceeds the `times`. -// -// The parameter `status` specifies the job status when it's firstly added to the timer. 
-func AddEntry(ctx context.Context, interval time.Duration, job JobFunc, isSingleton bool, times int, status int) *Entry { - return defaultTimer.AddEntry(ctx, interval, job, isSingleton, times, status) -} - -// AddSingleton is a convenience function for add singleton mode job. -func AddSingleton(ctx context.Context, interval time.Duration, job JobFunc) *Entry { - return defaultTimer.AddSingleton(ctx, interval, job) -} - -// AddOnce is a convenience function for adding a job which only runs once and then exits. -func AddOnce(ctx context.Context, interval time.Duration, job JobFunc) *Entry { - return defaultTimer.AddOnce(ctx, interval, job) -} - -// AddTimes is a convenience function for adding a job which is limited running times. -func AddTimes(ctx context.Context, interval time.Duration, times int, job JobFunc) *Entry { - return defaultTimer.AddTimes(ctx, interval, times, job) -} - -// DelayAdd adds a timing job after delay of `interval` duration. -// Also see Add. -func DelayAdd(ctx context.Context, delay time.Duration, interval time.Duration, job JobFunc) { - defaultTimer.DelayAdd(ctx, delay, interval, job) -} - -// DelayAddEntry adds a timing job after delay of `interval` duration. -// Also see AddEntry. -func DelayAddEntry(ctx context.Context, delay time.Duration, interval time.Duration, job JobFunc, isSingleton bool, times int, status int) { - defaultTimer.DelayAddEntry(ctx, delay, interval, job, isSingleton, times, status) -} - -// DelayAddSingleton adds a timing job after delay of `interval` duration. -// Also see AddSingleton. -func DelayAddSingleton(ctx context.Context, delay time.Duration, interval time.Duration, job JobFunc) { - defaultTimer.DelayAddSingleton(ctx, delay, interval, job) -} - -// DelayAddOnce adds a timing job after delay of `interval` duration. -// Also see AddOnce. 
-func DelayAddOnce(ctx context.Context, delay time.Duration, interval time.Duration, job JobFunc) { - defaultTimer.DelayAddOnce(ctx, delay, interval, job) -} - -// DelayAddTimes adds a timing job after delay of `interval` duration. -// Also see AddTimes. -func DelayAddTimes(ctx context.Context, delay time.Duration, interval time.Duration, times int, job JobFunc) { - defaultTimer.DelayAddTimes(ctx, delay, interval, times, job) -} diff --git a/vendor/github.com/gogf/gf/v2/os/gtimer/gtimer_entry.go b/vendor/github.com/gogf/gf/v2/os/gtimer/gtimer_entry.go deleted file mode 100644 index 0e3d1ac5..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gtimer/gtimer_entry.go +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gtimer - -import ( - "context" - "github.com/gogf/gf/v2/errors/gcode" - - "github.com/gogf/gf/v2/container/gtype" - "github.com/gogf/gf/v2/errors/gerror" -) - -// Entry is the timing job. -type Entry struct { - job JobFunc // The job function. - ctx context.Context // The context for the job, for READ ONLY. - timer *Timer // Belonged timer. - ticks int64 // The job runs every tick. - times *gtype.Int // Limit running times. - status *gtype.Int // Job status. - isSingleton *gtype.Bool // Singleton mode. - nextTicks *gtype.Int64 // Next run ticks of the job. - infinite *gtype.Bool // No times limit. -} - -// JobFunc is the timing called job function in timer. -type JobFunc = func(ctx context.Context) - -// Status returns the status of the job. -func (entry *Entry) Status() int { - return entry.status.Val() -} - -// Run runs the timer job asynchronously. -func (entry *Entry) Run() { - if !entry.infinite.Val() { - leftRunningTimes := entry.times.Add(-1) - // It checks its running times exceeding. 
- if leftRunningTimes < 0 { - entry.status.Set(StatusClosed) - return - } - } - go func() { - defer func() { - if exception := recover(); exception != nil { - if exception != panicExit { - if v, ok := exception.(error); ok && gerror.HasStack(v) { - panic(v) - } else { - panic(gerror.NewCodef(gcode.CodeInternalPanic, "exception recovered: %+v", exception)) - } - } else { - entry.Close() - return - } - } - if entry.Status() == StatusRunning { - entry.SetStatus(StatusReady) - } - }() - entry.job(entry.ctx) - }() -} - -// doCheckAndRunByTicks checks the if job can run in given timer ticks, -// it runs asynchronously if the given `currentTimerTicks` meets or else -// it increments its ticks and waits for next running check. -func (entry *Entry) doCheckAndRunByTicks(currentTimerTicks int64) { - // Ticks check. - if currentTimerTicks < entry.nextTicks.Val() { - return - } - entry.nextTicks.Set(currentTimerTicks + entry.ticks) - // Perform job checking. - switch entry.status.Val() { - case StatusRunning: - if entry.IsSingleton() { - return - } - case StatusReady: - if !entry.status.Cas(StatusReady, StatusRunning) { - return - } - case StatusStopped: - return - case StatusClosed: - return - } - // Perform job running. - entry.Run() -} - -// SetStatus custom sets the status for the job. -func (entry *Entry) SetStatus(status int) int { - return entry.status.Set(status) -} - -// Start starts the job. -func (entry *Entry) Start() { - entry.status.Set(StatusReady) -} - -// Stop stops the job. -func (entry *Entry) Stop() { - entry.status.Set(StatusStopped) -} - -// Close closes the job, and then it will be removed from the timer. -func (entry *Entry) Close() { - entry.status.Set(StatusClosed) -} - -// Reset resets the job, which resets its ticks for next running. -func (entry *Entry) Reset() { - entry.nextTicks.Set(entry.timer.ticks.Val() + entry.ticks) -} - -// IsSingleton checks and returns whether the job in singleton mode. 
-func (entry *Entry) IsSingleton() bool { - return entry.isSingleton.Val() -} - -// SetSingleton sets the job singleton mode. -func (entry *Entry) SetSingleton(enabled bool) { - entry.isSingleton.Set(enabled) -} - -// Job returns the job function of this job. -func (entry *Entry) Job() JobFunc { - return entry.job -} - -// Ctx returns the initialized context of this job. -func (entry *Entry) Ctx() context.Context { - return entry.ctx -} - -// SetTimes sets the limit running times for the job. -func (entry *Entry) SetTimes(times int) { - entry.times.Set(times) - entry.infinite.Set(false) -} diff --git a/vendor/github.com/gogf/gf/v2/os/gtimer/gtimer_exit.go b/vendor/github.com/gogf/gf/v2/os/gtimer/gtimer_exit.go deleted file mode 100644 index 2ff3e698..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gtimer/gtimer_exit.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gtimer - -// Exit is used in timing job internally, which exits and marks it closed from timer. -// The timing job will be automatically removed from timer later. It uses "panic-recover" -// mechanism internally implementing this feature, which is designed for simplification -// and convenience. -func Exit() { - panic(panicExit) -} diff --git a/vendor/github.com/gogf/gf/v2/os/gtimer/gtimer_queue.go b/vendor/github.com/gogf/gf/v2/os/gtimer/gtimer_queue.go deleted file mode 100644 index 92b57523..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gtimer/gtimer_queue.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. 
- -package gtimer - -import ( - "container/heap" - "math" - "sync" - - "github.com/gogf/gf/v2/container/gtype" -) - -// priorityQueue is an abstract data type similar to a regular queue or stack data structure in which -// each element additionally has a "priority" associated with it. In a priority queue, an element with -// high priority is served before an element with low priority. -// priorityQueue is based on heap structure. -type priorityQueue struct { - mu sync.Mutex - heap *priorityQueueHeap // the underlying queue items manager using heap. - nextPriority *gtype.Int64 // nextPriority stores the next priority value of the heap, which is used to check if necessary to call the Pop of heap by Timer. -} - -// priorityQueueHeap is a heap manager, of which the underlying `array` is an array implementing a heap structure. -type priorityQueueHeap struct { - array []priorityQueueItem -} - -// priorityQueueItem stores the queue item which has a `priority` attribute to sort itself in heap. -type priorityQueueItem struct { - value interface{} - priority int64 -} - -// newPriorityQueue creates and returns a priority queue. -func newPriorityQueue() *priorityQueue { - queue := &priorityQueue{ - heap: &priorityQueueHeap{array: make([]priorityQueueItem, 0)}, - nextPriority: gtype.NewInt64(math.MaxInt64), - } - heap.Init(queue.heap) - return queue -} - -// NextPriority retrieves and returns the minimum and the most priority value of the queue. -func (q *priorityQueue) NextPriority() int64 { - return q.nextPriority.Val() -} - -// Push pushes a value to the queue. -// The `priority` specifies the priority of the value. -// The lesser the `priority` value the higher priority of the `value`. -func (q *priorityQueue) Push(value interface{}, priority int64) { - q.mu.Lock() - defer q.mu.Unlock() - heap.Push(q.heap, priorityQueueItem{ - value: value, - priority: priority, - }) - // Update the minimum priority using atomic operation. 
- nextPriority := q.nextPriority.Val() - if priority >= nextPriority { - return - } - q.nextPriority.Set(priority) -} - -// Pop retrieves, removes and returns the most high priority value from the queue. -func (q *priorityQueue) Pop() interface{} { - q.mu.Lock() - defer q.mu.Unlock() - if v := heap.Pop(q.heap); v != nil { - var nextPriority int64 = math.MaxInt64 - if len(q.heap.array) > 0 { - nextPriority = q.heap.array[0].priority - } - q.nextPriority.Set(nextPriority) - return v.(priorityQueueItem).value - } - return nil -} diff --git a/vendor/github.com/gogf/gf/v2/os/gtimer/gtimer_queue_heap.go b/vendor/github.com/gogf/gf/v2/os/gtimer/gtimer_queue_heap.go deleted file mode 100644 index c4b2f5db..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gtimer/gtimer_queue_heap.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gtimer - -// Len is used to implement the interface of sort.Interface. -func (h *priorityQueueHeap) Len() int { - return len(h.array) -} - -// Less is used to implement the interface of sort.Interface. -// The least one is placed to the top of the heap. -func (h *priorityQueueHeap) Less(i, j int) bool { - return h.array[i].priority < h.array[j].priority -} - -// Swap is used to implement the interface of sort.Interface. -func (h *priorityQueueHeap) Swap(i, j int) { - if len(h.array) == 0 { - return - } - h.array[i], h.array[j] = h.array[j], h.array[i] -} - -// Push pushes an item to the heap. -func (h *priorityQueueHeap) Push(x interface{}) { - h.array = append(h.array, x.(priorityQueueItem)) -} - -// Pop retrieves, removes and returns the most high priority item from the heap. 
-func (h *priorityQueueHeap) Pop() interface{} { - length := len(h.array) - if length == 0 { - return nil - } - item := h.array[length-1] - h.array = h.array[0 : length-1] - return item -} diff --git a/vendor/github.com/gogf/gf/v2/os/gtimer/gtimer_timer.go b/vendor/github.com/gogf/gf/v2/os/gtimer/gtimer_timer.go deleted file mode 100644 index 2792cc44..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gtimer/gtimer_timer.go +++ /dev/null @@ -1,208 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gtimer - -import ( - "context" - "time" - - "github.com/gogf/gf/v2/container/gtype" -) - -// New creates and returns a Timer. -func New(options ...TimerOptions) *Timer { - t := &Timer{ - queue: newPriorityQueue(), - status: gtype.NewInt(StatusRunning), - ticks: gtype.NewInt64(), - } - if len(options) > 0 { - t.options = options[0] - if t.options.Interval == 0 { - t.options.Interval = defaultInterval - } - } else { - t.options = DefaultOptions() - } - go t.loop() - return t -} - -// Add adds a timing job to the timer, which runs in interval of `interval`. -func (t *Timer) Add(ctx context.Context, interval time.Duration, job JobFunc) *Entry { - return t.createEntry(createEntryInput{ - Ctx: ctx, - Interval: interval, - Job: job, - IsSingleton: false, - Times: -1, - Status: StatusReady, - }) -} - -// AddEntry adds a timing job to the timer with detailed parameters. -// -// The parameter `interval` specifies the running interval of the job. -// -// The parameter `singleton` specifies whether the job running in singleton mode. -// There's only one of the same job is allowed running when it's a singleton mode job. -// -// The parameter `times` specifies limit for the job running times, which means the job -// exits if its run times exceeds the `times`. 
-// -// The parameter `status` specifies the job status when it's firstly added to the timer. -func (t *Timer) AddEntry(ctx context.Context, interval time.Duration, job JobFunc, isSingleton bool, times int, status int) *Entry { - return t.createEntry(createEntryInput{ - Ctx: ctx, - Interval: interval, - Job: job, - IsSingleton: isSingleton, - Times: times, - Status: status, - }) -} - -// AddSingleton is a convenience function for add singleton mode job. -func (t *Timer) AddSingleton(ctx context.Context, interval time.Duration, job JobFunc) *Entry { - return t.createEntry(createEntryInput{ - Ctx: ctx, - Interval: interval, - Job: job, - IsSingleton: true, - Times: -1, - Status: StatusReady, - }) -} - -// AddOnce is a convenience function for adding a job which only runs once and then exits. -func (t *Timer) AddOnce(ctx context.Context, interval time.Duration, job JobFunc) *Entry { - return t.createEntry(createEntryInput{ - Ctx: ctx, - Interval: interval, - Job: job, - IsSingleton: true, - Times: 1, - Status: StatusReady, - }) -} - -// AddTimes is a convenience function for adding a job which is limited running times. -func (t *Timer) AddTimes(ctx context.Context, interval time.Duration, times int, job JobFunc) *Entry { - return t.createEntry(createEntryInput{ - Ctx: ctx, - Interval: interval, - Job: job, - IsSingleton: true, - Times: times, - Status: StatusReady, - }) -} - -// DelayAdd adds a timing job after delay of `delay` duration. -// Also see Add. -func (t *Timer) DelayAdd(ctx context.Context, delay time.Duration, interval time.Duration, job JobFunc) { - t.AddOnce(ctx, delay, func(ctx context.Context) { - t.Add(ctx, interval, job) - }) -} - -// DelayAddEntry adds a timing job after delay of `delay` duration. -// Also see AddEntry. 
-func (t *Timer) DelayAddEntry(ctx context.Context, delay time.Duration, interval time.Duration, job JobFunc, isSingleton bool, times int, status int) { - t.AddOnce(ctx, delay, func(ctx context.Context) { - t.AddEntry(ctx, interval, job, isSingleton, times, status) - }) -} - -// DelayAddSingleton adds a timing job after delay of `delay` duration. -// Also see AddSingleton. -func (t *Timer) DelayAddSingleton(ctx context.Context, delay time.Duration, interval time.Duration, job JobFunc) { - t.AddOnce(ctx, delay, func(ctx context.Context) { - t.AddSingleton(ctx, interval, job) - }) -} - -// DelayAddOnce adds a timing job after delay of `delay` duration. -// Also see AddOnce. -func (t *Timer) DelayAddOnce(ctx context.Context, delay time.Duration, interval time.Duration, job JobFunc) { - t.AddOnce(ctx, delay, func(ctx context.Context) { - t.AddOnce(ctx, interval, job) - }) -} - -// DelayAddTimes adds a timing job after delay of `delay` duration. -// Also see AddTimes. -func (t *Timer) DelayAddTimes(ctx context.Context, delay time.Duration, interval time.Duration, times int, job JobFunc) { - t.AddOnce(ctx, delay, func(ctx context.Context) { - t.AddTimes(ctx, interval, times, job) - }) -} - -// Start starts the timer. -func (t *Timer) Start() { - t.status.Set(StatusRunning) -} - -// Stop stops the timer. -func (t *Timer) Stop() { - t.status.Set(StatusStopped) -} - -// Close closes the timer. -func (t *Timer) Close() { - t.status.Set(StatusClosed) -} - -type createEntryInput struct { - Ctx context.Context - Interval time.Duration - Job JobFunc - IsSingleton bool - Times int - Status int -} - -// createEntry creates and adds a timing job to the timer. 
-func (t *Timer) createEntry(in createEntryInput) *Entry { - var ( - infinite = false - nextTicks int64 - ) - if in.Times <= 0 { - infinite = true - } - var ( - intervalTicksOfJob = int64(in.Interval / t.options.Interval) - ) - if intervalTicksOfJob == 0 { - // If the given interval is lesser than the one of the wheel, - // then sets it to one tick, which means it will be run in one interval. - intervalTicksOfJob = 1 - } - if t.options.Quick { - // If the quick mode is enabled, which means it will be run right now. - // Don't need to wait for the first interval. - nextTicks = t.ticks.Val() - } else { - nextTicks = t.ticks.Val() + intervalTicksOfJob - } - var ( - entry = &Entry{ - job: in.Job, - ctx: in.Ctx, - timer: t, - ticks: intervalTicksOfJob, - times: gtype.NewInt(in.Times), - status: gtype.NewInt(in.Status), - isSingleton: gtype.NewBool(in.IsSingleton), - nextTicks: gtype.NewInt64(nextTicks), - infinite: gtype.NewBool(infinite), - } - ) - t.queue.Push(entry, nextTicks) - return entry -} diff --git a/vendor/github.com/gogf/gf/v2/os/gtimer/gtimer_timer_loop.go b/vendor/github.com/gogf/gf/v2/os/gtimer/gtimer_timer_loop.go deleted file mode 100644 index ae94bd31..00000000 --- a/vendor/github.com/gogf/gf/v2/os/gtimer/gtimer_timer_loop.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gtimer - -import "time" - -// loop starts the ticker using a standalone goroutine. -func (t *Timer) loop() { - go func() { - var ( - currentTimerTicks int64 - timerIntervalTicker = time.NewTicker(t.options.Interval) - ) - defer timerIntervalTicker.Stop() - for { - select { - case <-timerIntervalTicker.C: - // Check the timer status. - switch t.status.Val() { - case StatusRunning: - // Timer proceeding. 
- if currentTimerTicks = t.ticks.Add(1); currentTimerTicks >= t.queue.NextPriority() { - t.proceed(currentTimerTicks) - } - - case StatusStopped: - // Do nothing. - - case StatusClosed: - // Timer exits. - return - } - } - } - }() -} - -// proceed function proceeds the timer job checking and running logic. -func (t *Timer) proceed(currentTimerTicks int64) { - var ( - value interface{} - ) - for { - value = t.queue.Pop() - if value == nil { - break - } - entry := value.(*Entry) - // It checks if it meets the ticks' requirement. - if jobNextTicks := entry.nextTicks.Val(); currentTimerTicks < jobNextTicks { - // It pushes the job back if current ticks does not meet its running ticks requirement. - t.queue.Push(entry, entry.nextTicks.Val()) - break - } - // It checks the job running requirements and then does asynchronous running. - entry.doCheckAndRunByTicks(currentTimerTicks) - // Status check: push back or ignore it. - if entry.Status() != StatusClosed { - // It pushes the job back to queue for next running. - t.queue.Push(entry, entry.nextTicks.Val()) - } - } -} diff --git a/vendor/github.com/gogf/gf/v2/text/gregex/gregex.go b/vendor/github.com/gogf/gf/v2/text/gregex/gregex.go deleted file mode 100644 index a1dfc968..00000000 --- a/vendor/github.com/gogf/gf/v2/text/gregex/gregex.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -// Package gregex provides high performance API for regular expression functionality. -package gregex - -import ( - "regexp" -) - -// Quote quotes `s` by replacing special chars in `s` -// to match the rules of regular expression pattern. -// And returns the copy. -// -// Eg: Quote(`[foo]`) returns `\[foo\]`. 
-func Quote(s string) string { - return regexp.QuoteMeta(s) -} - -// Validate checks whether given regular expression pattern `pattern` valid. -func Validate(pattern string) error { - _, err := getRegexp(pattern) - return err -} - -// IsMatch checks whether given bytes `src` matches `pattern`. -func IsMatch(pattern string, src []byte) bool { - if r, err := getRegexp(pattern); err == nil { - return r.Match(src) - } - return false -} - -// IsMatchString checks whether given string `src` matches `pattern`. -func IsMatchString(pattern string, src string) bool { - return IsMatch(pattern, []byte(src)) -} - -// Match return bytes slice that matched `pattern`. -func Match(pattern string, src []byte) ([][]byte, error) { - if r, err := getRegexp(pattern); err == nil { - return r.FindSubmatch(src), nil - } else { - return nil, err - } -} - -// MatchString return strings that matched `pattern`. -func MatchString(pattern string, src string) ([]string, error) { - if r, err := getRegexp(pattern); err == nil { - return r.FindStringSubmatch(src), nil - } else { - return nil, err - } -} - -// MatchAll return all bytes slices that matched `pattern`. -func MatchAll(pattern string, src []byte) ([][][]byte, error) { - if r, err := getRegexp(pattern); err == nil { - return r.FindAllSubmatch(src, -1), nil - } else { - return nil, err - } -} - -// MatchAllString return all strings that matched `pattern`. -func MatchAllString(pattern string, src string) ([][]string, error) { - if r, err := getRegexp(pattern); err == nil { - return r.FindAllStringSubmatch(src, -1), nil - } else { - return nil, err - } -} - -// Replace replaces all matched `pattern` in bytes `src` with bytes `replace`. -func Replace(pattern string, replace, src []byte) ([]byte, error) { - if r, err := getRegexp(pattern); err == nil { - return r.ReplaceAll(src, replace), nil - } else { - return nil, err - } -} - -// ReplaceString replace all matched `pattern` in string `src` with string `replace`. 
-func ReplaceString(pattern, replace, src string) (string, error) { - r, e := Replace(pattern, []byte(replace), []byte(src)) - return string(r), e -} - -// ReplaceFunc replace all matched `pattern` in bytes `src` -// with custom replacement function `replaceFunc`. -func ReplaceFunc(pattern string, src []byte, replaceFunc func(b []byte) []byte) ([]byte, error) { - if r, err := getRegexp(pattern); err == nil { - return r.ReplaceAllFunc(src, replaceFunc), nil - } else { - return nil, err - } -} - -// ReplaceFuncMatch replace all matched `pattern` in bytes `src` -// with custom replacement function `replaceFunc`. -// The parameter `match` type for `replaceFunc` is [][]byte, -// which is the result contains all sub-patterns of `pattern` using Match function. -func ReplaceFuncMatch(pattern string, src []byte, replaceFunc func(match [][]byte) []byte) ([]byte, error) { - if r, err := getRegexp(pattern); err == nil { - return r.ReplaceAllFunc(src, func(bytes []byte) []byte { - match, _ := Match(pattern, bytes) - return replaceFunc(match) - }), nil - } else { - return nil, err - } -} - -// ReplaceStringFunc replace all matched `pattern` in string `src` -// with custom replacement function `replaceFunc`. -func ReplaceStringFunc(pattern string, src string, replaceFunc func(s string) string) (string, error) { - bytes, err := ReplaceFunc(pattern, []byte(src), func(bytes []byte) []byte { - return []byte(replaceFunc(string(bytes))) - }) - return string(bytes), err -} - -// ReplaceStringFuncMatch replace all matched `pattern` in string `src` -// with custom replacement function `replaceFunc`. -// The parameter `match` type for `replaceFunc` is []string, -// which is the result contains all sub-patterns of `pattern` using MatchString function. 
-func ReplaceStringFuncMatch(pattern string, src string, replaceFunc func(match []string) string) (string, error) { - if r, err := getRegexp(pattern); err == nil { - return string(r.ReplaceAllFunc([]byte(src), func(bytes []byte) []byte { - match, _ := MatchString(pattern, string(bytes)) - return []byte(replaceFunc(match)) - })), nil - } else { - return "", err - } -} - -// Split slices `src` into substrings separated by the expression and returns a slice of -// the substrings between those expression matches. -func Split(pattern string, src string) []string { - if r, err := getRegexp(pattern); err == nil { - return r.Split(src, -1) - } - return nil -} diff --git a/vendor/github.com/gogf/gf/v2/text/gregex/gregex_cache.go b/vendor/github.com/gogf/gf/v2/text/gregex/gregex_cache.go deleted file mode 100644 index 1cc9099c..00000000 --- a/vendor/github.com/gogf/gf/v2/text/gregex/gregex_cache.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gregex - -import ( - "regexp" - "sync" - - "github.com/gogf/gf/v2/errors/gerror" -) - -var ( - regexMu = sync.RWMutex{} - // Cache for regex object. - // Note that: - // 1. It uses sync.RWMutex ensuring the concurrent safety. - // 2. There's no expiring logic for this map. - regexMap = make(map[string]*regexp.Regexp) -) - -// getRegexp returns *regexp.Regexp object with given `pattern`. -// It uses cache to enhance the performance for compiling regular expression pattern, -// which means, it will return the same *regexp.Regexp object with the same regular -// expression pattern. -// -// It is concurrent-safe for multiple goroutines. -func getRegexp(pattern string) (regex *regexp.Regexp, err error) { - // Retrieve the regular expression object using reading lock. 
- regexMu.RLock() - regex = regexMap[pattern] - regexMu.RUnlock() - if regex != nil { - return - } - // If it does not exist in the cache, - // it compiles the pattern and creates one. - if regex, err = regexp.Compile(pattern); err != nil { - err = gerror.Wrapf(err, `regexp.Compile failed for pattern "%s"`, pattern) - return - } - // Cache the result object using writing lock. - regexMu.Lock() - regexMap[pattern] = regex - regexMu.Unlock() - return -} diff --git a/vendor/github.com/gogf/gf/v2/text/gstr/gstr.go b/vendor/github.com/gogf/gf/v2/text/gstr/gstr.go deleted file mode 100644 index 9932c590..00000000 --- a/vendor/github.com/gogf/gf/v2/text/gstr/gstr.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -// Package gstr provides functions for string handling. -package gstr - -const ( - // NotFoundIndex is the position index for string not found in searching functions. - NotFoundIndex = -1 -) - -const ( - defaultSuffixForStrLimit = "..." -) diff --git a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_array.go b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_array.go deleted file mode 100644 index 1e467023..00000000 --- a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_array.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gstr - -// SearchArray searches string `s` in string slice `a` case-sensitively, -// returns its index in `a`. -// If `s` is not found in `a`, it returns -1. 
-func SearchArray(a []string, s string) int { - for i, v := range a { - if s == v { - return i - } - } - return NotFoundIndex -} - -// InArray checks whether string `s` in slice `a`. -func InArray(a []string, s string) bool { - return SearchArray(a, s) != NotFoundIndex -} - -// PrefixArray adds `prefix` string for each item of `array`. -func PrefixArray(array []string, prefix string) { - for k, v := range array { - array[k] = prefix + v - } -} diff --git a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_case.go b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_case.go deleted file mode 100644 index c07fff72..00000000 --- a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_case.go +++ /dev/null @@ -1,184 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. -// -// | Function | Result | -// |-----------------------------------|--------------------| -// | CaseSnake(s) | any_kind_of_string | -// | CaseSnakeScreaming(s) | ANY_KIND_OF_STRING | -// | CaseSnakeFirstUpper("RGBCodeMd5") | rgb_code_md5 | -// | CaseKebab(s) | any-kind-of-string | -// | CaseKebabScreaming(s) | ANY-KIND-OF-STRING | -// | CaseDelimited(s, '.') | any.kind.of.string | -// | CaseDelimitedScreaming(s, '.') | ANY.KIND.OF.STRING | -// | CaseCamel(s) | AnyKindOfString | -// | CaseCamelLower(s) | anyKindOfString | - -package gstr - -import ( - "regexp" - "strings" -) - -var ( - numberSequence = regexp.MustCompile(`([a-zA-Z]{0,1})(\d+)([a-zA-Z]{0,1})`) - firstCamelCaseStart = regexp.MustCompile(`([A-Z]+)([A-Z]?[_a-z\d]+)|$`) - firstCamelCaseEnd = regexp.MustCompile(`([\w\W]*?)([_]?[A-Z]+)$`) -) - -// CaseCamel converts a string to CamelCase. -func CaseCamel(s string) string { - return toCamelInitCase(s, true) -} - -// CaseCamelLower converts a string to lowerCamelCase. 
-func CaseCamelLower(s string) string { - if s == "" { - return s - } - if r := rune(s[0]); r >= 'A' && r <= 'Z' { - s = strings.ToLower(string(r)) + s[1:] - } - return toCamelInitCase(s, false) -} - -// CaseSnake converts a string to snake_case. -func CaseSnake(s string) string { - return CaseDelimited(s, '_') -} - -// CaseSnakeScreaming converts a string to SNAKE_CASE_SCREAMING. -func CaseSnakeScreaming(s string) string { - return CaseDelimitedScreaming(s, '_', true) -} - -// CaseSnakeFirstUpper converts a string like "RGBCodeMd5" to "rgb_code_md5". -// TODO for efficiency should change regexp to traversing string in future. -func CaseSnakeFirstUpper(word string, underscore ...string) string { - replace := "_" - if len(underscore) > 0 { - replace = underscore[0] - } - - m := firstCamelCaseEnd.FindAllStringSubmatch(word, 1) - if len(m) > 0 { - word = m[0][1] + replace + TrimLeft(ToLower(m[0][2]), replace) - } - - for { - m = firstCamelCaseStart.FindAllStringSubmatch(word, 1) - if len(m) > 0 && m[0][1] != "" { - w := strings.ToLower(m[0][1]) - w = w[:len(w)-1] + replace + string(w[len(w)-1]) - - word = strings.Replace(word, m[0][1], w, 1) - } else { - break - } - } - - return TrimLeft(word, replace) -} - -// CaseKebab converts a string to kebab-case -func CaseKebab(s string) string { - return CaseDelimited(s, '-') -} - -// CaseKebabScreaming converts a string to KEBAB-CASE-SCREAMING. -func CaseKebabScreaming(s string) string { - return CaseDelimitedScreaming(s, '-', true) -} - -// CaseDelimited converts a string to snake.case.delimited. -func CaseDelimited(s string, del byte) string { - return CaseDelimitedScreaming(s, del, false) -} - -// CaseDelimitedScreaming converts a string to DELIMITED.SCREAMING.CASE or delimited.screaming.case. 
-func CaseDelimitedScreaming(s string, del uint8, screaming bool) string { - s = addWordBoundariesToNumbers(s) - s = strings.Trim(s, " ") - n := "" - for i, v := range s { - // treat acronyms as words, eg for JSONData -> JSON is a whole word - nextCaseIsChanged := false - if i+1 < len(s) { - next := s[i+1] - if (v >= 'A' && v <= 'Z' && next >= 'a' && next <= 'z') || (v >= 'a' && v <= 'z' && next >= 'A' && next <= 'Z') { - nextCaseIsChanged = true - } - } - - if i > 0 && n[len(n)-1] != del && nextCaseIsChanged { - // add underscore if next letter case type is changed - if v >= 'A' && v <= 'Z' { - n += string(del) + string(v) - } else if v >= 'a' && v <= 'z' { - n += string(v) + string(del) - } - } else if v == ' ' || v == '_' || v == '-' || v == '.' { - // replace spaces/underscores with delimiters - n += string(del) - } else { - n = n + string(v) - } - } - - if screaming { - n = strings.ToUpper(n) - } else { - n = strings.ToLower(n) - } - return n -} - -func addWordBoundariesToNumbers(s string) string { - r := numberSequence.ReplaceAllFunc([]byte(s), func(bytes []byte) []byte { - var result []byte - match := numberSequence.FindSubmatch(bytes) - if len(match[1]) > 0 { - result = append(result, match[1]...) - result = append(result, []byte(" ")...) - } - result = append(result, match[2]...) - if len(match[3]) > 0 { - result = append(result, []byte(" ")...) - result = append(result, match[3]...) - } - return result - }) - return string(r) -} - -// Converts a string to CamelCase -func toCamelInitCase(s string, initCase bool) string { - s = addWordBoundariesToNumbers(s) - s = strings.Trim(s, " ") - n := "" - capNext := initCase - for _, v := range s { - if v >= 'A' && v <= 'Z' { - n += string(v) - } - if v >= '0' && v <= '9' { - n += string(v) - } - if v >= 'a' && v <= 'z' { - if capNext { - n += strings.ToUpper(string(v)) - } else { - n += string(v) - } - } - if v == '_' || v == ' ' || v == '-' || v == '.' 
{ - capNext = true - } else { - capNext = false - } - } - return n -} diff --git a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_compare.go b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_compare.go deleted file mode 100644 index ae877f67..00000000 --- a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_compare.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gstr - -import "strings" - -// Compare returns an integer comparing two strings lexicographically. -// The result will be 0 if a==b, -1 if a < b, and +1 if a > b. -func Compare(a, b string) int { - return strings.Compare(a, b) -} - -// Equal reports whether `a` and `b`, interpreted as UTF-8 strings, -// are equal under Unicode case-folding, case-insensitively. -func Equal(a, b string) bool { - return strings.EqualFold(a, b) -} diff --git a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_contain.go b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_contain.go deleted file mode 100644 index 82994580..00000000 --- a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_contain.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gstr - -import "strings" - -// Contains reports whether `substr` is within `str`, case-sensitively. -func Contains(str, substr string) bool { - return strings.Contains(str, substr) -} - -// ContainsI reports whether substr is within str, case-insensitively. 
-func ContainsI(str, substr string) bool { - return PosI(str, substr) != -1 -} - -// ContainsAny reports whether any Unicode code points in `chars` are within `s`. -func ContainsAny(s, chars string) bool { - return strings.ContainsAny(s, chars) -} diff --git a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_convert.go b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_convert.go deleted file mode 100644 index fc29f11c..00000000 --- a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_convert.go +++ /dev/null @@ -1,265 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gstr - -import ( - "bytes" - "fmt" - "math" - "regexp" - "strconv" - "strings" - "unicode" - - "github.com/gogf/gf/v2/util/grand" -) - -var ( - // octReg is the regular expression object for checks octal string. - octReg = regexp.MustCompile(`\\[0-7]{3}`) -) - -// Chr return the ascii string of a number(0-255). -func Chr(ascii int) string { - return string([]byte{byte(ascii % 256)}) -} - -// Ord converts the first byte of a string to a value between 0 and 255. -func Ord(char string) int { - return int(char[0]) -} - -// OctStr converts string container octal string to its original string, -// for example, to Chinese string. -// Eg: `\346\200\241` -> 怡 -func OctStr(str string) string { - return octReg.ReplaceAllStringFunc( - str, - func(s string) string { - i, _ := strconv.ParseInt(s[1:], 8, 0) - return string([]byte{byte(i)}) - }, - ) -} - -// Reverse returns a string which is the reverse of `str`. -func Reverse(str string) string { - runes := []rune(str) - for i, j := 0, len(runes)-1; i < j; i, j = i+1, j-1 { - runes[i], runes[j] = runes[j], runes[i] - } - return string(runes) -} - -// NumberFormat formats a number with grouped thousands. -// `decimals`: Sets the number of decimal points. 
-// `decPoint`: Sets the separator for the decimal point. -// `thousandsSep`: Sets the thousands' separator. -// See http://php.net/manual/en/function.number-format.php. -func NumberFormat(number float64, decimals int, decPoint, thousandsSep string) string { - neg := false - if number < 0 { - number = -number - neg = true - } - // Will round off - str := fmt.Sprintf("%."+strconv.Itoa(decimals)+"F", number) - prefix, suffix := "", "" - if decimals > 0 { - prefix = str[:len(str)-(decimals+1)] - suffix = str[len(str)-decimals:] - } else { - prefix = str - } - sep := []byte(thousandsSep) - n, l1, l2 := 0, len(prefix), len(sep) - // thousands sep num - c := (l1 - 1) / 3 - tmp := make([]byte, l2*c+l1) - pos := len(tmp) - 1 - for i := l1 - 1; i >= 0; i, n, pos = i-1, n+1, pos-1 { - if l2 > 0 && n > 0 && n%3 == 0 { - for j := range sep { - tmp[pos] = sep[l2-j-1] - pos-- - } - } - tmp[pos] = prefix[i] - } - s := string(tmp) - if decimals > 0 { - s += decPoint + suffix - } - if neg { - s = "-" + s - } - - return s -} - -// Shuffle randomly shuffles a string. -// It considers parameter `str` as unicode string. -func Shuffle(str string) string { - runes := []rune(str) - s := make([]rune, len(runes)) - for i, v := range grand.Perm(len(runes)) { - s[i] = runes[v] - } - return string(s) -} - -// HideStr replaces part of the string `str` to `hide` by `percentage` from the `middle`. -// It considers parameter `str` as unicode string. -func HideStr(str string, percent int, hide string) string { - array := strings.Split(str, "@") - if len(array) > 1 { - str = array[0] - } - var ( - rs = []rune(str) - length = len(rs) - mid = math.Floor(float64(length / 2)) - hideLen = int(math.Floor(float64(length) * (float64(percent) / 100))) - start = int(mid - math.Floor(float64(hideLen)/2)) - hideStr = []rune("") - hideRune = []rune(hide) - ) - for i := 0; i < hideLen; i++ { - hideStr = append(hideStr, hideRune...) 
- } - buffer := bytes.NewBuffer(nil) - buffer.WriteString(string(rs[0:start])) - buffer.WriteString(string(hideStr)) - buffer.WriteString(string(rs[start+hideLen:])) - if len(array) > 1 { - buffer.WriteString("@" + array[1]) - } - return buffer.String() -} - -// Nl2Br inserts HTML line breaks(`br`|
) before all newlines in a string: -// \n\r, \r\n, \r, \n. -// It considers parameter `str` as unicode string. -func Nl2Br(str string, isXhtml ...bool) string { - r, n, runes := '\r', '\n', []rune(str) - var br []byte - if len(isXhtml) > 0 && isXhtml[0] { - br = []byte("
") - } else { - br = []byte("
") - } - skip := false - length := len(runes) - var buf bytes.Buffer - for i, v := range runes { - if skip { - skip = false - continue - } - switch v { - case n, r: - if (i+1 < length) && ((v == r && runes[i+1] == n) || (v == n && runes[i+1] == r)) { - buf.Write(br) - skip = true - continue - } - buf.Write(br) - default: - buf.WriteRune(v) - } - } - return buf.String() -} - -// WordWrap wraps a string to a given number of characters. -// This function supports cut parameters of both english and chinese punctuations. -// TODO: Enable custom cut parameter, see http://php.net/manual/en/function.wordwrap.php. -func WordWrap(str string, width int, br string) string { - if br == "" { - br = "\n" - } - var ( - current int - wordBuf, spaceBuf bytes.Buffer - init = make([]byte, 0, len(str)) - buf = bytes.NewBuffer(init) - strRunes = []rune(str) - ) - for _, char := range strRunes { - switch { - case char == '\n': - if wordBuf.Len() == 0 { - if current+spaceBuf.Len() > width { - current = 0 - } else { - current += spaceBuf.Len() - _, _ = spaceBuf.WriteTo(buf) - } - spaceBuf.Reset() - } else { - current += spaceBuf.Len() + wordBuf.Len() - _, _ = spaceBuf.WriteTo(buf) - spaceBuf.Reset() - _, _ = wordBuf.WriteTo(buf) - wordBuf.Reset() - } - buf.WriteRune(char) - current = 0 - - case unicode.IsSpace(char): - if spaceBuf.Len() == 0 || wordBuf.Len() > 0 { - current += spaceBuf.Len() + wordBuf.Len() - _, _ = spaceBuf.WriteTo(buf) - spaceBuf.Reset() - _, _ = wordBuf.WriteTo(buf) - wordBuf.Reset() - } - spaceBuf.WriteRune(char) - - case isPunctuation(char): - wordBuf.WriteRune(char) - if spaceBuf.Len() == 0 || wordBuf.Len() > 0 { - current += spaceBuf.Len() + wordBuf.Len() - _, _ = spaceBuf.WriteTo(buf) - spaceBuf.Reset() - _, _ = wordBuf.WriteTo(buf) - wordBuf.Reset() - } - - default: - wordBuf.WriteRune(char) - if current+spaceBuf.Len()+wordBuf.Len() > width && wordBuf.Len() < width { - buf.WriteString(br) - current = 0 - spaceBuf.Reset() - } - } - } - - if wordBuf.Len() == 0 { - 
if current+spaceBuf.Len() <= width { - _, _ = spaceBuf.WriteTo(buf) - } - } else { - _, _ = spaceBuf.WriteTo(buf) - _, _ = wordBuf.WriteTo(buf) - } - return buf.String() -} - -func isPunctuation(char int32) bool { - switch char { - // English Punctuations. - case ';', '.', ',', ':', '~': - return true - // Chinese Punctuations. - case ';', ',', '。', ':', '?', '!', '…', '、': - return true - default: - return false - } -} diff --git a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_count.go b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_count.go deleted file mode 100644 index cd0f0a19..00000000 --- a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_count.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gstr - -import ( - "bytes" - "strings" - "unicode" -) - -// Count counts the number of `substr` appears in `s`. -// It returns 0 if no `substr` found in `s`. -func Count(s, substr string) int { - return strings.Count(s, substr) -} - -// CountI counts the number of `substr` appears in `s`, case-insensitively. -// It returns 0 if no `substr` found in `s`. -func CountI(s, substr string) int { - return strings.Count(ToLower(s), ToLower(substr)) -} - -// CountWords returns information about words' count used in a string. -// It considers parameter `str` as unicode string. -func CountWords(str string) map[string]int { - m := make(map[string]int) - buffer := bytes.NewBuffer(nil) - for _, r := range []rune(str) { - if unicode.IsSpace(r) { - if buffer.Len() > 0 { - m[buffer.String()]++ - buffer.Reset() - } - } else { - buffer.WriteRune(r) - } - } - if buffer.Len() > 0 { - m[buffer.String()]++ - } - return m -} - -// CountChars returns information about chars' count used in a string. -// It considers parameter `str` as unicode string. 
-func CountChars(str string, noSpace ...bool) map[string]int { - m := make(map[string]int) - countSpace := true - if len(noSpace) > 0 && noSpace[0] { - countSpace = false - } - for _, r := range []rune(str) { - if !countSpace && unicode.IsSpace(r) { - continue - } - m[string(r)]++ - } - return m -} diff --git a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_create.go b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_create.go deleted file mode 100644 index 8e5ff312..00000000 --- a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_create.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gstr - -import "strings" - -// Repeat returns a new string consisting of multiplier copies of the string input. -func Repeat(input string, multiplier int) string { - return strings.Repeat(input, multiplier) -} diff --git a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_domain.go b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_domain.go deleted file mode 100644 index 35b78d48..00000000 --- a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_domain.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gstr - -import "strings" - -// IsSubDomain checks whether `subDomain` is sub-domain of mainDomain. -// It supports '*' in `mainDomain`. 
-func IsSubDomain(subDomain string, mainDomain string) bool { - if p := strings.IndexByte(subDomain, ':'); p != -1 { - subDomain = subDomain[0:p] - } - if p := strings.IndexByte(mainDomain, ':'); p != -1 { - mainDomain = mainDomain[0:p] - } - var ( - subArray = strings.Split(subDomain, ".") - mainArray = strings.Split(mainDomain, ".") - subLength = len(subArray) - mainLength = len(mainArray) - ) - // Eg: - // "goframe.org" is not sub-domain of "s.goframe.org". - if mainLength > subLength { - for i := range mainArray[0 : mainLength-subLength] { - if mainArray[i] != "*" { - return false - } - } - } - - // Eg: - // "s.s.goframe.org" is not sub-domain of "*.goframe.org" - // but - // "s.s.goframe.org" is sub-domain of "goframe.org" - if mainLength > 2 && subLength > mainLength { - return false - } - minLength := subLength - if mainLength < minLength { - minLength = mainLength - } - for i := minLength; i > 0; i-- { - if mainArray[mainLength-i] == "*" { - continue - } - if mainArray[mainLength-i] != subArray[subLength-i] { - return false - } - } - return true -} diff --git a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_is.go b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_is.go deleted file mode 100644 index 2f52e949..00000000 --- a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_is.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gstr - -import "github.com/gogf/gf/v2/internal/utils" - -// IsNumeric tests whether the given string s is numeric. 
-func IsNumeric(s string) bool { - return utils.IsNumeric(s) -} diff --git a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_length.go b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_length.go deleted file mode 100644 index 9e5b915b..00000000 --- a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_length.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gstr - -import "unicode/utf8" - -// LenRune returns string length of unicode. -func LenRune(str string) int { - return utf8.RuneCountInString(str) -} diff --git a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_parse.go b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_parse.go deleted file mode 100644 index 52878bc7..00000000 --- a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_parse.go +++ /dev/null @@ -1,181 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gstr - -import ( - "net/url" - "strings" - - "github.com/gogf/gf/v2/errors/gcode" - "github.com/gogf/gf/v2/errors/gerror" -) - -// Parse parses the string into map[string]interface{}. -// -// v1=m&v2=n -> map[v1:m v2:n] -// v[a]=m&v[b]=n -> map[v:map[a:m b:n]] -// v[a][a]=m&v[a][b]=n -> map[v:map[a:map[a:m b:n]]] -// v[]=m&v[]=n -> map[v:[m n]] -// v[a][]=m&v[a][]=n -> map[v:map[a:[m n]]] -// v[][]=m&v[][]=n -> map[v:[map[]]] // Currently does not support nested slice. 
-// v=m&v[a]=n -> error -// a .[[b=c -> map[a___[b:c] -func Parse(s string) (result map[string]interface{}, err error) { - if s == "" { - return nil, nil - } - result = make(map[string]interface{}) - parts := strings.Split(s, "&") - for _, part := range parts { - pos := strings.Index(part, "=") - if pos <= 0 { - continue - } - key, err := url.QueryUnescape(part[:pos]) - if err != nil { - err = gerror.Wrapf(err, `url.QueryUnescape failed for string "%s"`, part[:pos]) - return nil, err - } - - for len(key) > 0 && key[0] == ' ' { - key = key[1:] - } - - if key == "" || key[0] == '[' { - continue - } - value, err := url.QueryUnescape(part[pos+1:]) - if err != nil { - err = gerror.Wrapf(err, `url.QueryUnescape failed for string "%s"`, part[pos+1:]) - return nil, err - } - // split into multiple keys - var keys []string - left := 0 - for i, k := range key { - if k == '[' && left == 0 { - left = i - } else if k == ']' { - if left > 0 { - if len(keys) == 0 { - keys = append(keys, key[:left]) - } - keys = append(keys, key[left+1:i]) - left = 0 - if i+1 < len(key) && key[i+1] != '[' { - break - } - } - } - } - if len(keys) == 0 { - keys = append(keys, key) - } - // first key - first := "" - for i, chr := range keys[0] { - if chr == ' ' || chr == '.' || chr == '[' { - first += "_" - } else { - first += string(chr) - } - if chr == '[' { - first += keys[0][i+1:] - break - } - } - keys[0] = first - - // build nested map - if err = build(result, keys, value); err != nil { - return nil, err - } - } - return result, nil -} - -// build nested map. -func build(result map[string]interface{}, keys []string, value interface{}) error { - var ( - length = len(keys) - key = strings.Trim(keys[0], "'\"") - ) - if length == 1 { - result[key] = value - return nil - } - - // The end is slice. 
like f[], f[a][] - if keys[1] == "" && length == 2 { - // TODO nested slice - if key == "" { - return nil - } - val, ok := result[key] - if !ok { - result[key] = []interface{}{value} - return nil - } - children, ok := val.([]interface{}) - if !ok { - return gerror.NewCodef( - gcode.CodeInvalidParameter, - "expected type '[]interface{}' for key '%s', but got '%T'", - key, val, - ) - } - result[key] = append(children, value) - return nil - } - // The end is slice + map. like v[][a] - if keys[1] == "" && length > 2 && keys[2] != "" { - val, ok := result[key] - if !ok { - result[key] = []interface{}{} - val = result[key] - } - children, ok := val.([]interface{}) - if !ok { - return gerror.NewCodef( - gcode.CodeInvalidParameter, - "expected type '[]interface{}' for key '%s', but got '%T'", - key, val, - ) - } - if l := len(children); l > 0 { - if child, ok := children[l-1].(map[string]interface{}); ok { - if _, ok := child[keys[2]]; !ok { - _ = build(child, keys[2:], value) - return nil - } - } - } - child := map[string]interface{}{} - _ = build(child, keys[2:], value) - result[key] = append(children, child) - return nil - } - - // map, like v[a], v[a][b] - val, ok := result[key] - if !ok { - result[key] = map[string]interface{}{} - val = result[key] - } - children, ok := val.(map[string]interface{}) - if !ok { - return gerror.NewCodef( - gcode.CodeInvalidParameter, - "expected type 'map[string]interface{}' for key '%s', but got '%T'", - key, val, - ) - } - if err := build(children, keys[1:], value); err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_pos.go b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_pos.go deleted file mode 100644 index bf76a629..00000000 --- a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_pos.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. 
-// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gstr - -import "strings" - -// Pos returns the position of the first occurrence of `needle` -// in `haystack` from `startOffset`, case-sensitively. -// It returns -1, if not found. -func Pos(haystack, needle string, startOffset ...int) int { - length := len(haystack) - offset := 0 - if len(startOffset) > 0 { - offset = startOffset[0] - } - if length == 0 || offset > length || -offset > length { - return -1 - } - if offset < 0 { - offset += length - } - pos := strings.Index(haystack[offset:], needle) - if pos == NotFoundIndex { - return NotFoundIndex - } - return pos + offset -} - -// PosRune acts like function Pos but considers `haystack` and `needle` as unicode string. -func PosRune(haystack, needle string, startOffset ...int) int { - pos := Pos(haystack, needle, startOffset...) - if pos < 3 { - return pos - } - return len([]rune(haystack[:pos])) -} - -// PosI returns the position of the first occurrence of `needle` -// in `haystack` from `startOffset`, case-insensitively. -// It returns -1, if not found. -func PosI(haystack, needle string, startOffset ...int) int { - length := len(haystack) - offset := 0 - if len(startOffset) > 0 { - offset = startOffset[0] - } - if length == 0 || offset > length || -offset > length { - return -1 - } - - if offset < 0 { - offset += length - } - pos := strings.Index(strings.ToLower(haystack[offset:]), strings.ToLower(needle)) - if pos == -1 { - return -1 - } - return pos + offset -} - -// PosIRune acts like function PosI but considers `haystack` and `needle` as unicode string. -func PosIRune(haystack, needle string, startOffset ...int) int { - pos := PosI(haystack, needle, startOffset...) - if pos < 3 { - return pos - } - return len([]rune(haystack[:pos])) -} - -// PosR returns the position of the last occurrence of `needle` -// in `haystack` from `startOffset`, case-sensitively. 
-// It returns -1, if not found. -func PosR(haystack, needle string, startOffset ...int) int { - offset := 0 - if len(startOffset) > 0 { - offset = startOffset[0] - } - pos, length := 0, len(haystack) - if length == 0 || offset > length || -offset > length { - return -1 - } - - if offset < 0 { - haystack = haystack[:offset+length+1] - } else { - haystack = haystack[offset:] - } - pos = strings.LastIndex(haystack, needle) - if offset > 0 && pos != -1 { - pos += offset - } - return pos -} - -// PosRRune acts like function PosR but considers `haystack` and `needle` as unicode string. -func PosRRune(haystack, needle string, startOffset ...int) int { - pos := PosR(haystack, needle, startOffset...) - if pos < 3 { - return pos - } - return len([]rune(haystack[:pos])) -} - -// PosRI returns the position of the last occurrence of `needle` -// in `haystack` from `startOffset`, case-insensitively. -// It returns -1, if not found. -func PosRI(haystack, needle string, startOffset ...int) int { - offset := 0 - if len(startOffset) > 0 { - offset = startOffset[0] - } - pos, length := 0, len(haystack) - if length == 0 || offset > length || -offset > length { - return -1 - } - - if offset < 0 { - haystack = haystack[:offset+length+1] - } else { - haystack = haystack[offset:] - } - pos = strings.LastIndex(strings.ToLower(haystack), strings.ToLower(needle)) - if offset > 0 && pos != -1 { - pos += offset - } - return pos -} - -// PosRIRune acts like function PosRI but considers `haystack` and `needle` as unicode string. -func PosRIRune(haystack, needle string, startOffset ...int) int { - pos := PosRI(haystack, needle, startOffset...) 
- if pos < 3 { - return pos - } - return len([]rune(haystack[:pos])) -} diff --git a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_replace.go b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_replace.go deleted file mode 100644 index 1449f9be..00000000 --- a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_replace.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gstr - -import ( - "strings" - - "github.com/gogf/gf/v2/internal/utils" -) - -// Replace returns a copy of the string `origin` -// in which string `search` replaced by `replace` case-sensitively. -func Replace(origin, search, replace string, count ...int) string { - n := -1 - if len(count) > 0 { - n = count[0] - } - return strings.Replace(origin, search, replace, n) -} - -// ReplaceI returns a copy of the string `origin` -// in which string `search` replaced by `replace` case-insensitively. -func ReplaceI(origin, search, replace string, count ...int) string { - n := -1 - if len(count) > 0 { - n = count[0] - } - if n == 0 { - return origin - } - var ( - searchLength = len(search) - replaceLength = len(replace) - searchLower = strings.ToLower(search) - originLower string - pos int - ) - for { - originLower = strings.ToLower(origin) - if pos = Pos(originLower, searchLower, pos); pos != -1 { - origin = origin[:pos] + replace + origin[pos+searchLength:] - pos += replaceLength - if n--; n == 0 { - break - } - } else { - break - } - } - return origin -} - -// ReplaceByArray returns a copy of `origin`, -// which is replaced by a slice in order, case-sensitively. 
-func ReplaceByArray(origin string, array []string) string { - for i := 0; i < len(array); i += 2 { - if i+1 >= len(array) { - break - } - origin = Replace(origin, array[i], array[i+1]) - } - return origin -} - -// ReplaceIByArray returns a copy of `origin`, -// which is replaced by a slice in order, case-insensitively. -func ReplaceIByArray(origin string, array []string) string { - for i := 0; i < len(array); i += 2 { - if i+1 >= len(array) { - break - } - origin = ReplaceI(origin, array[i], array[i+1]) - } - return origin -} - -// ReplaceByMap returns a copy of `origin`, -// which is replaced by a map in unordered way, case-sensitively. -func ReplaceByMap(origin string, replaces map[string]string) string { - return utils.ReplaceByMap(origin, replaces) -} - -// ReplaceIByMap returns a copy of `origin`, -// which is replaced by a map in unordered way, case-insensitively. -func ReplaceIByMap(origin string, replaces map[string]string) string { - for k, v := range replaces { - origin = ReplaceI(origin, k, v) - } - return origin -} diff --git a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_similar.go b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_similar.go deleted file mode 100644 index 7c19618f..00000000 --- a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_similar.go +++ /dev/null @@ -1,158 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gstr - -// Levenshtein calculates Levenshtein distance between two strings. -// costIns: Defines the cost of insertion. -// costRep: Defines the cost of replacement. -// costDel: Defines the cost of deletion. -// See http://php.net/manual/en/function.levenshtein.php. 
-func Levenshtein(str1, str2 string, costIns, costRep, costDel int) int { - var maxLen = 255 - l1 := len(str1) - l2 := len(str2) - if l1 == 0 { - return l2 * costIns - } - if l2 == 0 { - return l1 * costDel - } - if l1 > maxLen || l2 > maxLen { - return -1 - } - - tmp := make([]int, l2+1) - p1 := make([]int, l2+1) - p2 := make([]int, l2+1) - var c0, c1, c2 int - var i1, i2 int - for i2 := 0; i2 <= l2; i2++ { - p1[i2] = i2 * costIns - } - for i1 = 0; i1 < l1; i1++ { - p2[0] = p1[0] + costDel - for i2 = 0; i2 < l2; i2++ { - if str1[i1] == str2[i2] { - c0 = p1[i2] - } else { - c0 = p1[i2] + costRep - } - c1 = p1[i2+1] + costDel - if c1 < c0 { - c0 = c1 - } - c2 = p2[i2] + costIns - if c2 < c0 { - c0 = c2 - } - p2[i2+1] = c0 - } - tmp = p1 - p1 = p2 - p2 = tmp - } - c0 = p1[l2] - - return c0 -} - -// SimilarText calculates the similarity between two strings. -// See http://php.net/manual/en/function.similar-text.php. -func SimilarText(first, second string, percent *float64) int { - var similarText func(string, string, int, int) int - similarText = func(str1, str2 string, len1, len2 int) int { - var sum, max int - pos1, pos2 := 0, 0 - - // Find the longest segment of the same section in two strings - for i := 0; i < len1; i++ { - for j := 0; j < len2; j++ { - for l := 0; (i+l < len1) && (j+l < len2) && (str1[i+l] == str2[j+l]); l++ { - if l+1 > max { - max = l + 1 - pos1 = i - pos2 = j - } - } - } - } - - if sum = max; sum > 0 { - if pos1 > 0 && pos2 > 0 { - sum += similarText(str1, str2, pos1, pos2) - } - if (pos1+max < len1) && (pos2+max < len2) { - s1 := []byte(str1) - s2 := []byte(str2) - sum += similarText(string(s1[pos1+max:]), string(s2[pos2+max:]), len1-pos1-max, len2-pos2-max) - } - } - - return sum - } - - l1, l2 := len(first), len(second) - if l1+l2 == 0 { - return 0 - } - sim := similarText(first, second, l1, l2) - if percent != nil { - *percent = float64(sim*200) / float64(l1+l2) - } - return sim -} - -// Soundex calculates the soundex key of a string. 
-// See http://php.net/manual/en/function.soundex.php. -func Soundex(str string) string { - if str == "" { - panic("str: cannot be an empty string") - } - table := [26]rune{ - '0', '1', '2', '3', // A, B, C, D - '0', '1', '2', // E, F, G - '0', // H - '0', '2', '2', '4', '5', '5', // I, J, K, L, M, N - '0', '1', '2', '6', '2', '3', // O, P, Q, R, S, T - '0', '1', // U, V - '0', '2', // W, X - '0', '2', // Y, Z - } - last, code, small := -1, 0, 0 - sd := make([]rune, 4) - // build soundex string - for i := 0; i < len(str) && small < 4; i++ { - // ToUpper - char := str[i] - if char < '\u007F' && 'a' <= char && char <= 'z' { - code = int(char - 'a' + 'A') - } else { - code = int(char) - } - if code >= 'A' && code <= 'Z' { - if small == 0 { - sd[small] = rune(code) - small++ - last = int(table[code-'A']) - } else { - code = int(table[code-'A']) - if code != last { - if code != 0 { - sd[small] = rune(code) - small++ - } - last = code - } - } - } - } - // pad with "0" - for ; small < 4; small++ { - sd[small] = '0' - } - return string(sd) -} diff --git a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_slashes.go b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_slashes.go deleted file mode 100644 index 2fed1814..00000000 --- a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_slashes.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gstr - -import ( - "bytes" - - "github.com/gogf/gf/v2/internal/utils" -) - -// AddSlashes quotes chars('"\) with slashes. -func AddSlashes(str string) string { - var buf bytes.Buffer - for _, char := range str { - switch char { - case '\'', '"', '\\': - buf.WriteRune('\\') - } - buf.WriteRune(char) - } - return buf.String() -} - -// StripSlashes un-quotes a quoted string by AddSlashes. 
-func StripSlashes(str string) string { - return utils.StripSlashes(str) -} - -// QuoteMeta returns a version of str with a backslash character (\) -// before every character that is among: .\+*?[^]($) -func QuoteMeta(str string, chars ...string) string { - var buf bytes.Buffer - for _, char := range str { - if len(chars) > 0 { - for _, c := range chars[0] { - if c == char { - buf.WriteRune('\\') - break - } - } - } else { - switch char { - case '.', '+', '\\', '(', '$', ')', '[', '^', ']', '*', '?': - buf.WriteRune('\\') - } - } - buf.WriteRune(char) - } - return buf.String() -} diff --git a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_split_join.go b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_split_join.go deleted file mode 100644 index 8858cacf..00000000 --- a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_split_join.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gstr - -import ( - "strings" - - "github.com/gogf/gf/v2/internal/utils" - "github.com/gogf/gf/v2/util/gconv" -) - -// Split splits string `str` by a string `delimiter`, to an array. -func Split(str, delimiter string) []string { - return strings.Split(str, delimiter) -} - -// SplitAndTrim splits string `str` by a string `delimiter` to an array, -// and calls Trim to every element of this array. It ignores the elements -// which are empty after Trim. -func SplitAndTrim(str, delimiter string, characterMask ...string) []string { - return utils.SplitAndTrim(str, delimiter, characterMask...) -} - -// Join concatenates the elements of `array` to create a single string. The separator string -// `sep` is placed between elements in the resulting string. 
-func Join(array []string, sep string) string { - return strings.Join(array, sep) -} - -// JoinAny concatenates the elements of `array` to create a single string. The separator string -// `sep` is placed between elements in the resulting string. -// -// The parameter `array` can be any type of slice, which be converted to string array. -func JoinAny(array interface{}, sep string) string { - return strings.Join(gconv.Strings(array), sep) -} - -// Explode splits string `str` by a string `delimiter`, to an array. -// See http://php.net/manual/en/function.explode.php. -func Explode(delimiter, str string) []string { - return Split(str, delimiter) -} - -// Implode joins array elements `pieces` with a string `glue`. -// http://php.net/manual/en/function.implode.php -func Implode(glue string, pieces []string) string { - return strings.Join(pieces, glue) -} - -// ChunkSplit splits a string into smaller chunks. -// Can be used to split a string into smaller chunks which is useful for -// e.g. converting BASE64 string output to match RFC 2045 semantics. -// It inserts end every chunkLen characters. -// It considers parameter `body` and `end` as unicode string. -func ChunkSplit(body string, chunkLen int, end string) string { - if end == "" { - end = "\r\n" - } - runes, endRunes := []rune(body), []rune(end) - l := len(runes) - if l <= 1 || l < chunkLen { - return body + end - } - ns := make([]rune, 0, len(runes)+len(endRunes)) - for i := 0; i < l; i += chunkLen { - if i+chunkLen > l { - ns = append(ns, runes[i:]...) - } else { - ns = append(ns, runes[i:i+chunkLen]...) - } - ns = append(ns, endRunes...) - } - return string(ns) -} - -// Fields returns the words used in a string as slice. 
-func Fields(str string) []string { - return strings.Fields(str) -} diff --git a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_sub.go b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_sub.go deleted file mode 100644 index e237cee3..00000000 --- a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_sub.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gstr - -import "strings" - -// Str returns part of `haystack` string starting from and including -// the first occurrence of `needle` to the end of `haystack`. -// See http://php.net/manual/en/function.strstr.php. -func Str(haystack string, needle string) string { - if needle == "" { - return "" - } - pos := strings.Index(haystack, needle) - if pos == NotFoundIndex { - return "" - } - return haystack[pos+len([]byte(needle))-1:] -} - -// StrEx returns part of `haystack` string starting from and excluding -// the first occurrence of `needle` to the end of `haystack`. -func StrEx(haystack string, needle string) string { - if s := Str(haystack, needle); s != "" { - return s[1:] - } - return "" -} - -// StrTill returns part of `haystack` string ending to and including -// the first occurrence of `needle` from the start of `haystack`. -func StrTill(haystack string, needle string) string { - pos := strings.Index(haystack, needle) - if pos == NotFoundIndex || pos == 0 { - return "" - } - return haystack[:pos+1] -} - -// StrTillEx returns part of `haystack` string ending to and excluding -// the first occurrence of `needle` from the start of `haystack`. 
-func StrTillEx(haystack string, needle string) string { - pos := strings.Index(haystack, needle) - if pos == NotFoundIndex || pos == 0 { - return "" - } - return haystack[:pos] -} - -// SubStr returns a portion of string `str` specified by the `start` and `length` parameters. -// The parameter `length` is optional, it uses the length of `str` in default. -func SubStr(str string, start int, length ...int) (substr string) { - strLength := len(str) - if start < 0 { - if -start > strLength { - start = 0 - } else { - start = strLength + start - } - } else if start > strLength { - return "" - } - realLength := 0 - if len(length) > 0 { - realLength = length[0] - if realLength < 0 { - if -realLength > strLength-start { - realLength = 0 - } else { - realLength = strLength - start + realLength - } - } else if realLength > strLength-start { - realLength = strLength - start - } - } else { - realLength = strLength - start - } - - if realLength == strLength { - return str - } else { - end := start + realLength - return str[start:end] - } -} - -// SubStrRune returns a portion of string `str` specified by the `start` and `length` parameters. -// SubStrRune considers parameter `str` as unicode string. -// The parameter `length` is optional, it uses the length of `str` in default. -func SubStrRune(str string, start int, length ...int) (substr string) { - // Converting to []rune to support unicode. 
- var ( - runes = []rune(str) - runesLength = len(runes) - ) - - strLength := runesLength - if start < 0 { - if -start > strLength { - start = 0 - } else { - start = strLength + start - } - } else if start > strLength { - return "" - } - realLength := 0 - if len(length) > 0 { - realLength = length[0] - if realLength < 0 { - if -realLength > strLength-start { - realLength = 0 - } else { - realLength = strLength - start + realLength - } - } else if realLength > strLength-start { - realLength = strLength - start - } - } else { - realLength = strLength - start - } - end := start + realLength - if end > runesLength { - end = runesLength - } - return string(runes[start:end]) -} - -// StrLimit returns a portion of string `str` specified by `length` parameters, if the length -// of `str` is greater than `length`, then the `suffix` will be appended to the result string. -func StrLimit(str string, length int, suffix ...string) string { - if len(str) < length { - return str - } - suffixStr := defaultSuffixForStrLimit - if len(suffix) > 0 { - suffixStr = suffix[0] - } - return str[0:length] + suffixStr -} - -// StrLimitRune returns a portion of string `str` specified by `length` parameters, if the length -// of `str` is greater than `length`, then the `suffix` will be appended to the result string. -// StrLimitRune considers parameter `str` as unicode string. -func StrLimitRune(str string, length int, suffix ...string) string { - runes := []rune(str) - if len(runes) < length { - return str - } - suffixStr := defaultSuffixForStrLimit - if len(suffix) > 0 { - suffixStr = suffix[0] - } - return string(runes[0:length]) + suffixStr -} - -// SubStrFrom returns a portion of string `str` starting from first occurrence of and including `need` -// to the end of `str`. 
-func SubStrFrom(str string, need string) (substr string) { - pos := Pos(str, need) - if pos < 0 { - return "" - } - return str[pos:] -} - -// SubStrFromEx returns a portion of string `str` starting from first occurrence of and excluding `need` -// to the end of `str`. -func SubStrFromEx(str string, need string) (substr string) { - pos := Pos(str, need) - if pos < 0 { - return "" - } - return str[pos+len(need):] -} - -// SubStrFromR returns a portion of string `str` starting from last occurrence of and including `need` -// to the end of `str`. -func SubStrFromR(str string, need string) (substr string) { - pos := PosR(str, need) - if pos < 0 { - return "" - } - return str[pos:] -} - -// SubStrFromREx returns a portion of string `str` starting from last occurrence of and excluding `need` -// to the end of `str`. -func SubStrFromREx(str string, need string) (substr string) { - pos := PosR(str, need) - if pos < 0 { - return "" - } - return str[pos+len(need):] -} diff --git a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_trim.go b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_trim.go deleted file mode 100644 index f7701505..00000000 --- a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_trim.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gstr - -import ( - "strings" - - "github.com/gogf/gf/v2/internal/utils" -) - -// Trim strips whitespace (or other characters) from the beginning and end of a string. -// The optional parameter `characterMask` specifies the additional stripped characters. -func Trim(str string, characterMask ...string) string { - return utils.Trim(str, characterMask...) -} - -// TrimStr strips all the given `cut` string from the beginning and end of a string. 
-// Note that it does not strip the whitespaces of its beginning or end. -func TrimStr(str string, cut string, count ...int) string { - return TrimLeftStr(TrimRightStr(str, cut, count...), cut, count...) -} - -// TrimLeft strips whitespace (or other characters) from the beginning of a string. -func TrimLeft(str string, characterMask ...string) string { - trimChars := utils.DefaultTrimChars - if len(characterMask) > 0 { - trimChars += characterMask[0] - } - return strings.TrimLeft(str, trimChars) -} - -// TrimLeftStr strips all the given `cut` string from the beginning of a string. -// Note that it does not strip the whitespaces of its beginning. -func TrimLeftStr(str string, cut string, count ...int) string { - var ( - lenCut = len(cut) - cutCount = 0 - ) - for len(str) >= lenCut && str[0:lenCut] == cut { - str = str[lenCut:] - cutCount++ - if len(count) > 0 && count[0] != -1 && cutCount >= count[0] { - break - } - } - return str -} - -// TrimRight strips whitespace (or other characters) from the end of a string. -func TrimRight(str string, characterMask ...string) string { - trimChars := utils.DefaultTrimChars - if len(characterMask) > 0 { - trimChars += characterMask[0] - } - return strings.TrimRight(str, trimChars) -} - -// TrimRightStr strips all the given `cut` string from the end of a string. -// Note that it does not strip the whitespaces of its end. -func TrimRightStr(str string, cut string, count ...int) string { - var ( - lenStr = len(str) - lenCut = len(cut) - cutCount = 0 - ) - for lenStr >= lenCut && str[lenStr-lenCut:lenStr] == cut { - lenStr = lenStr - lenCut - str = str[:lenStr] - cutCount++ - if len(count) > 0 && count[0] != -1 && cutCount >= count[0] { - break - } - } - return str -} - -// TrimAll trims all characters in string `str`. 
-func TrimAll(str string, characterMask ...string) string { - trimChars := utils.DefaultTrimChars - if len(characterMask) > 0 { - trimChars += characterMask[0] - } - var ( - filtered bool - slice = make([]rune, 0, len(str)) - ) - for _, char := range str { - filtered = false - for _, trimChar := range trimChars { - if char == trimChar { - filtered = true - break - } - } - if !filtered { - slice = append(slice, char) - } - } - return string(slice) -} - -// HasPrefix tests whether the string s begins with prefix. -func HasPrefix(s, prefix string) bool { - return strings.HasPrefix(s, prefix) -} - -// HasSuffix tests whether the string s ends with suffix. -func HasSuffix(s, suffix string) bool { - return strings.HasSuffix(s, suffix) -} diff --git a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_upper_lower.go b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_upper_lower.go deleted file mode 100644 index 69ad78c1..00000000 --- a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_upper_lower.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gstr - -import ( - "strings" - - "github.com/gogf/gf/v2/internal/utils" -) - -// ToLower returns a copy of the string s with all Unicode letters mapped to their lower case. -func ToLower(s string) string { - return strings.ToLower(s) -} - -// ToUpper returns a copy of the string s with all Unicode letters mapped to their upper case. -func ToUpper(s string) string { - return strings.ToUpper(s) -} - -// UcFirst returns a copy of the string s with the first letter mapped to its upper case. -func UcFirst(s string) string { - return utils.UcFirst(s) -} - -// LcFirst returns a copy of the string s with the first letter mapped to its lower case. 
-func LcFirst(s string) string { - if len(s) == 0 { - return s - } - if IsLetterUpper(s[0]) { - return string(s[0]+32) + s[1:] - } - return s -} - -// UcWords uppercase the first character of each word in a string. -func UcWords(str string) string { - return strings.Title(str) -} - -// IsLetterLower tests whether the given byte b is in lower case. -func IsLetterLower(b byte) bool { - return utils.IsLetterLower(b) -} - -// IsLetterUpper tests whether the given byte b is in upper case. -func IsLetterUpper(b byte) bool { - return utils.IsLetterUpper(b) -} diff --git a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_version.go b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_version.go deleted file mode 100644 index f931b2c9..00000000 --- a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_version.go +++ /dev/null @@ -1,189 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gstr - -import ( - "strings" - - "github.com/gogf/gf/v2/util/gconv" -) - -// IsGNUVersion checks and returns whether given `version` is valid GNU version string. -func IsGNUVersion(version string) bool { - if version != "" && (version[0] == 'v' || version[0] == 'V') { - version = version[1:] - } - if version == "" { - return false - } - var array = strings.Split(version, ".") - if len(array) > 3 { - return false - } - for _, v := range array { - if v == "" { - return false - } - if !IsNumeric(v) { - return false - } - if v[0] == '-' || v[0] == '+' { - return false - } - } - return true -} - -// CompareVersion compares `a` and `b` as standard GNU version. -// -// It returns 1 if `a` > `b`. -// -// It returns -1 if `a` < `b`. -// -// It returns 0 if `a` = `b`. -// -// GNU standard version is like: -// v1.0 -// 1 -// 1.0.0 -// v1.0.1 -// v2.10.8 -// 10.2.0 -// etc. 
-func CompareVersion(a, b string) int { - if a != "" && a[0] == 'v' { - a = a[1:] - } - if b != "" && b[0] == 'v' { - b = b[1:] - } - var ( - array1 = strings.Split(a, ".") - array2 = strings.Split(b, ".") - diff int - ) - diff = len(array2) - len(array1) - for i := 0; i < diff; i++ { - array1 = append(array1, "0") - } - diff = len(array1) - len(array2) - for i := 0; i < diff; i++ { - array2 = append(array2, "0") - } - v1 := 0 - v2 := 0 - for i := 0; i < len(array1); i++ { - v1 = gconv.Int(array1[i]) - v2 = gconv.Int(array2[i]) - if v1 > v2 { - return 1 - } - if v1 < v2 { - return -1 - } - } - return 0 -} - -// CompareVersionGo compares `a` and `b` as standard Golang version. -// -// It returns 1 if `a` > `b`. -// -// It returns -1 if `a` < `b`. -// -// It returns 0 if `a` = `b`. -// -// Golang standard version is like: -// 1.0.0 -// v1.0.1 -// v2.10.8 -// 10.2.0 -// v0.0.0-20190626092158-b2ccc519800e -// v1.12.2-0.20200413154443-b17e3a6804fa -// v4.20.0+incompatible -// etc. -// -// Docs: https://go.dev/doc/modules/version-numbers -func CompareVersionGo(a, b string) int { - a = Trim(a) - b = Trim(b) - if a != "" && a[0] == 'v' { - a = a[1:] - } - if b != "" && b[0] == 'v' { - b = b[1:] - } - var ( - rawA = a - rawB = b - ) - if Count(a, "-") > 1 { - if i := PosR(a, "-"); i > 0 { - a = a[:i] - } - } - if Count(b, "-") > 1 { - if i := PosR(b, "-"); i > 0 { - b = b[:i] - } - } - if i := Pos(a, "+"); i > 0 { - a = a[:i] - } - if i := Pos(b, "+"); i > 0 { - b = b[:i] - } - a = Replace(a, "-", ".") - b = Replace(b, "-", ".") - var ( - array1 = strings.Split(a, ".") - array2 = strings.Split(b, ".") - diff = len(array1) - len(array2) - ) - - for i := diff; i < 0; i++ { - array1 = append(array1, "0") - } - for i := 0; i < diff; i++ { - array2 = append(array2, "0") - } - - // check Major.Minor.Patch first - v1, v2 := 0, 0 - for i := 0; i < len(array1); i++ { - v1, v2 = gconv.Int(array1[i]), gconv.Int(array2[i]) - // Specially in Golang: - // 
"v1.12.2-0.20200413154443-b17e3a6804fa" < "v1.12.2" - // "v1.12.3-0.20200413154443-b17e3a6804fa" > "v1.12.2" - if i == 4 && v1 != v2 && (v1 == 0 || v2 == 0) { - if v1 > v2 { - return -1 - } else { - return 1 - } - } - - if v1 > v2 { - return 1 - } - if v1 < v2 { - return -1 - } - } - - // Specially in Golang: - // "v4.20.1+incompatible" < "v4.20.1" - inA, inB := Contains(rawA, "+incompatible"), Contains(rawB, "+incompatible") - if inA && !inB { - return -1 - } - if !inA && inB { - return 1 - } - - return 0 -} diff --git a/vendor/github.com/gogf/gf/v2/util/gconv/gconv.go b/vendor/github.com/gogf/gf/v2/util/gconv/gconv.go deleted file mode 100644 index 85ea1e42..00000000 --- a/vendor/github.com/gogf/gf/v2/util/gconv/gconv.go +++ /dev/null @@ -1,286 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -// Package gconv implements powerful and convenient converting functionality for any types of variables. -// -// This package should keep much less dependencies with other packages. -package gconv - -import ( - "context" - "fmt" - "math" - "reflect" - "strconv" - "strings" - "time" - - "github.com/gogf/gf/v2/encoding/gbinary" - "github.com/gogf/gf/v2/internal/intlog" - "github.com/gogf/gf/v2/internal/json" - "github.com/gogf/gf/v2/internal/reflection" - "github.com/gogf/gf/v2/os/gtime" - "github.com/gogf/gf/v2/util/gtag" -) - -var ( - // Empty strings. - emptyStringMap = map[string]struct{}{ - "": {}, - "0": {}, - "no": {}, - "off": {}, - "false": {}, - } - - // StructTagPriority defines the default priority tags for Map*/Struct* functions. - // Note that, the `gconv/param` tags are used by old version of package. - // It is strongly recommended using short tag `c/p` instead in the future. 
- StructTagPriority = []string{ - gtag.GConv, gtag.Param, gtag.GConvShort, gtag.ParamShort, gtag.Json, - } -) - -// Byte converts `any` to byte. -func Byte(any interface{}) byte { - if v, ok := any.(byte); ok { - return v - } - return Uint8(any) -} - -// Bytes converts `any` to []byte. -func Bytes(any interface{}) []byte { - if any == nil { - return nil - } - switch value := any.(type) { - case string: - return []byte(value) - - case []byte: - return value - - default: - if f, ok := value.(iBytes); ok { - return f.Bytes() - } - originValueAndKind := reflection.OriginValueAndKind(any) - switch originValueAndKind.OriginKind { - case reflect.Map: - bytes, err := json.Marshal(any) - if err != nil { - intlog.Errorf(context.TODO(), `%+v`, err) - } - return bytes - - case reflect.Array, reflect.Slice: - var ( - ok = true - bytes = make([]byte, originValueAndKind.OriginValue.Len()) - ) - for i := range bytes { - int32Value := Int32(originValueAndKind.OriginValue.Index(i).Interface()) - if int32Value < 0 || int32Value > math.MaxUint8 { - ok = false - break - } - bytes[i] = byte(int32Value) - } - if ok { - return bytes - } - } - return gbinary.Encode(any) - } -} - -// Rune converts `any` to rune. -func Rune(any interface{}) rune { - if v, ok := any.(rune); ok { - return v - } - return Int32(any) -} - -// Runes converts `any` to []rune. -func Runes(any interface{}) []rune { - if v, ok := any.([]rune); ok { - return v - } - return []rune(String(any)) -} - -// String converts `any` to string. -// It's most commonly used converting function. 
-func String(any interface{}) string { - if any == nil { - return "" - } - switch value := any.(type) { - case int: - return strconv.Itoa(value) - case int8: - return strconv.Itoa(int(value)) - case int16: - return strconv.Itoa(int(value)) - case int32: - return strconv.Itoa(int(value)) - case int64: - return strconv.FormatInt(value, 10) - case uint: - return strconv.FormatUint(uint64(value), 10) - case uint8: - return strconv.FormatUint(uint64(value), 10) - case uint16: - return strconv.FormatUint(uint64(value), 10) - case uint32: - return strconv.FormatUint(uint64(value), 10) - case uint64: - return strconv.FormatUint(value, 10) - case float32: - return strconv.FormatFloat(float64(value), 'f', -1, 32) - case float64: - return strconv.FormatFloat(value, 'f', -1, 64) - case bool: - return strconv.FormatBool(value) - case string: - return value - case []byte: - return string(value) - case time.Time: - if value.IsZero() { - return "" - } - return value.String() - case *time.Time: - if value == nil { - return "" - } - return value.String() - case gtime.Time: - if value.IsZero() { - return "" - } - return value.String() - case *gtime.Time: - if value == nil { - return "" - } - return value.String() - default: - // Empty checks. - if value == nil { - return "" - } - if f, ok := value.(iString); ok { - // If the variable implements the String() interface, - // then use that interface to perform the conversion - return f.String() - } - if f, ok := value.(iError); ok { - // If the variable implements the Error() interface, - // then use that interface to perform the conversion - return f.Error() - } - // Reflect checks. 
- var ( - rv = reflect.ValueOf(value) - kind = rv.Kind() - ) - switch kind { - case reflect.Chan, - reflect.Map, - reflect.Slice, - reflect.Func, - reflect.Ptr, - reflect.Interface, - reflect.UnsafePointer: - if rv.IsNil() { - return "" - } - case reflect.String: - return rv.String() - } - if kind == reflect.Ptr { - return String(rv.Elem().Interface()) - } - // Finally, we use json.Marshal to convert. - if jsonContent, err := json.Marshal(value); err != nil { - return fmt.Sprint(value) - } else { - return string(jsonContent) - } - } -} - -// Bool converts `any` to bool. -// It returns false if `any` is: false, "", 0, "false", "off", "no", empty slice/map. -func Bool(any interface{}) bool { - if any == nil { - return false - } - switch value := any.(type) { - case bool: - return value - case []byte: - if _, ok := emptyStringMap[strings.ToLower(string(value))]; ok { - return false - } - return true - case string: - if _, ok := emptyStringMap[strings.ToLower(value)]; ok { - return false - } - return true - default: - if f, ok := value.(iBool); ok { - return f.Bool() - } - rv := reflect.ValueOf(any) - switch rv.Kind() { - case reflect.Ptr: - return !rv.IsNil() - case reflect.Map: - fallthrough - case reflect.Array: - fallthrough - case reflect.Slice: - return rv.Len() != 0 - case reflect.Struct: - return true - default: - s := strings.ToLower(String(any)) - if _, ok := emptyStringMap[s]; ok { - return false - } - return true - } - } -} - -// checkJsonAndUnmarshalUseNumber checks if given `any` is JSON formatted string value and does converting using `json.UnmarshalUseNumber`. 
-func checkJsonAndUnmarshalUseNumber(any interface{}, target interface{}) bool { - switch r := any.(type) { - case []byte: - if json.Valid(r) { - if err := json.UnmarshalUseNumber(r, &target); err != nil { - return false - } - return true - } - - case string: - anyAsBytes := []byte(r) - if json.Valid(anyAsBytes) { - if err := json.UnmarshalUseNumber(anyAsBytes, &target); err != nil { - return false - } - return true - } - } - return false -} diff --git a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_convert.go b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_convert.go deleted file mode 100644 index 609d9e61..00000000 --- a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_convert.go +++ /dev/null @@ -1,320 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gconv - -import ( - "reflect" - "time" - - "github.com/gogf/gf/v2/os/gtime" -) - -// Convert converts the variable `fromValue` to the type `toTypeName`, the type `toTypeName` is specified by string. -// -// The optional parameter `extraParams` is used for additional necessary parameter for this conversion. -// It supports common basic types conversion as its conversion based on type name string. -func Convert(fromValue interface{}, toTypeName string, extraParams ...interface{}) interface{} { - return doConvert(doConvertInput{ - FromValue: fromValue, - ToTypeName: toTypeName, - ReferValue: nil, - Extra: extraParams, - }) -} - -type doConvertInput struct { - FromValue interface{} // Value that is converted from. - ToTypeName string // Target value type name in string. - ReferValue interface{} // Referred value, a value in type `ToTypeName`. Note that its type might be reflect.Value. - Extra []interface{} // Extra values for implementing the converting. 
- // Marks that the value is already converted and set to `ReferValue`. Caller can ignore the returned result. - // It is an attribute for internal usage purpose. - alreadySetToReferValue bool -} - -// doConvert does commonly use types converting. -func doConvert(in doConvertInput) (convertedValue interface{}) { - switch in.ToTypeName { - case "int": - return Int(in.FromValue) - case "*int": - if _, ok := in.FromValue.(*int); ok { - return in.FromValue - } - v := Int(in.FromValue) - return &v - - case "int8": - return Int8(in.FromValue) - case "*int8": - if _, ok := in.FromValue.(*int8); ok { - return in.FromValue - } - v := Int8(in.FromValue) - return &v - - case "int16": - return Int16(in.FromValue) - case "*int16": - if _, ok := in.FromValue.(*int16); ok { - return in.FromValue - } - v := Int16(in.FromValue) - return &v - - case "int32": - return Int32(in.FromValue) - case "*int32": - if _, ok := in.FromValue.(*int32); ok { - return in.FromValue - } - v := Int32(in.FromValue) - return &v - - case "int64": - return Int64(in.FromValue) - case "*int64": - if _, ok := in.FromValue.(*int64); ok { - return in.FromValue - } - v := Int64(in.FromValue) - return &v - - case "uint": - return Uint(in.FromValue) - case "*uint": - if _, ok := in.FromValue.(*uint); ok { - return in.FromValue - } - v := Uint(in.FromValue) - return &v - - case "uint8": - return Uint8(in.FromValue) - case "*uint8": - if _, ok := in.FromValue.(*uint8); ok { - return in.FromValue - } - v := Uint8(in.FromValue) - return &v - - case "uint16": - return Uint16(in.FromValue) - case "*uint16": - if _, ok := in.FromValue.(*uint16); ok { - return in.FromValue - } - v := Uint16(in.FromValue) - return &v - - case "uint32": - return Uint32(in.FromValue) - case "*uint32": - if _, ok := in.FromValue.(*uint32); ok { - return in.FromValue - } - v := Uint32(in.FromValue) - return &v - - case "uint64": - return Uint64(in.FromValue) - case "*uint64": - if _, ok := in.FromValue.(*uint64); ok { - return in.FromValue - 
} - v := Uint64(in.FromValue) - return &v - - case "float32": - return Float32(in.FromValue) - case "*float32": - if _, ok := in.FromValue.(*float32); ok { - return in.FromValue - } - v := Float32(in.FromValue) - return &v - - case "float64": - return Float64(in.FromValue) - case "*float64": - if _, ok := in.FromValue.(*float64); ok { - return in.FromValue - } - v := Float64(in.FromValue) - return &v - - case "bool": - return Bool(in.FromValue) - case "*bool": - if _, ok := in.FromValue.(*bool); ok { - return in.FromValue - } - v := Bool(in.FromValue) - return &v - - case "string": - return String(in.FromValue) - case "*string": - if _, ok := in.FromValue.(*string); ok { - return in.FromValue - } - v := String(in.FromValue) - return &v - - case "[]byte": - return Bytes(in.FromValue) - case "[]int": - return Ints(in.FromValue) - case "[]int32": - return Int32s(in.FromValue) - case "[]int64": - return Int64s(in.FromValue) - case "[]uint": - return Uints(in.FromValue) - case "[]uint8": - return Bytes(in.FromValue) - case "[]uint32": - return Uint32s(in.FromValue) - case "[]uint64": - return Uint64s(in.FromValue) - case "[]float32": - return Float32s(in.FromValue) - case "[]float64": - return Float64s(in.FromValue) - case "[]string": - return Strings(in.FromValue) - - case "Time", "time.Time": - if len(in.Extra) > 0 { - return Time(in.FromValue, String(in.Extra[0])) - } - return Time(in.FromValue) - case "*time.Time": - var v interface{} - if len(in.Extra) > 0 { - v = Time(in.FromValue, String(in.Extra[0])) - } else { - if _, ok := in.FromValue.(*time.Time); ok { - return in.FromValue - } - v = Time(in.FromValue) - } - return &v - - case "GTime", "gtime.Time": - if len(in.Extra) > 0 { - if v := GTime(in.FromValue, String(in.Extra[0])); v != nil { - return *v - } else { - return *gtime.New() - } - } - if v := GTime(in.FromValue); v != nil { - return *v - } else { - return *gtime.New() - } - case "*gtime.Time": - if len(in.Extra) > 0 { - if v := GTime(in.FromValue, 
String(in.Extra[0])); v != nil { - return v - } else { - return gtime.New() - } - } - if v := GTime(in.FromValue); v != nil { - return v - } else { - return gtime.New() - } - - case "Duration", "time.Duration": - return Duration(in.FromValue) - case "*time.Duration": - if _, ok := in.FromValue.(*time.Duration); ok { - return in.FromValue - } - v := Duration(in.FromValue) - return &v - - case "map[string]string": - return MapStrStr(in.FromValue) - - case "map[string]interface{}": - return Map(in.FromValue) - - case "[]map[string]interface{}": - return Maps(in.FromValue) - - case "json.RawMessage": - return Bytes(in.FromValue) - - default: - if in.ReferValue != nil { - var referReflectValue reflect.Value - if v, ok := in.ReferValue.(reflect.Value); ok { - referReflectValue = v - } else { - referReflectValue = reflect.ValueOf(in.ReferValue) - } - - defer func() { - if recover() != nil { - in.alreadySetToReferValue = false - if err := bindVarToReflectValue(referReflectValue, in.FromValue, nil); err == nil { - in.alreadySetToReferValue = true - convertedValue = referReflectValue.Interface() - } - } - }() - switch referReflectValue.Kind() { - case reflect.Ptr: - // Type converting for custom type pointers. - // Eg: - // type PayMode int - // type Req struct{ - // Mode *PayMode - // } - // - // Struct(`{"Mode": 1000}`, &req) - originType := referReflectValue.Type().Elem() - switch originType.Kind() { - case reflect.Struct: - // Not support some kinds. 
- default: - in.ToTypeName = originType.Kind().String() - in.ReferValue = nil - refElementValue := reflect.ValueOf(doConvert(in)) - originTypeValue := reflect.New(refElementValue.Type()).Elem() - originTypeValue.Set(refElementValue) - in.alreadySetToReferValue = true - return originTypeValue.Addr().Convert(referReflectValue.Type()).Interface() - } - - case reflect.Map: - var targetValue = reflect.New(referReflectValue.Type()).Elem() - if err := doMapToMap(in.FromValue, targetValue); err == nil { - in.alreadySetToReferValue = true - } - return targetValue.Interface() - } - in.ToTypeName = referReflectValue.Kind().String() - in.ReferValue = nil - in.alreadySetToReferValue = true - convertedValue = reflect.ValueOf(doConvert(in)).Convert(referReflectValue.Type()).Interface() - return convertedValue - } - return in.FromValue - } -} - -func doConvertWithReflectValueSet(reflectValue reflect.Value, in doConvertInput) { - convertedValue := doConvert(in) - if !in.alreadySetToReferValue { - reflectValue.Set(reflect.ValueOf(convertedValue)) - } -} diff --git a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_converter.go b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_converter.go deleted file mode 100644 index a5d7bb72..00000000 --- a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_converter.go +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gconv - -import ( - "reflect" - - "github.com/gogf/gf/v2/errors/gcode" - "github.com/gogf/gf/v2/errors/gerror" -) - -type ( - converterInType = reflect.Type - converterOutType = reflect.Type - converterFunc = reflect.Value -) - -// customConverters for internal converter storing. 
-var customConverters = make(map[converterInType]map[converterOutType]converterFunc) - -// RegisterConverter to register custom converter. -// It must be registered before you use this custom converting feature. -// It is suggested to do it in boot. -// -// Note: -// 1. The parameter `fn` must be defined as pattern `func(T1) (T2, error)`. -// It will convert type `T1` to type `T2`. -// 2. The `T1` should not be type of pointer, but the `T2` should be type of pointer. -func RegisterConverter(fn interface{}) (err error) { - var ( - fnReflectType = reflect.TypeOf(fn) - errType = reflect.TypeOf((*error)(nil)).Elem() - ) - if fnReflectType.Kind() != reflect.Func || - fnReflectType.NumIn() != 1 || fnReflectType.NumOut() != 2 || - !fnReflectType.Out(1).Implements(errType) { - err = gerror.NewCodef( - gcode.CodeInvalidParameter, - "parameter must be type of function and defined as pattern `func(T1) (T2, error)`, but defined as `%s`", - fnReflectType.String(), - ) - return - } - - // The Key and Value of the converter map should not be pointer. 
- var ( - inType = fnReflectType.In(0) - outType = fnReflectType.Out(0) - ) - if inType.Kind() == reflect.Pointer { - err = gerror.NewCodef( - gcode.CodeInvalidParameter, - "invalid input parameter type `%s`: should not be type of pointer", - inType.String(), - ) - return - } - if outType.Kind() != reflect.Pointer { - err = gerror.NewCodef( - gcode.CodeInvalidParameter, - "invalid output parameter type `%s`: should be type of pointer", - outType.String(), - ) - return - } - - registeredOutTypeMap, ok := customConverters[inType] - if !ok { - registeredOutTypeMap = make(map[converterOutType]converterFunc) - customConverters[inType] = registeredOutTypeMap - } - if _, ok = registeredOutTypeMap[outType]; ok { - err = gerror.NewCodef( - gcode.CodeInvalidOperation, - "the converter parameter type `%s` to type `%s` has already been registered", - inType.String(), outType.String(), - ) - return - } - registeredOutTypeMap[outType] = reflect.ValueOf(fn) - return -} - -// callCustomConverter call the custom converter. It will try some possible type. -func callCustomConverter(srcReflectValue reflect.Value, dstReflectValue reflect.Value) (converted bool, err error) { - if len(customConverters) == 0 { - return false, nil - } - var ( - ok bool - srcType = srcReflectValue.Type() - ) - for srcType.Kind() == reflect.Pointer { - srcType = srcType.Elem() - } - var ( - registeredOutTypeMap map[converterOutType]converterFunc - registeredConverterFunc converterFunc - ) - // firstly, it searches the map by input parameter type. 
- registeredOutTypeMap, ok = customConverters[srcType] - if !ok { - return false, nil - } - var dstType = dstReflectValue.Type() - if dstType.Kind() == reflect.Pointer && dstReflectValue.Elem().Kind() == reflect.Pointer { - dstType = dstReflectValue.Elem().Type() - } else if dstType.Kind() != reflect.Pointer && dstReflectValue.CanAddr() { - dstType = dstReflectValue.Addr().Type() - } - // secondly, it searches the input parameter type map - // and finds the result converter function by the output parameter type. - registeredConverterFunc, ok = registeredOutTypeMap[dstType] - if !ok { - return false, nil - } - // Converter function calling. - for srcReflectValue.Type() != srcType { - srcReflectValue = srcReflectValue.Elem() - } - result := registeredConverterFunc.Call([]reflect.Value{srcReflectValue}) - if !result[1].IsNil() { - return false, result[1].Interface().(error) - } - // The `result[0]` is a pointer. - if result[0].IsNil() { - return false, nil - } - var resultValue = result[0] - for { - if resultValue.Type() == dstReflectValue.Type() && dstReflectValue.CanSet() { - dstReflectValue.Set(resultValue) - converted = true - } else if dstReflectValue.Kind() == reflect.Pointer { - if resultValue.Type() == dstReflectValue.Elem().Type() && dstReflectValue.Elem().CanSet() { - dstReflectValue.Elem().Set(resultValue) - converted = true - } - } - if converted { - break - } - if resultValue.Kind() == reflect.Pointer { - resultValue = resultValue.Elem() - } else { - break - } - } - - return converted, nil -} diff --git a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_float.go b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_float.go deleted file mode 100644 index 41cfb1cc..00000000 --- a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_float.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. 
-// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gconv - -import ( - "strconv" - - "github.com/gogf/gf/v2/encoding/gbinary" -) - -// Float32 converts `any` to float32. -func Float32(any interface{}) float32 { - if any == nil { - return 0 - } - switch value := any.(type) { - case float32: - return value - case float64: - return float32(value) - case []byte: - return gbinary.DecodeToFloat32(value) - default: - if f, ok := value.(iFloat32); ok { - return f.Float32() - } - v, _ := strconv.ParseFloat(String(any), 64) - return float32(v) - } -} - -// Float64 converts `any` to float64. -func Float64(any interface{}) float64 { - if any == nil { - return 0 - } - switch value := any.(type) { - case float32: - return float64(value) - case float64: - return value - case []byte: - return gbinary.DecodeToFloat64(value) - default: - if f, ok := value.(iFloat64); ok { - return f.Float64() - } - v, _ := strconv.ParseFloat(String(any), 64) - return v - } -} diff --git a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_int.go b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_int.go deleted file mode 100644 index 48e26fc2..00000000 --- a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_int.go +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gconv - -import ( - "math" - "strconv" - - "github.com/gogf/gf/v2/encoding/gbinary" -) - -// Int converts `any` to int. -func Int(any interface{}) int { - if any == nil { - return 0 - } - if v, ok := any.(int); ok { - return v - } - return int(Int64(any)) -} - -// Int8 converts `any` to int8. 
-func Int8(any interface{}) int8 { - if any == nil { - return 0 - } - if v, ok := any.(int8); ok { - return v - } - return int8(Int64(any)) -} - -// Int16 converts `any` to int16. -func Int16(any interface{}) int16 { - if any == nil { - return 0 - } - if v, ok := any.(int16); ok { - return v - } - return int16(Int64(any)) -} - -// Int32 converts `any` to int32. -func Int32(any interface{}) int32 { - if any == nil { - return 0 - } - if v, ok := any.(int32); ok { - return v - } - return int32(Int64(any)) -} - -// Int64 converts `any` to int64. -func Int64(any interface{}) int64 { - if any == nil { - return 0 - } - switch value := any.(type) { - case int: - return int64(value) - case int8: - return int64(value) - case int16: - return int64(value) - case int32: - return int64(value) - case int64: - return value - case uint: - return int64(value) - case uint8: - return int64(value) - case uint16: - return int64(value) - case uint32: - return int64(value) - case uint64: - return int64(value) - case float32: - return int64(value) - case float64: - return int64(value) - case bool: - if value { - return 1 - } - return 0 - case []byte: - return gbinary.DecodeToInt64(value) - default: - if f, ok := value.(iInt64); ok { - return f.Int64() - } - var ( - s = String(value) - isMinus = false - ) - if len(s) > 0 { - if s[0] == '-' { - isMinus = true - s = s[1:] - } else if s[0] == '+' { - s = s[1:] - } - } - // Hexadecimal - if len(s) > 2 && s[0] == '0' && (s[1] == 'x' || s[1] == 'X') { - if v, e := strconv.ParseInt(s[2:], 16, 64); e == nil { - if isMinus { - return -v - } - return v - } - } - // Decimal - if v, e := strconv.ParseInt(s, 10, 64); e == nil { - if isMinus { - return -v - } - return v - } - // Float64 - if valueInt64 := Float64(value); math.IsNaN(valueInt64) { - return 0 - } else { - return int64(valueInt64) - } - } -} diff --git a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_interface.go b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_interface.go deleted file mode 
100644 index 9440e978..00000000 --- a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_interface.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gconv - -import "github.com/gogf/gf/v2/os/gtime" - -// iString is used for type assert api for String(). -type iString interface { - String() string -} - -// iBool is used for type assert api for Bool(). -type iBool interface { - Bool() bool -} - -// iInt64 is used for type assert api for Int64(). -type iInt64 interface { - Int64() int64 -} - -// iUint64 is used for type assert api for Uint64(). -type iUint64 interface { - Uint64() uint64 -} - -// iFloat32 is used for type assert api for Float32(). -type iFloat32 interface { - Float32() float32 -} - -// iFloat64 is used for type assert api for Float64(). -type iFloat64 interface { - Float64() float64 -} - -// iError is used for type assert api for Error(). -type iError interface { - Error() string -} - -// iBytes is used for type assert api for Bytes(). -type iBytes interface { - Bytes() []byte -} - -// iInterface is used for type assert api for Interface(). -type iInterface interface { - Interface() interface{} -} - -// iInterfaces is used for type assert api for Interfaces(). -type iInterfaces interface { - Interfaces() []interface{} -} - -// iFloats is used for type assert api for Floats(). -type iFloats interface { - Floats() []float64 -} - -// iInts is used for type assert api for Ints(). -type iInts interface { - Ints() []int -} - -// iStrings is used for type assert api for Strings(). -type iStrings interface { - Strings() []string -} - -// iUints is used for type assert api for Uints(). -type iUints interface { - Uints() []uint -} - -// iMapStrAny is the interface support for converting struct parameter to map. 
-type iMapStrAny interface { - MapStrAny() map[string]interface{} -} - -// iUnmarshalValue is the interface for custom defined types customizing value assignment. -// Note that only pointer can implement interface iUnmarshalValue. -type iUnmarshalValue interface { - UnmarshalValue(interface{}) error -} - -// iUnmarshalText is the interface for custom defined types customizing value assignment. -// Note that only pointer can implement interface iUnmarshalText. -type iUnmarshalText interface { - UnmarshalText(text []byte) error -} - -// iUnmarshalText is the interface for custom defined types customizing value assignment. -// Note that only pointer can implement interface iUnmarshalJSON. -type iUnmarshalJSON interface { - UnmarshalJSON(b []byte) error -} - -// iSet is the interface for custom value assignment. -type iSet interface { - Set(value interface{}) (old interface{}) -} - -// iGTime is the interface for gtime.Time converting. -type iGTime interface { - GTime(format ...string) *gtime.Time -} diff --git a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_map.go b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_map.go deleted file mode 100644 index 9e3605cb..00000000 --- a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_map.go +++ /dev/null @@ -1,531 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gconv - -import ( - "reflect" - "strings" - - "github.com/gogf/gf/v2/internal/empty" - "github.com/gogf/gf/v2/internal/json" - "github.com/gogf/gf/v2/internal/utils" -) - -type recursiveType string - -const ( - recursiveTypeAuto recursiveType = "auto" - recursiveTypeTrue recursiveType = "true" -) - -// Map converts any variable `value` to map[string]interface{}. 
If the parameter `value` is not a -// map/struct/*struct type, then the conversion will fail and returns nil. -// -// If `value` is a struct/*struct object, the second parameter `tags` specifies the most priority -// tags that will be detected, otherwise it detects the tags in order of: -// gconv, json, field name. -func Map(value interface{}, tags ...string) map[string]interface{} { - return doMapConvert(value, recursiveTypeAuto, false, tags...) -} - -// MapDeep does Map function recursively, which means if the attribute of `value` -// is also a struct/*struct, calls Map function on this attribute converting it to -// a map[string]interface{} type variable. -// Also see Map. -func MapDeep(value interface{}, tags ...string) map[string]interface{} { - return doMapConvert(value, recursiveTypeTrue, false, tags...) -} - -// doMapConvert implements the map converting. -// It automatically checks and converts json string to map if `value` is string/[]byte. -// -// TODO completely implement the recursive converting for all types, especially the map. -func doMapConvert(value interface{}, recursive recursiveType, mustMapReturn bool, tags ...string) map[string]interface{} { - if value == nil { - return nil - } - newTags := StructTagPriority - switch len(tags) { - case 0: - // No need handling. - case 1: - newTags = append(strings.Split(tags[0], ","), StructTagPriority...) - default: - newTags = append(tags, StructTagPriority...) - } - // Assert the common combination of types, and finally it uses reflection. - dataMap := make(map[string]interface{}) - switch r := value.(type) { - case string: - // If it is a JSON string, automatically unmarshal it! - if len(r) > 0 && r[0] == '{' && r[len(r)-1] == '}' { - if err := json.UnmarshalUseNumber([]byte(r), &dataMap); err != nil { - return nil - } - } else { - return nil - } - case []byte: - // If it is a JSON string, automatically unmarshal it! 
- if len(r) > 0 && r[0] == '{' && r[len(r)-1] == '}' { - if err := json.UnmarshalUseNumber(r, &dataMap); err != nil { - return nil - } - } else { - return nil - } - case map[interface{}]interface{}: - for k, v := range r { - dataMap[String(k)] = doMapConvertForMapOrStructValue( - doMapConvertForMapOrStructValueInput{ - IsRoot: false, - Value: v, - RecursiveType: recursive, - RecursiveOption: recursive == recursiveTypeTrue, - Tags: newTags, - }, - ) - } - case map[interface{}]string: - for k, v := range r { - dataMap[String(k)] = v - } - case map[interface{}]int: - for k, v := range r { - dataMap[String(k)] = v - } - case map[interface{}]uint: - for k, v := range r { - dataMap[String(k)] = v - } - case map[interface{}]float32: - for k, v := range r { - dataMap[String(k)] = v - } - case map[interface{}]float64: - for k, v := range r { - dataMap[String(k)] = v - } - case map[string]bool: - for k, v := range r { - dataMap[k] = v - } - case map[string]int: - for k, v := range r { - dataMap[k] = v - } - case map[string]uint: - for k, v := range r { - dataMap[k] = v - } - case map[string]float32: - for k, v := range r { - dataMap[k] = v - } - case map[string]float64: - for k, v := range r { - dataMap[k] = v - } - case map[string]string: - for k, v := range r { - dataMap[k] = v - } - case map[string]interface{}: - if recursive == recursiveTypeTrue { - // A copy of current map. - for k, v := range r { - dataMap[k] = doMapConvertForMapOrStructValue( - doMapConvertForMapOrStructValueInput{ - IsRoot: false, - Value: v, - RecursiveType: recursive, - RecursiveOption: recursive == recursiveTypeTrue, - Tags: newTags, - }, - ) - } - } else { - // It returns the map directly without any changing. 
- return r - } - case map[int]interface{}: - for k, v := range r { - dataMap[String(k)] = doMapConvertForMapOrStructValue( - doMapConvertForMapOrStructValueInput{ - IsRoot: false, - Value: v, - RecursiveType: recursive, - RecursiveOption: recursive == recursiveTypeTrue, - Tags: newTags, - }, - ) - } - case map[int]string: - for k, v := range r { - dataMap[String(k)] = v - } - case map[uint]string: - for k, v := range r { - dataMap[String(k)] = v - } - - default: - // Not a common type, it then uses reflection for conversion. - var reflectValue reflect.Value - if v, ok := value.(reflect.Value); ok { - reflectValue = v - } else { - reflectValue = reflect.ValueOf(value) - } - reflectKind := reflectValue.Kind() - // If it is a pointer, we should find its real data type. - for reflectKind == reflect.Ptr { - reflectValue = reflectValue.Elem() - reflectKind = reflectValue.Kind() - } - switch reflectKind { - // If `value` is type of array, it converts the value of even number index as its key and - // the value of odd number index as its corresponding value, for example: - // []string{"k1","v1","k2","v2"} => map[string]interface{}{"k1":"v1", "k2":"v2"} - // []string{"k1","v1","k2"} => map[string]interface{}{"k1":"v1", "k2":nil} - case reflect.Slice, reflect.Array: - length := reflectValue.Len() - for i := 0; i < length; i += 2 { - if i+1 < length { - dataMap[String(reflectValue.Index(i).Interface())] = reflectValue.Index(i + 1).Interface() - } else { - dataMap[String(reflectValue.Index(i).Interface())] = nil - } - } - case reflect.Map, reflect.Struct, reflect.Interface: - convertedValue := doMapConvertForMapOrStructValue( - doMapConvertForMapOrStructValueInput{ - IsRoot: true, - Value: value, - RecursiveType: recursive, - RecursiveOption: recursive == recursiveTypeTrue, - Tags: newTags, - MustMapReturn: mustMapReturn, - }, - ) - if m, ok := convertedValue.(map[string]interface{}); ok { - return m - } - return nil - default: - return nil - } - } - return dataMap -} - -type 
doMapConvertForMapOrStructValueInput struct { - IsRoot bool // It returns directly if it is not root and with no recursive converting. - Value interface{} // Current operation value. - RecursiveType recursiveType // The type from top function entry. - RecursiveOption bool // Whether convert recursively for `current` operation. - Tags []string // Map key mapping. - MustMapReturn bool // Must return map instead of Value when empty. -} - -func doMapConvertForMapOrStructValue(in doMapConvertForMapOrStructValueInput) interface{} { - if !in.IsRoot && !in.RecursiveOption { - return in.Value - } - - var reflectValue reflect.Value - if v, ok := in.Value.(reflect.Value); ok { - reflectValue = v - in.Value = v.Interface() - } else { - reflectValue = reflect.ValueOf(in.Value) - } - reflectKind := reflectValue.Kind() - // If it is a pointer, we should find its real data type. - for reflectKind == reflect.Ptr { - reflectValue = reflectValue.Elem() - reflectKind = reflectValue.Kind() - } - switch reflectKind { - case reflect.Map: - var ( - mapKeys = reflectValue.MapKeys() - dataMap = make(map[string]interface{}) - ) - for _, k := range mapKeys { - var ( - mapKeyValue = reflectValue.MapIndex(k) - mapValue interface{} - ) - switch { - case mapKeyValue.IsZero(): - if mapKeyValue.IsNil() { - // quick check for nil value. - mapValue = nil - } else { - // in case of: - // exception recovered: reflect: call of reflect.Value.Interface on zero Value - mapValue = reflect.New(mapKeyValue.Type()).Elem().Interface() - } - default: - mapValue = mapKeyValue.Interface() - } - dataMap[String(k.Interface())] = doMapConvertForMapOrStructValue( - doMapConvertForMapOrStructValueInput{ - IsRoot: false, - Value: mapValue, - RecursiveType: in.RecursiveType, - RecursiveOption: in.RecursiveType == recursiveTypeTrue, - Tags: in.Tags, - }, - ) - } - return dataMap - - case reflect.Struct: - var dataMap = make(map[string]interface{}) - // Map converting interface check. 
- if v, ok := in.Value.(iMapStrAny); ok { - // Value copy, in case of concurrent safety. - for mapK, mapV := range v.MapStrAny() { - if in.RecursiveOption { - dataMap[mapK] = doMapConvertForMapOrStructValue( - doMapConvertForMapOrStructValueInput{ - IsRoot: false, - Value: mapV, - RecursiveType: in.RecursiveType, - RecursiveOption: in.RecursiveType == recursiveTypeTrue, - Tags: in.Tags, - }, - ) - } else { - dataMap[mapK] = mapV - } - } - return dataMap - } - // Using reflect for converting. - var ( - rtField reflect.StructField - rvField reflect.Value - reflectType = reflectValue.Type() // attribute value type. - mapKey = "" // mapKey may be the tag name or the struct attribute name. - ) - for i := 0; i < reflectValue.NumField(); i++ { - rtField = reflectType.Field(i) - rvField = reflectValue.Field(i) - // Only convert the public attributes. - fieldName := rtField.Name - if !utils.IsLetterUpper(fieldName[0]) { - continue - } - mapKey = "" - fieldTag := rtField.Tag - for _, tag := range in.Tags { - if mapKey = fieldTag.Get(tag); mapKey != "" { - break - } - } - if mapKey == "" { - mapKey = fieldName - } else { - // Support json tag feature: -, omitempty - mapKey = strings.TrimSpace(mapKey) - if mapKey == "-" { - continue - } - array := strings.Split(mapKey, ",") - if len(array) > 1 { - switch strings.TrimSpace(array[1]) { - case "omitempty": - if empty.IsEmpty(rvField.Interface()) { - continue - } else { - mapKey = strings.TrimSpace(array[0]) - } - default: - mapKey = strings.TrimSpace(array[0]) - } - } - if mapKey == "" { - mapKey = fieldName - } - } - if in.RecursiveOption || rtField.Anonymous { - // Do map converting recursively. - var ( - rvAttrField = rvField - rvAttrKind = rvField.Kind() - ) - if rvAttrKind == reflect.Ptr { - rvAttrField = rvField.Elem() - rvAttrKind = rvAttrField.Kind() - } - switch rvAttrKind { - case reflect.Struct: - // Embedded struct and has no fields, just ignores it. 
- // Eg: gmeta.Meta - if rvAttrField.Type().NumField() == 0 { - continue - } - var ( - hasNoTag = mapKey == fieldName - // DO NOT use rvAttrField.Interface() here, - // as it might be changed from pointer to struct. - rvInterface = rvField.Interface() - ) - switch { - case hasNoTag && rtField.Anonymous: - // It means this attribute field has no tag. - // Overwrite the attribute with sub-struct attribute fields. - anonymousValue := doMapConvertForMapOrStructValue(doMapConvertForMapOrStructValueInput{ - IsRoot: false, - Value: rvInterface, - RecursiveType: in.RecursiveType, - RecursiveOption: true, - Tags: in.Tags, - }) - if m, ok := anonymousValue.(map[string]interface{}); ok { - for k, v := range m { - dataMap[k] = v - } - } else { - dataMap[mapKey] = rvInterface - } - - // It means this attribute field has desired tag. - case !hasNoTag && rtField.Anonymous: - dataMap[mapKey] = doMapConvertForMapOrStructValue(doMapConvertForMapOrStructValueInput{ - IsRoot: false, - Value: rvInterface, - RecursiveType: in.RecursiveType, - RecursiveOption: true, - Tags: in.Tags, - }) - - default: - dataMap[mapKey] = doMapConvertForMapOrStructValue(doMapConvertForMapOrStructValueInput{ - IsRoot: false, - Value: rvInterface, - RecursiveType: in.RecursiveType, - RecursiveOption: in.RecursiveType == recursiveTypeTrue, - Tags: in.Tags, - }) - } - - // The struct attribute is type of slice. 
- case reflect.Array, reflect.Slice: - length := rvAttrField.Len() - if length == 0 { - dataMap[mapKey] = rvAttrField.Interface() - break - } - array := make([]interface{}, length) - for arrayIndex := 0; arrayIndex < length; arrayIndex++ { - array[arrayIndex] = doMapConvertForMapOrStructValue( - doMapConvertForMapOrStructValueInput{ - IsRoot: false, - Value: rvAttrField.Index(arrayIndex).Interface(), - RecursiveType: in.RecursiveType, - RecursiveOption: in.RecursiveType == recursiveTypeTrue, - Tags: in.Tags, - }, - ) - } - dataMap[mapKey] = array - case reflect.Map: - var ( - mapKeys = rvAttrField.MapKeys() - nestedMap = make(map[string]interface{}) - ) - for _, k := range mapKeys { - nestedMap[String(k.Interface())] = doMapConvertForMapOrStructValue( - doMapConvertForMapOrStructValueInput{ - IsRoot: false, - Value: rvAttrField.MapIndex(k).Interface(), - RecursiveType: in.RecursiveType, - RecursiveOption: in.RecursiveType == recursiveTypeTrue, - Tags: in.Tags, - }, - ) - } - dataMap[mapKey] = nestedMap - default: - if rvField.IsValid() { - dataMap[mapKey] = reflectValue.Field(i).Interface() - } else { - dataMap[mapKey] = nil - } - } - } else { - // No recursive map value converting - if rvField.IsValid() { - dataMap[mapKey] = reflectValue.Field(i).Interface() - } else { - dataMap[mapKey] = nil - } - } - } - if !in.MustMapReturn && len(dataMap) == 0 { - return in.Value - } - return dataMap - - // The given value is type of slice. - case reflect.Array, reflect.Slice: - length := reflectValue.Len() - if length == 0 { - break - } - array := make([]interface{}, reflectValue.Len()) - for i := 0; i < length; i++ { - array[i] = doMapConvertForMapOrStructValue(doMapConvertForMapOrStructValueInput{ - IsRoot: false, - Value: reflectValue.Index(i).Interface(), - RecursiveType: in.RecursiveType, - RecursiveOption: in.RecursiveType == recursiveTypeTrue, - Tags: in.Tags, - }) - } - return array - } - return in.Value -} - -// MapStrStr converts `value` to map[string]string. 
-// Note that there might be data copy for this map type converting. -func MapStrStr(value interface{}, tags ...string) map[string]string { - if r, ok := value.(map[string]string); ok { - return r - } - m := Map(value, tags...) - if len(m) > 0 { - vMap := make(map[string]string, len(m)) - for k, v := range m { - vMap[k] = String(v) - } - return vMap - } - return nil -} - -// MapStrStrDeep converts `value` to map[string]string recursively. -// Note that there might be data copy for this map type converting. -func MapStrStrDeep(value interface{}, tags ...string) map[string]string { - if r, ok := value.(map[string]string); ok { - return r - } - m := MapDeep(value, tags...) - if len(m) > 0 { - vMap := make(map[string]string, len(m)) - for k, v := range m { - vMap[k] = String(v) - } - return vMap - } - return nil -} diff --git a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_maps.go b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_maps.go deleted file mode 100644 index 3efe2386..00000000 --- a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_maps.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gconv - -import "github.com/gogf/gf/v2/internal/json" - -// SliceMap is alias of Maps. -func SliceMap(any interface{}) []map[string]interface{} { - return Maps(any) -} - -// SliceMapDeep is alias of MapsDeep. -func SliceMapDeep(any interface{}) []map[string]interface{} { - return MapsDeep(any) -} - -// SliceStruct is alias of Structs. -func SliceStruct(params interface{}, pointer interface{}, mapping ...map[string]string) (err error) { - return Structs(params, pointer, mapping...) -} - -// Maps converts `value` to []map[string]interface{}. 
-// Note that it automatically checks and converts json string to []map if `value` is string/[]byte. -func Maps(value interface{}, tags ...string) []map[string]interface{} { - if value == nil { - return nil - } - switch r := value.(type) { - case string: - list := make([]map[string]interface{}, 0) - if len(r) > 0 && r[0] == '[' && r[len(r)-1] == ']' { - if err := json.UnmarshalUseNumber([]byte(r), &list); err != nil { - return nil - } - return list - } else { - return nil - } - - case []byte: - list := make([]map[string]interface{}, 0) - if len(r) > 0 && r[0] == '[' && r[len(r)-1] == ']' { - if err := json.UnmarshalUseNumber(r, &list); err != nil { - return nil - } - return list - } else { - return nil - } - - case []map[string]interface{}: - return r - - default: - array := Interfaces(value) - if len(array) == 0 { - return nil - } - list := make([]map[string]interface{}, len(array)) - for k, v := range array { - list[k] = Map(v, tags...) - } - return list - } -} - -// MapsDeep converts `value` to []map[string]interface{} recursively. -// -// TODO completely implement the recursive converting for all types. -func MapsDeep(value interface{}, tags ...string) []map[string]interface{} { - if value == nil { - return nil - } - switch r := value.(type) { - case string: - list := make([]map[string]interface{}, 0) - if len(r) > 0 && r[0] == '[' && r[len(r)-1] == ']' { - if err := json.UnmarshalUseNumber([]byte(r), &list); err != nil { - return nil - } - return list - } else { - return nil - } - - case []byte: - list := make([]map[string]interface{}, 0) - if len(r) > 0 && r[0] == '[' && r[len(r)-1] == ']' { - if err := json.UnmarshalUseNumber(r, &list); err != nil { - return nil - } - return list - } else { - return nil - } - - case []map[string]interface{}: - list := make([]map[string]interface{}, len(r)) - for k, v := range r { - list[k] = MapDeep(v, tags...) 
- } - return list - - default: - array := Interfaces(value) - if len(array) == 0 { - return nil - } - list := make([]map[string]interface{}, len(array)) - for k, v := range array { - list[k] = MapDeep(v, tags...) - } - return list - } -} diff --git a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_maptomap.go b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_maptomap.go deleted file mode 100644 index d9b5322d..00000000 --- a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_maptomap.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gconv - -import ( - "reflect" - - "github.com/gogf/gf/v2/errors/gcode" - "github.com/gogf/gf/v2/errors/gerror" - "github.com/gogf/gf/v2/internal/json" -) - -// MapToMap converts any map type variable `params` to another map type variable `pointer` -// using reflect. -// See doMapToMap. -func MapToMap(params interface{}, pointer interface{}, mapping ...map[string]string) error { - return doMapToMap(params, pointer, mapping...) -} - -// doMapToMap converts any map type variable `params` to another map type variable `pointer`. -// -// The parameter `params` can be any type of map, like: -// map[string]string, map[string]struct, map[string]*struct, reflect.Value, etc. -// -// The parameter `pointer` should be type of *map, like: -// map[int]string, map[string]struct, map[string]*struct, reflect.Value, etc. -// -// The optional parameter `mapping` is used for struct attribute to map key mapping, which makes -// sense only if the items of original map `params` is type struct. -func doMapToMap(params interface{}, pointer interface{}, mapping ...map[string]string) (err error) { - // If given `params` is JSON, it then uses json.Unmarshal doing the converting. 
- switch r := params.(type) { - case []byte: - if json.Valid(r) { - if rv, ok := pointer.(reflect.Value); ok { - if rv.Kind() == reflect.Ptr { - return json.UnmarshalUseNumber(r, rv.Interface()) - } - } else { - return json.UnmarshalUseNumber(r, pointer) - } - } - case string: - if paramsBytes := []byte(r); json.Valid(paramsBytes) { - if rv, ok := pointer.(reflect.Value); ok { - if rv.Kind() == reflect.Ptr { - return json.UnmarshalUseNumber(paramsBytes, rv.Interface()) - } - } else { - return json.UnmarshalUseNumber(paramsBytes, pointer) - } - } - } - var ( - paramsRv reflect.Value - paramsKind reflect.Kind - keyToAttributeNameMapping map[string]string - ) - if len(mapping) > 0 { - keyToAttributeNameMapping = mapping[0] - } - if v, ok := params.(reflect.Value); ok { - paramsRv = v - } else { - paramsRv = reflect.ValueOf(params) - } - paramsKind = paramsRv.Kind() - if paramsKind == reflect.Ptr { - paramsRv = paramsRv.Elem() - paramsKind = paramsRv.Kind() - } - if paramsKind != reflect.Map { - return doMapToMap(Map(params), pointer, mapping...) - } - // Empty params map, no need continue. - if paramsRv.Len() == 0 { - return nil - } - var pointerRv reflect.Value - if v, ok := pointer.(reflect.Value); ok { - pointerRv = v - } else { - pointerRv = reflect.ValueOf(pointer) - } - pointerKind := pointerRv.Kind() - for pointerKind == reflect.Ptr { - pointerRv = pointerRv.Elem() - pointerKind = pointerRv.Kind() - } - if pointerKind != reflect.Map { - return gerror.NewCodef(gcode.CodeInvalidParameter, "pointer should be type of *map, but got:%s", pointerKind) - } - defer func() { - // Catch the panic, especially the reflection operation panics. 
- if exception := recover(); exception != nil { - if v, ok := exception.(error); ok && gerror.HasStack(v) { - err = v - } else { - err = gerror.NewCodeSkipf(gcode.CodeInternalPanic, 1, "%+v", exception) - } - } - }() - var ( - paramsKeys = paramsRv.MapKeys() - pointerKeyType = pointerRv.Type().Key() - pointerValueType = pointerRv.Type().Elem() - pointerValueKind = pointerValueType.Kind() - dataMap = reflect.MakeMapWithSize(pointerRv.Type(), len(paramsKeys)) - ) - // Retrieve the true element type of target map. - if pointerValueKind == reflect.Ptr { - pointerValueKind = pointerValueType.Elem().Kind() - } - for _, key := range paramsKeys { - mapValue := reflect.New(pointerValueType).Elem() - switch pointerValueKind { - case reflect.Map, reflect.Struct: - if err = doStruct(paramsRv.MapIndex(key).Interface(), mapValue, keyToAttributeNameMapping, ""); err != nil { - return err - } - default: - mapValue.Set( - reflect.ValueOf( - doConvert(doConvertInput{ - FromValue: paramsRv.MapIndex(key).Interface(), - ToTypeName: pointerValueType.String(), - ReferValue: mapValue, - Extra: nil, - }), - ), - ) - } - var mapKey = reflect.ValueOf( - doConvert(doConvertInput{ - FromValue: key.Interface(), - ToTypeName: pointerKeyType.Name(), - ReferValue: reflect.New(pointerKeyType).Elem().Interface(), - Extra: nil, - }), - ) - dataMap.SetMapIndex(mapKey, mapValue) - } - pointerRv.Set(dataMap) - return nil -} diff --git a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_maptomaps.go b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_maptomaps.go deleted file mode 100644 index 4a474ef8..00000000 --- a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_maptomaps.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. 
- -package gconv - -import ( - "reflect" - - "github.com/gogf/gf/v2/errors/gcode" - "github.com/gogf/gf/v2/errors/gerror" - "github.com/gogf/gf/v2/internal/json" -) - -// MapToMaps converts any slice type variable `params` to another map slice type variable `pointer`. -// See doMapToMaps. -func MapToMaps(params interface{}, pointer interface{}, mapping ...map[string]string) error { - return doMapToMaps(params, pointer, mapping...) -} - -// doMapToMaps converts any map type variable `params` to another map slice variable `pointer`. -// -// The parameter `params` can be type of []map, []*map, []struct, []*struct. -// -// The parameter `pointer` should be type of []map, []*map. -// -// The optional parameter `mapping` is used for struct attribute to map key mapping, which makes -// sense only if the item of `params` is type struct. -func doMapToMaps(params interface{}, pointer interface{}, mapping ...map[string]string) (err error) { - // If given `params` is JSON, it then uses json.Unmarshal doing the converting. - switch r := params.(type) { - case []byte: - if json.Valid(r) { - if rv, ok := pointer.(reflect.Value); ok { - if rv.Kind() == reflect.Ptr { - return json.UnmarshalUseNumber(r, rv.Interface()) - } - } else { - return json.UnmarshalUseNumber(r, pointer) - } - } - case string: - if paramsBytes := []byte(r); json.Valid(paramsBytes) { - if rv, ok := pointer.(reflect.Value); ok { - if rv.Kind() == reflect.Ptr { - return json.UnmarshalUseNumber(paramsBytes, rv.Interface()) - } - } else { - return json.UnmarshalUseNumber(paramsBytes, pointer) - } - } - } - // Params and its element type check. 
- var ( - paramsRv reflect.Value - paramsKind reflect.Kind - ) - if v, ok := params.(reflect.Value); ok { - paramsRv = v - } else { - paramsRv = reflect.ValueOf(params) - } - paramsKind = paramsRv.Kind() - if paramsKind == reflect.Ptr { - paramsRv = paramsRv.Elem() - paramsKind = paramsRv.Kind() - } - if paramsKind != reflect.Array && paramsKind != reflect.Slice { - return gerror.NewCode(gcode.CodeInvalidParameter, "params should be type of slice, eg: []map/[]*map/[]struct/[]*struct") - } - var ( - paramsElem = paramsRv.Type().Elem() - paramsElemKind = paramsElem.Kind() - ) - if paramsElemKind == reflect.Ptr { - paramsElem = paramsElem.Elem() - paramsElemKind = paramsElem.Kind() - } - if paramsElemKind != reflect.Map && paramsElemKind != reflect.Struct && paramsElemKind != reflect.Interface { - return gerror.NewCodef(gcode.CodeInvalidParameter, "params element should be type of map/*map/struct/*struct, but got: %s", paramsElemKind) - } - // Empty slice, no need continue. - if paramsRv.Len() == 0 { - return nil - } - // Pointer and its element type check. - var ( - pointerRv = reflect.ValueOf(pointer) - pointerKind = pointerRv.Kind() - ) - for pointerKind == reflect.Ptr { - pointerRv = pointerRv.Elem() - pointerKind = pointerRv.Kind() - } - if pointerKind != reflect.Array && pointerKind != reflect.Slice { - return gerror.NewCode(gcode.CodeInvalidParameter, "pointer should be type of *[]map/*[]*map") - } - var ( - pointerElemType = pointerRv.Type().Elem() - pointerElemKind = pointerElemType.Kind() - ) - if pointerElemKind == reflect.Ptr { - pointerElemKind = pointerElemType.Elem().Kind() - } - if pointerElemKind != reflect.Map { - return gerror.NewCode(gcode.CodeInvalidParameter, "pointer element should be type of map/*map") - } - defer func() { - // Catch the panic, especially the reflection operation panics. 
- if exception := recover(); exception != nil { - if v, ok := exception.(error); ok && gerror.HasStack(v) { - err = v - } else { - err = gerror.NewCodeSkipf(gcode.CodeInternalPanic, 1, "%+v", exception) - } - } - }() - var ( - pointerSlice = reflect.MakeSlice(pointerRv.Type(), paramsRv.Len(), paramsRv.Len()) - ) - for i := 0; i < paramsRv.Len(); i++ { - var item reflect.Value - if pointerElemType.Kind() == reflect.Ptr { - item = reflect.New(pointerElemType.Elem()) - if err = MapToMap(paramsRv.Index(i).Interface(), item, mapping...); err != nil { - return err - } - pointerSlice.Index(i).Set(item) - } else { - item = reflect.New(pointerElemType) - if err = MapToMap(paramsRv.Index(i).Interface(), item, mapping...); err != nil { - return err - } - pointerSlice.Index(i).Set(item.Elem()) - } - } - pointerRv.Set(pointerSlice) - return -} diff --git a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_ptr.go b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_ptr.go deleted file mode 100644 index d23066da..00000000 --- a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_ptr.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gconv - -// PtrAny creates and returns an interface{} pointer variable to this value. -func PtrAny(any interface{}) *interface{} { - return &any -} - -// PtrString creates and returns a string pointer variable to this value. -func PtrString(any interface{}) *string { - v := String(any) - return &v -} - -// PtrBool creates and returns a bool pointer variable to this value. -func PtrBool(any interface{}) *bool { - v := Bool(any) - return &v -} - -// PtrInt creates and returns an int pointer variable to this value. 
-func PtrInt(any interface{}) *int { - v := Int(any) - return &v -} - -// PtrInt8 creates and returns an int8 pointer variable to this value. -func PtrInt8(any interface{}) *int8 { - v := Int8(any) - return &v -} - -// PtrInt16 creates and returns an int16 pointer variable to this value. -func PtrInt16(any interface{}) *int16 { - v := Int16(any) - return &v -} - -// PtrInt32 creates and returns an int32 pointer variable to this value. -func PtrInt32(any interface{}) *int32 { - v := Int32(any) - return &v -} - -// PtrInt64 creates and returns an int64 pointer variable to this value. -func PtrInt64(any interface{}) *int64 { - v := Int64(any) - return &v -} - -// PtrUint creates and returns an uint pointer variable to this value. -func PtrUint(any interface{}) *uint { - v := Uint(any) - return &v -} - -// PtrUint8 creates and returns an uint8 pointer variable to this value. -func PtrUint8(any interface{}) *uint8 { - v := Uint8(any) - return &v -} - -// PtrUint16 creates and returns an uint16 pointer variable to this value. -func PtrUint16(any interface{}) *uint16 { - v := Uint16(any) - return &v -} - -// PtrUint32 creates and returns an uint32 pointer variable to this value. -func PtrUint32(any interface{}) *uint32 { - v := Uint32(any) - return &v -} - -// PtrUint64 creates and returns an uint64 pointer variable to this value. -func PtrUint64(any interface{}) *uint64 { - v := Uint64(any) - return &v -} - -// PtrFloat32 creates and returns a float32 pointer variable to this value. -func PtrFloat32(any interface{}) *float32 { - v := Float32(any) - return &v -} - -// PtrFloat64 creates and returns a float64 pointer variable to this value. 
-func PtrFloat64(any interface{}) *float64 { - v := Float64(any) - return &v -} diff --git a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_scan.go b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_scan.go deleted file mode 100644 index 28f7d7b4..00000000 --- a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_scan.go +++ /dev/null @@ -1,525 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gconv - -import ( - "database/sql" - "reflect" - - "github.com/gogf/gf/v2/errors/gcode" - "github.com/gogf/gf/v2/errors/gerror" - "github.com/gogf/gf/v2/internal/utils" - "github.com/gogf/gf/v2/os/gstructs" -) - -// Scan automatically checks the type of `pointer` and converts `params` to `pointer`. It supports `pointer` -// with type of `*map/*[]map/*[]*map/*struct/**struct/*[]struct/*[]*struct` for converting. -// -// It calls function `doMapToMap` internally if `pointer` is type of *map for converting. -// It calls function `doMapToMaps` internally if `pointer` is type of *[]map/*[]*map for converting. -// It calls function `doStruct` internally if `pointer` is type of *struct/**struct for converting. -// It calls function `doStructs` internally if `pointer` is type of *[]struct/*[]*struct for converting. -func Scan(params interface{}, pointer interface{}, mapping ...map[string]string) (err error) { - var ( - pointerType reflect.Type - pointerKind reflect.Kind - pointerValue reflect.Value - ) - if v, ok := pointer.(reflect.Value); ok { - pointerValue = v - pointerType = v.Type() - } else { - pointerValue = reflect.ValueOf(pointer) - pointerType = reflect.TypeOf(pointer) // Do not use pointerValue.Type() as pointerValue might be zero. 
- } - - if pointerType == nil { - return gerror.NewCode(gcode.CodeInvalidParameter, "parameter pointer should not be nil") - } - pointerKind = pointerType.Kind() - if pointerKind != reflect.Ptr { - if pointerValue.CanAddr() { - pointerValue = pointerValue.Addr() - pointerType = pointerValue.Type() - pointerKind = pointerType.Kind() - } else { - return gerror.NewCodef( - gcode.CodeInvalidParameter, - "params should be type of pointer, but got type: %v", - pointerType, - ) - } - } - // Direct assignment checks! - var ( - paramsType reflect.Type - paramsValue reflect.Value - ) - if v, ok := params.(reflect.Value); ok { - paramsValue = v - paramsType = paramsValue.Type() - } else { - paramsValue = reflect.ValueOf(params) - paramsType = reflect.TypeOf(params) // Do not use paramsValue.Type() as paramsValue might be zero. - } - // If `params` and `pointer` are the same type, the do directly assignment. - // For performance enhancement purpose. - var ( - pointerValueElem = pointerValue.Elem() - ) - if pointerValueElem.CanSet() && paramsType == pointerValueElem.Type() { - pointerValueElem.Set(paramsValue) - return nil - } - - // Converting. - var ( - pointerElem = pointerType.Elem() - pointerElemKind = pointerElem.Kind() - keyToAttributeNameMapping map[string]string - ) - if len(mapping) > 0 { - keyToAttributeNameMapping = mapping[0] - } - switch pointerElemKind { - case reflect.Map: - return doMapToMap(params, pointer, mapping...) - - case reflect.Array, reflect.Slice: - var ( - sliceElem = pointerElem.Elem() - sliceElemKind = sliceElem.Kind() - ) - for sliceElemKind == reflect.Ptr { - sliceElem = sliceElem.Elem() - sliceElemKind = sliceElem.Kind() - } - if sliceElemKind == reflect.Map { - return doMapToMaps(params, pointer, mapping...) 
- } - return doStructs(params, pointer, keyToAttributeNameMapping, "") - - default: - return doStruct(params, pointer, keyToAttributeNameMapping, "") - } -} - -// ScanList converts `structSlice` to struct slice which contains other complex struct attributes. -// Note that the parameter `structSlicePointer` should be type of *[]struct/*[]*struct. -// -// Usage example 1: Normal attribute struct relation: -// -// type EntityUser struct { -// Uid int -// Name string -// } -// -// type EntityUserDetail struct { -// Uid int -// Address string -// } -// -// type EntityUserScores struct { -// Id int -// Uid int -// Score int -// Course string -// } -// -// type Entity struct { -// User *EntityUser -// UserDetail *EntityUserDetail -// UserScores []*EntityUserScores -// } -// -// var users []*Entity -// ScanList(records, &users, "User") -// ScanList(records, &users, "User", "uid") -// ScanList(records, &users, "UserDetail", "User", "uid:Uid") -// ScanList(records, &users, "UserScores", "User", "uid:Uid") -// ScanList(records, &users, "UserScores", "User", "uid") -// -// Usage example 2: Embedded attribute struct relation: -// -// type EntityUser struct { -// Uid int -// Name string -// } -// -// type EntityUserDetail struct { -// Uid int -// Address string -// } -// -// type EntityUserScores struct { -// Id int -// Uid int -// Score int -// } -// -// type Entity struct { -// EntityUser -// UserDetail EntityUserDetail -// UserScores []EntityUserScores -// } -// -// var users []*Entity -// ScanList(records, &users) -// ScanList(records, &users, "UserDetail", "uid") -// ScanList(records, &users, "UserScores", "uid") -// -// The parameters "User/UserDetail/UserScores" in the example codes specify the target attribute struct -// that current result will be bound to. -// -// The "uid" in the example codes is the table field name of the result, and the "Uid" is the relational -// struct attribute name - not the attribute name of the bound to target. 
In the example codes, it's attribute -// name "Uid" of "User" of entity "Entity". It automatically calculates the HasOne/HasMany relationship with -// given `relation` parameter. -// -// See the example or unit testing cases for clear understanding for this function. -func ScanList(structSlice interface{}, structSlicePointer interface{}, bindToAttrName string, relationAttrNameAndFields ...string) (err error) { - var ( - relationAttrName string - relationFields string - ) - switch len(relationAttrNameAndFields) { - case 2: - relationAttrName = relationAttrNameAndFields[0] - relationFields = relationAttrNameAndFields[1] - case 1: - relationFields = relationAttrNameAndFields[0] - } - return doScanList(structSlice, structSlicePointer, bindToAttrName, relationAttrName, relationFields) -} - -// doScanList converts `structSlice` to struct slice which contains other complex struct attributes recursively. -// Note that the parameter `structSlicePointer` should be type of *[]struct/*[]*struct. -func doScanList( - structSlice interface{}, structSlicePointer interface{}, bindToAttrName, relationAttrName, relationFields string, -) (err error) { - var ( - maps = Maps(structSlice) - ) - if len(maps) == 0 { - return nil - } - // Necessary checks for parameters. - if bindToAttrName == "" { - return gerror.NewCode(gcode.CodeInvalidParameter, `bindToAttrName should not be empty`) - } - - if relationAttrName == "." 
{ - relationAttrName = "" - } - - var ( - reflectValue = reflect.ValueOf(structSlicePointer) - reflectKind = reflectValue.Kind() - ) - if reflectKind == reflect.Interface { - reflectValue = reflectValue.Elem() - reflectKind = reflectValue.Kind() - } - if reflectKind != reflect.Ptr { - return gerror.NewCodef( - gcode.CodeInvalidParameter, - "structSlicePointer should be type of *[]struct/*[]*struct, but got: %v", - reflectKind, - ) - } - reflectValue = reflectValue.Elem() - reflectKind = reflectValue.Kind() - if reflectKind != reflect.Slice && reflectKind != reflect.Array { - return gerror.NewCodef( - gcode.CodeInvalidParameter, - "structSlicePointer should be type of *[]struct/*[]*struct, but got: %v", - reflectKind, - ) - } - length := len(maps) - if length == 0 { - // The pointed slice is not empty. - if reflectValue.Len() > 0 { - // It here checks if it has struct item, which is already initialized. - // It then returns error to warn the developer its empty and no conversion. - if v := reflectValue.Index(0); v.Kind() != reflect.Ptr { - return sql.ErrNoRows - } - } - // Do nothing for empty struct slice. - return nil - } - var ( - arrayValue reflect.Value // Like: []*Entity - arrayItemType reflect.Type // Like: *Entity - reflectType = reflect.TypeOf(structSlicePointer) - ) - if reflectValue.Len() > 0 { - arrayValue = reflectValue - } else { - arrayValue = reflect.MakeSlice(reflectType.Elem(), length, length) - } - - // Slice element item. - arrayItemType = arrayValue.Index(0).Type() - - // Relation variables. - var ( - relationDataMap map[string]interface{} - relationFromFieldName string // Eg: relationKV: id:uid -> id - relationBindToFieldName string // Eg: relationKV: id:uid -> uid - ) - if len(relationFields) > 0 { - // The relation key string of table filed name and attribute name - // can be joined with char '=' or ':'. - array := utils.SplitAndTrim(relationFields, "=") - if len(array) == 1 { - // Compatible with old splitting char ':'. 
- array = utils.SplitAndTrim(relationFields, ":") - } - if len(array) == 1 { - // The relation names are the same. - array = []string{relationFields, relationFields} - } - if len(array) == 2 { - // Defined table field to relation attribute name. - // Like: - // uid:Uid - // uid:UserId - relationFromFieldName = array[0] - relationBindToFieldName = array[1] - if key, _ := utils.MapPossibleItemByKey(maps[0], relationFromFieldName); key == "" { - return gerror.NewCodef( - gcode.CodeInvalidParameter, - `cannot find possible related table field name "%s" from given relation fields "%s"`, - relationFromFieldName, - relationFields, - ) - } else { - relationFromFieldName = key - } - } else { - return gerror.NewCode( - gcode.CodeInvalidParameter, - `parameter relationKV should be format of "ResultFieldName:BindToAttrName"`, - ) - } - if relationFromFieldName != "" { - // Note that the value might be type of slice. - relationDataMap = utils.ListToMapByKey(maps, relationFromFieldName) - } - if len(relationDataMap) == 0 { - return gerror.NewCodef( - gcode.CodeInvalidParameter, - `cannot find the relation data map, maybe invalid relation fields given "%v"`, - relationFields, - ) - } - } - // Bind to target attribute. 
- var ( - ok bool - bindToAttrValue reflect.Value - bindToAttrKind reflect.Kind - bindToAttrType reflect.Type - bindToAttrField reflect.StructField - ) - if arrayItemType.Kind() == reflect.Ptr { - if bindToAttrField, ok = arrayItemType.Elem().FieldByName(bindToAttrName); !ok { - return gerror.NewCodef( - gcode.CodeInvalidParameter, - `invalid parameter bindToAttrName: cannot find attribute with name "%s" from slice element`, - bindToAttrName, - ) - } - } else { - if bindToAttrField, ok = arrayItemType.FieldByName(bindToAttrName); !ok { - return gerror.NewCodef( - gcode.CodeInvalidParameter, - `invalid parameter bindToAttrName: cannot find attribute with name "%s" from slice element`, - bindToAttrName, - ) - } - } - bindToAttrType = bindToAttrField.Type - bindToAttrKind = bindToAttrType.Kind() - - // Bind to relation conditions. - var ( - relationFromAttrValue reflect.Value - relationFromAttrField reflect.Value - relationBindToFieldNameChecked bool - ) - for i := 0; i < arrayValue.Len(); i++ { - arrayElemValue := arrayValue.Index(i) - // The FieldByName should be called on non-pointer reflect.Value. - if arrayElemValue.Kind() == reflect.Ptr { - // Like: []*Entity - arrayElemValue = arrayElemValue.Elem() - if !arrayElemValue.IsValid() { - // The element is nil, then create one and set it to the slice. - // The "reflect.New(itemType.Elem())" creates a new element and returns the address of it. - // For example: - // reflect.New(itemType.Elem()) => *Entity - // reflect.New(itemType.Elem()).Elem() => Entity - arrayElemValue = reflect.New(arrayItemType.Elem()).Elem() - arrayValue.Index(i).Set(arrayElemValue.Addr()) - } - } else { - // Like: []Entity - } - bindToAttrValue = arrayElemValue.FieldByName(bindToAttrName) - if relationAttrName != "" { - // Attribute value of current slice element. 
- relationFromAttrValue = arrayElemValue.FieldByName(relationAttrName) - if relationFromAttrValue.Kind() == reflect.Ptr { - relationFromAttrValue = relationFromAttrValue.Elem() - } - } else { - // Current slice element. - relationFromAttrValue = arrayElemValue - } - if len(relationDataMap) > 0 && !relationFromAttrValue.IsValid() { - return gerror.NewCodef(gcode.CodeInvalidParameter, `invalid relation fields specified: "%v"`, relationFields) - } - // Check and find possible bind to attribute name. - if relationFields != "" && !relationBindToFieldNameChecked { - relationFromAttrField = relationFromAttrValue.FieldByName(relationBindToFieldName) - if !relationFromAttrField.IsValid() { - var ( - filedMap, _ = gstructs.FieldMap(gstructs.FieldMapInput{ - Pointer: relationFromAttrValue, - RecursiveOption: gstructs.RecursiveOptionEmbeddedNoTag, - }) - ) - if key, _ := utils.MapPossibleItemByKey(Map(filedMap), relationBindToFieldName); key == "" { - return gerror.NewCodef( - gcode.CodeInvalidParameter, - `cannot find possible related attribute name "%s" from given relation fields "%s"`, - relationBindToFieldName, - relationFields, - ) - } else { - relationBindToFieldName = key - } - } - relationBindToFieldNameChecked = true - } - switch bindToAttrKind { - case reflect.Array, reflect.Slice: - if len(relationDataMap) > 0 { - relationFromAttrField = relationFromAttrValue.FieldByName(relationBindToFieldName) - if relationFromAttrField.IsValid() { - // results := make(Result, 0) - results := make([]interface{}, 0) - for _, v := range SliceAny(relationDataMap[String(relationFromAttrField.Interface())]) { - item := v - results = append(results, item) - } - if err = Structs(results, bindToAttrValue.Addr()); err != nil { - return err - } - } else { - // Maybe the attribute does not exist yet. 
- return gerror.NewCodef(gcode.CodeInvalidParameter, `invalid relation fields specified: "%v"`, relationFields) - } - } else { - return gerror.NewCodef( - gcode.CodeInvalidParameter, - `relationKey should not be empty as field "%s" is slice`, - bindToAttrName, - ) - } - - case reflect.Ptr: - var element reflect.Value - if bindToAttrValue.IsNil() { - element = reflect.New(bindToAttrType.Elem()).Elem() - } else { - element = bindToAttrValue.Elem() - } - if len(relationDataMap) > 0 { - relationFromAttrField = relationFromAttrValue.FieldByName(relationBindToFieldName) - if relationFromAttrField.IsValid() { - v := relationDataMap[String(relationFromAttrField.Interface())] - if v == nil { - // There's no relational data. - continue - } - if utils.IsSlice(v) { - if err = Struct(SliceAny(v)[0], element); err != nil { - return err - } - } else { - if err = Struct(v, element); err != nil { - return err - } - } - } else { - // Maybe the attribute does not exist yet. - return gerror.NewCodef(gcode.CodeInvalidParameter, `invalid relation fields specified: "%v"`, relationFields) - } - } else { - if i >= len(maps) { - // There's no relational data. - continue - } - v := maps[i] - if v == nil { - // There's no relational data. - continue - } - if err = Struct(v, element); err != nil { - return err - } - } - bindToAttrValue.Set(element.Addr()) - - case reflect.Struct: - if len(relationDataMap) > 0 { - relationFromAttrField = relationFromAttrValue.FieldByName(relationBindToFieldName) - if relationFromAttrField.IsValid() { - relationDataItem := relationDataMap[String(relationFromAttrField.Interface())] - if relationDataItem == nil { - // There's no relational data. - continue - } - if utils.IsSlice(relationDataItem) { - if err = Struct(SliceAny(relationDataItem)[0], bindToAttrValue); err != nil { - return err - } - } else { - if err = Struct(relationDataItem, bindToAttrValue); err != nil { - return err - } - } - } else { - // Maybe the attribute does not exist yet. 
- return gerror.NewCodef(gcode.CodeInvalidParameter, `invalid relation fields specified: "%v"`, relationFields) - } - } else { - if i >= len(maps) { - // There's no relational data. - continue - } - relationDataItem := maps[i] - if relationDataItem == nil { - // There's no relational data. - continue - } - if err = Struct(relationDataItem, bindToAttrValue); err != nil { - return err - } - } - - default: - return gerror.NewCodef(gcode.CodeInvalidParameter, `unsupported attribute type: %s`, bindToAttrKind.String()) - } - } - reflect.ValueOf(structSlicePointer).Elem().Set(arrayValue) - return nil -} diff --git a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_slice_any.go b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_slice_any.go deleted file mode 100644 index 8308fdf6..00000000 --- a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_slice_any.go +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gconv - -import ( - "reflect" - - "github.com/gogf/gf/v2/internal/json" - "github.com/gogf/gf/v2/internal/reflection" -) - -// SliceAny is alias of Interfaces. -func SliceAny(any interface{}) []interface{} { - return Interfaces(any) -} - -// Interfaces converts `any` to []interface{}. 
-func Interfaces(any interface{}) []interface{} { - if any == nil { - return nil - } - var array []interface{} - switch value := any.(type) { - case []interface{}: - array = value - case []string: - array = make([]interface{}, len(value)) - for k, v := range value { - array[k] = v - } - case []int: - array = make([]interface{}, len(value)) - for k, v := range value { - array[k] = v - } - case []int8: - array = make([]interface{}, len(value)) - for k, v := range value { - array[k] = v - } - case []int16: - array = make([]interface{}, len(value)) - for k, v := range value { - array[k] = v - } - case []int32: - array = make([]interface{}, len(value)) - for k, v := range value { - array[k] = v - } - case []int64: - array = make([]interface{}, len(value)) - for k, v := range value { - array[k] = v - } - case []uint: - array = make([]interface{}, len(value)) - for k, v := range value { - array[k] = v - } - case []uint8: - if json.Valid(value) { - _ = json.UnmarshalUseNumber(value, &array) - } else { - array = make([]interface{}, len(value)) - for k, v := range value { - array[k] = v - } - } - case []uint16: - array = make([]interface{}, len(value)) - for k, v := range value { - array[k] = v - } - case []uint32: - for _, v := range value { - array = append(array, v) - } - case []uint64: - array = make([]interface{}, len(value)) - for k, v := range value { - array[k] = v - } - case []bool: - array = make([]interface{}, len(value)) - for k, v := range value { - array[k] = v - } - case []float32: - array = make([]interface{}, len(value)) - for k, v := range value { - array[k] = v - } - case []float64: - array = make([]interface{}, len(value)) - for k, v := range value { - array[k] = v - } - } - if array != nil { - return array - } - if v, ok := any.(iInterfaces); ok { - return v.Interfaces() - } - // JSON format string value converting. - if checkJsonAndUnmarshalUseNumber(any, &array) { - return array - } - // Not a common type, it then uses reflection for conversion. 
- originValueAndKind := reflection.OriginValueAndKind(any) - switch originValueAndKind.OriginKind { - case reflect.Slice, reflect.Array: - var ( - length = originValueAndKind.OriginValue.Len() - slice = make([]interface{}, length) - ) - for i := 0; i < length; i++ { - slice[i] = originValueAndKind.OriginValue.Index(i).Interface() - } - return slice - - default: - return []interface{}{any} - } -} diff --git a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_slice_float.go b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_slice_float.go deleted file mode 100644 index 3d7b4994..00000000 --- a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_slice_float.go +++ /dev/null @@ -1,282 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gconv - -import ( - "reflect" - - "github.com/gogf/gf/v2/internal/json" - "github.com/gogf/gf/v2/internal/reflection" -) - -// SliceFloat is alias of Floats. -func SliceFloat(any interface{}) []float64 { - return Floats(any) -} - -// SliceFloat32 is alias of Float32s. -func SliceFloat32(any interface{}) []float32 { - return Float32s(any) -} - -// SliceFloat64 is alias of Float64s. -func SliceFloat64(any interface{}) []float64 { - return Floats(any) -} - -// Floats converts `any` to []float64. -func Floats(any interface{}) []float64 { - return Float64s(any) -} - -// Float32s converts `any` to []float32. 
-func Float32s(any interface{}) []float32 { - if any == nil { - return nil - } - var ( - array []float32 = nil - ) - switch value := any.(type) { - case string: - if value == "" { - return []float32{} - } - return []float32{Float32(value)} - case []string: - array = make([]float32, len(value)) - for k, v := range value { - array[k] = Float32(v) - } - case []int: - array = make([]float32, len(value)) - for k, v := range value { - array[k] = Float32(v) - } - case []int8: - array = make([]float32, len(value)) - for k, v := range value { - array[k] = Float32(v) - } - case []int16: - array = make([]float32, len(value)) - for k, v := range value { - array[k] = Float32(v) - } - case []int32: - array = make([]float32, len(value)) - for k, v := range value { - array[k] = Float32(v) - } - case []int64: - array = make([]float32, len(value)) - for k, v := range value { - array[k] = Float32(v) - } - case []uint: - for _, v := range value { - array = append(array, Float32(v)) - } - case []uint8: - if json.Valid(value) { - _ = json.UnmarshalUseNumber(value, &array) - } else { - array = make([]float32, len(value)) - for k, v := range value { - array[k] = Float32(v) - } - } - case []uint16: - array = make([]float32, len(value)) - for k, v := range value { - array[k] = Float32(v) - } - case []uint32: - array = make([]float32, len(value)) - for k, v := range value { - array[k] = Float32(v) - } - case []uint64: - array = make([]float32, len(value)) - for k, v := range value { - array[k] = Float32(v) - } - case []bool: - array = make([]float32, len(value)) - for k, v := range value { - array[k] = Float32(v) - } - case []float32: - array = value - case []float64: - array = make([]float32, len(value)) - for k, v := range value { - array[k] = Float32(v) - } - case []interface{}: - array = make([]float32, len(value)) - for k, v := range value { - array[k] = Float32(v) - } - } - if array != nil { - return array - } - if v, ok := any.(iFloats); ok { - return Float32s(v.Floats()) - } - if v, 
ok := any.(iInterfaces); ok { - return Float32s(v.Interfaces()) - } - // JSON format string value converting. - if checkJsonAndUnmarshalUseNumber(any, &array) { - return array - } - // Not a common type, it then uses reflection for conversion. - originValueAndKind := reflection.OriginValueAndKind(any) - switch originValueAndKind.OriginKind { - case reflect.Slice, reflect.Array: - var ( - length = originValueAndKind.OriginValue.Len() - slice = make([]float32, length) - ) - for i := 0; i < length; i++ { - slice[i] = Float32(originValueAndKind.OriginValue.Index(i).Interface()) - } - return slice - - default: - if originValueAndKind.OriginValue.IsZero() { - return []float32{} - } - return []float32{Float32(any)} - } -} - -// Float64s converts `any` to []float64. -func Float64s(any interface{}) []float64 { - if any == nil { - return nil - } - var ( - array []float64 = nil - ) - switch value := any.(type) { - case string: - if value == "" { - return []float64{} - } - return []float64{Float64(value)} - case []string: - array = make([]float64, len(value)) - for k, v := range value { - array[k] = Float64(v) - } - case []int: - array = make([]float64, len(value)) - for k, v := range value { - array[k] = Float64(v) - } - case []int8: - array = make([]float64, len(value)) - for k, v := range value { - array[k] = Float64(v) - } - case []int16: - array = make([]float64, len(value)) - for k, v := range value { - array[k] = Float64(v) - } - case []int32: - array = make([]float64, len(value)) - for k, v := range value { - array[k] = Float64(v) - } - case []int64: - array = make([]float64, len(value)) - for k, v := range value { - array[k] = Float64(v) - } - case []uint: - for _, v := range value { - array = append(array, Float64(v)) - } - case []uint8: - if json.Valid(value) { - _ = json.UnmarshalUseNumber(value, &array) - } else { - array = make([]float64, len(value)) - for k, v := range value { - array[k] = Float64(v) - } - } - case []uint16: - array = make([]float64, len(value)) 
- for k, v := range value { - array[k] = Float64(v) - } - case []uint32: - array = make([]float64, len(value)) - for k, v := range value { - array[k] = Float64(v) - } - case []uint64: - array = make([]float64, len(value)) - for k, v := range value { - array[k] = Float64(v) - } - case []bool: - array = make([]float64, len(value)) - for k, v := range value { - array[k] = Float64(v) - } - case []float32: - array = make([]float64, len(value)) - for k, v := range value { - array[k] = Float64(v) - } - case []float64: - array = value - case []interface{}: - array = make([]float64, len(value)) - for k, v := range value { - array[k] = Float64(v) - } - } - if array != nil { - return array - } - if v, ok := any.(iFloats); ok { - return v.Floats() - } - if v, ok := any.(iInterfaces); ok { - return Floats(v.Interfaces()) - } - // JSON format string value converting. - if checkJsonAndUnmarshalUseNumber(any, &array) { - return array - } - // Not a common type, it then uses reflection for conversion. - originValueAndKind := reflection.OriginValueAndKind(any) - switch originValueAndKind.OriginKind { - case reflect.Slice, reflect.Array: - var ( - length = originValueAndKind.OriginValue.Len() - slice = make([]float64, length) - ) - for i := 0; i < length; i++ { - slice[i] = Float64(originValueAndKind.OriginValue.Index(i).Interface()) - } - return slice - - default: - if originValueAndKind.OriginValue.IsZero() { - return []float64{} - } - return []float64{Float64(any)} - } -} diff --git a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_slice_int.go b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_slice_int.go deleted file mode 100644 index f28e7fd1..00000000 --- a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_slice_int.go +++ /dev/null @@ -1,416 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. 
-// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gconv - -import ( - "reflect" - - "github.com/gogf/gf/v2/internal/json" - "github.com/gogf/gf/v2/internal/reflection" -) - -// SliceInt is alias of Ints. -func SliceInt(any interface{}) []int { - return Ints(any) -} - -// SliceInt32 is alias of Int32s. -func SliceInt32(any interface{}) []int32 { - return Int32s(any) -} - -// SliceInt64 is alias of Int64s. -func SliceInt64(any interface{}) []int64 { - return Int64s(any) -} - -// Ints converts `any` to []int. -func Ints(any interface{}) []int { - if any == nil { - return nil - } - var ( - array []int = nil - ) - switch value := any.(type) { - case []string: - array = make([]int, len(value)) - for k, v := range value { - array[k] = Int(v) - } - case []int: - array = value - case []int8: - array = make([]int, len(value)) - for k, v := range value { - array[k] = int(v) - } - case []int16: - array = make([]int, len(value)) - for k, v := range value { - array[k] = int(v) - } - case []int32: - array = make([]int, len(value)) - for k, v := range value { - array[k] = int(v) - } - case []int64: - array = make([]int, len(value)) - for k, v := range value { - array[k] = int(v) - } - case []uint: - array = make([]int, len(value)) - for k, v := range value { - array[k] = int(v) - } - case []uint8: - if json.Valid(value) { - _ = json.UnmarshalUseNumber(value, &array) - } else { - array = make([]int, len(value)) - for k, v := range value { - array[k] = int(v) - } - } - case []uint16: - array = make([]int, len(value)) - for k, v := range value { - array[k] = int(v) - } - case []uint32: - array = make([]int, len(value)) - for k, v := range value { - array[k] = int(v) - } - case []uint64: - array = make([]int, len(value)) - for k, v := range value { - array[k] = int(v) - } - case []bool: - array = make([]int, len(value)) - for k, v := range value { - if v { - array[k] = 1 - } else { - array[k] = 0 - } - } - 
case []float32: - array = make([]int, len(value)) - for k, v := range value { - array[k] = Int(v) - } - case []float64: - array = make([]int, len(value)) - for k, v := range value { - array[k] = Int(v) - } - case []interface{}: - array = make([]int, len(value)) - for k, v := range value { - array[k] = Int(v) - } - case [][]byte: - array = make([]int, len(value)) - for k, v := range value { - array[k] = Int(v) - } - } - if array != nil { - return array - } - if v, ok := any.(iInts); ok { - return v.Ints() - } - if v, ok := any.(iInterfaces); ok { - return Ints(v.Interfaces()) - } - // JSON format string value converting. - if checkJsonAndUnmarshalUseNumber(any, &array) { - return array - } - // Not a common type, it then uses reflection for conversion. - originValueAndKind := reflection.OriginValueAndKind(any) - switch originValueAndKind.OriginKind { - case reflect.Slice, reflect.Array: - var ( - length = originValueAndKind.OriginValue.Len() - slice = make([]int, length) - ) - for i := 0; i < length; i++ { - slice[i] = Int(originValueAndKind.OriginValue.Index(i).Interface()) - } - return slice - - default: - if originValueAndKind.OriginValue.IsZero() { - return []int{} - } - return []int{Int(any)} - } -} - -// Int32s converts `any` to []int32. 
-func Int32s(any interface{}) []int32 { - if any == nil { - return nil - } - var ( - array []int32 = nil - ) - switch value := any.(type) { - case []string: - array = make([]int32, len(value)) - for k, v := range value { - array[k] = Int32(v) - } - case []int: - array = make([]int32, len(value)) - for k, v := range value { - array[k] = int32(v) - } - case []int8: - array = make([]int32, len(value)) - for k, v := range value { - array[k] = int32(v) - } - case []int16: - array = make([]int32, len(value)) - for k, v := range value { - array[k] = int32(v) - } - case []int32: - array = value - case []int64: - array = make([]int32, len(value)) - for k, v := range value { - array[k] = int32(v) - } - case []uint: - array = make([]int32, len(value)) - for k, v := range value { - array[k] = int32(v) - } - case []uint8: - if json.Valid(value) { - _ = json.UnmarshalUseNumber(value, &array) - } else { - array = make([]int32, len(value)) - for k, v := range value { - array[k] = int32(v) - } - } - case []uint16: - array = make([]int32, len(value)) - for k, v := range value { - array[k] = int32(v) - } - case []uint32: - array = make([]int32, len(value)) - for k, v := range value { - array[k] = int32(v) - } - case []uint64: - array = make([]int32, len(value)) - for k, v := range value { - array[k] = int32(v) - } - case []bool: - array = make([]int32, len(value)) - for k, v := range value { - if v { - array[k] = 1 - } else { - array[k] = 0 - } - } - case []float32: - array = make([]int32, len(value)) - for k, v := range value { - array[k] = Int32(v) - } - case []float64: - array = make([]int32, len(value)) - for k, v := range value { - array[k] = Int32(v) - } - case []interface{}: - array = make([]int32, len(value)) - for k, v := range value { - array[k] = Int32(v) - } - case [][]byte: - array = make([]int32, len(value)) - for k, v := range value { - array[k] = Int32(v) - } - } - if array != nil { - return array - } - if v, ok := any.(iInts); ok { - return Int32s(v.Ints()) - } - if 
v, ok := any.(iInterfaces); ok { - return Int32s(v.Interfaces()) - } - // JSON format string value converting. - if checkJsonAndUnmarshalUseNumber(any, &array) { - return array - } - // Not a common type, it then uses reflection for conversion. - originValueAndKind := reflection.OriginValueAndKind(any) - switch originValueAndKind.OriginKind { - case reflect.Slice, reflect.Array: - var ( - length = originValueAndKind.OriginValue.Len() - slice = make([]int32, length) - ) - for i := 0; i < length; i++ { - slice[i] = Int32(originValueAndKind.OriginValue.Index(i).Interface()) - } - return slice - - default: - if originValueAndKind.OriginValue.IsZero() { - return []int32{} - } - return []int32{Int32(any)} - } -} - -// Int64s converts `any` to []int64. -func Int64s(any interface{}) []int64 { - if any == nil { - return nil - } - var ( - array []int64 = nil - ) - switch value := any.(type) { - case []string: - array = make([]int64, len(value)) - for k, v := range value { - array[k] = Int64(v) - } - case []int: - array = make([]int64, len(value)) - for k, v := range value { - array[k] = int64(v) - } - case []int8: - array = make([]int64, len(value)) - for k, v := range value { - array[k] = int64(v) - } - case []int16: - array = make([]int64, len(value)) - for k, v := range value { - array[k] = int64(v) - } - case []int32: - array = make([]int64, len(value)) - for k, v := range value { - array[k] = int64(v) - } - case []int64: - array = value - case []uint: - array = make([]int64, len(value)) - for k, v := range value { - array[k] = int64(v) - } - case []uint8: - if json.Valid(value) { - _ = json.UnmarshalUseNumber(value, &array) - } else { - array = make([]int64, len(value)) - for k, v := range value { - array[k] = int64(v) - } - } - case []uint16: - array = make([]int64, len(value)) - for k, v := range value { - array[k] = int64(v) - } - case []uint32: - array = make([]int64, len(value)) - for k, v := range value { - array[k] = int64(v) - } - case []uint64: - array = 
make([]int64, len(value)) - for k, v := range value { - array[k] = int64(v) - } - case []bool: - array = make([]int64, len(value)) - for k, v := range value { - if v { - array[k] = 1 - } else { - array[k] = 0 - } - } - case []float32: - array = make([]int64, len(value)) - for k, v := range value { - array[k] = Int64(v) - } - case []float64: - array = make([]int64, len(value)) - for k, v := range value { - array[k] = Int64(v) - } - case []interface{}: - array = make([]int64, len(value)) - for k, v := range value { - array[k] = Int64(v) - } - case [][]byte: - array = make([]int64, len(value)) - for k, v := range value { - array[k] = Int64(v) - } - } - if array != nil { - return array - } - if v, ok := any.(iInts); ok { - return Int64s(v.Ints()) - } - if v, ok := any.(iInterfaces); ok { - return Int64s(v.Interfaces()) - } - // JSON format string value converting. - if checkJsonAndUnmarshalUseNumber(any, &array) { - return array - } - // Not a common type, it then uses reflection for conversion. - originValueAndKind := reflection.OriginValueAndKind(any) - switch originValueAndKind.OriginKind { - case reflect.Slice, reflect.Array: - var ( - length = originValueAndKind.OriginValue.Len() - slice = make([]int64, length) - ) - for i := 0; i < length; i++ { - slice[i] = Int64(originValueAndKind.OriginValue.Index(i).Interface()) - } - return slice - - default: - if originValueAndKind.OriginValue.IsZero() { - return []int64{} - } - return []int64{Int64(any)} - } -} diff --git a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_slice_str.go b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_slice_str.go deleted file mode 100644 index c085d271..00000000 --- a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_slice_str.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. 
-// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gconv - -import ( - "reflect" - - "github.com/gogf/gf/v2/internal/json" - "github.com/gogf/gf/v2/internal/reflection" -) - -// SliceStr is alias of Strings. -func SliceStr(any interface{}) []string { - return Strings(any) -} - -// Strings converts `any` to []string. -func Strings(any interface{}) []string { - if any == nil { - return nil - } - var ( - array []string = nil - ) - switch value := any.(type) { - case []int: - array = make([]string, len(value)) - for k, v := range value { - array[k] = String(v) - } - case []int8: - array = make([]string, len(value)) - for k, v := range value { - array[k] = String(v) - } - case []int16: - array = make([]string, len(value)) - for k, v := range value { - array[k] = String(v) - } - case []int32: - array = make([]string, len(value)) - for k, v := range value { - array[k] = String(v) - } - case []int64: - array = make([]string, len(value)) - for k, v := range value { - array[k] = String(v) - } - case []uint: - array = make([]string, len(value)) - for k, v := range value { - array[k] = String(v) - } - case []uint8: - if json.Valid(value) { - _ = json.UnmarshalUseNumber(value, &array) - } else { - array = make([]string, len(value)) - for k, v := range value { - array[k] = String(v) - } - } - case []uint16: - array = make([]string, len(value)) - for k, v := range value { - array[k] = String(v) - } - case []uint32: - array = make([]string, len(value)) - for k, v := range value { - array[k] = String(v) - } - case []uint64: - array = make([]string, len(value)) - for k, v := range value { - array[k] = String(v) - } - case []bool: - array = make([]string, len(value)) - for k, v := range value { - array[k] = String(v) - } - case []float32: - array = make([]string, len(value)) - for k, v := range value { - array[k] = String(v) - } - case []float64: - array = make([]string, len(value)) - for k, v := range value { 
- array[k] = String(v) - } - case []interface{}: - array = make([]string, len(value)) - for k, v := range value { - array[k] = String(v) - } - case []string: - array = value - case [][]byte: - array = make([]string, len(value)) - for k, v := range value { - array[k] = String(v) - } - } - if array != nil { - return array - } - if v, ok := any.(iStrings); ok { - return v.Strings() - } - if v, ok := any.(iInterfaces); ok { - return Strings(v.Interfaces()) - } - // JSON format string value converting. - if checkJsonAndUnmarshalUseNumber(any, &array) { - return array - } - // Not a common type, it then uses reflection for conversion. - originValueAndKind := reflection.OriginValueAndKind(any) - switch originValueAndKind.OriginKind { - case reflect.Slice, reflect.Array: - var ( - length = originValueAndKind.OriginValue.Len() - slice = make([]string, length) - ) - for i := 0; i < length; i++ { - slice[i] = String(originValueAndKind.OriginValue.Index(i).Interface()) - } - return slice - - default: - if originValueAndKind.OriginValue.IsZero() { - return []string{} - } - return []string{String(any)} - } -} diff --git a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_slice_uint.go b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_slice_uint.go deleted file mode 100644 index a1ffa761..00000000 --- a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_slice_uint.go +++ /dev/null @@ -1,436 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gconv - -import ( - "reflect" - "strings" - - "github.com/gogf/gf/v2/internal/json" - "github.com/gogf/gf/v2/internal/reflection" - "github.com/gogf/gf/v2/internal/utils" -) - -// SliceUint is alias of Uints. -func SliceUint(any interface{}) []uint { - return Uints(any) -} - -// SliceUint32 is alias of Uint32s. 
-func SliceUint32(any interface{}) []uint32 { - return Uint32s(any) -} - -// SliceUint64 is alias of Uint64s. -func SliceUint64(any interface{}) []uint64 { - return Uint64s(any) -} - -// Uints converts `any` to []uint. -func Uints(any interface{}) []uint { - if any == nil { - return nil - } - - var ( - array []uint = nil - ) - switch value := any.(type) { - case string: - value = strings.TrimSpace(value) - if value == "" { - return []uint{} - } - if utils.IsNumeric(value) { - return []uint{Uint(value)} - } - - case []string: - array = make([]uint, len(value)) - for k, v := range value { - array[k] = Uint(v) - } - case []int8: - array = make([]uint, len(value)) - for k, v := range value { - array[k] = uint(v) - } - case []int16: - array = make([]uint, len(value)) - for k, v := range value { - array[k] = uint(v) - } - case []int32: - array = make([]uint, len(value)) - for k, v := range value { - array[k] = uint(v) - } - case []int64: - array = make([]uint, len(value)) - for k, v := range value { - array[k] = uint(v) - } - case []uint: - array = value - case []uint8: - if json.Valid(value) { - _ = json.UnmarshalUseNumber(value, &array) - } else { - array = make([]uint, len(value)) - for k, v := range value { - array[k] = uint(v) - } - } - case []uint16: - array = make([]uint, len(value)) - for k, v := range value { - array[k] = uint(v) - } - case []uint32: - array = make([]uint, len(value)) - for k, v := range value { - array[k] = uint(v) - } - case []uint64: - array = make([]uint, len(value)) - for k, v := range value { - array[k] = uint(v) - } - case []bool: - array = make([]uint, len(value)) - for k, v := range value { - if v { - array[k] = 1 - } else { - array[k] = 0 - } - } - case []float32: - array = make([]uint, len(value)) - for k, v := range value { - array[k] = Uint(v) - } - case []float64: - array = make([]uint, len(value)) - for k, v := range value { - array[k] = Uint(v) - } - case []interface{}: - array = make([]uint, len(value)) - for k, v := range value 
{ - array[k] = Uint(v) - } - case [][]byte: - array = make([]uint, len(value)) - for k, v := range value { - array[k] = Uint(v) - } - } - - if array != nil { - return array - } - - // Default handler. - if v, ok := any.(iUints); ok { - return v.Uints() - } - if v, ok := any.(iInterfaces); ok { - return Uints(v.Interfaces()) - } - // JSON format string value converting. - if checkJsonAndUnmarshalUseNumber(any, &array) { - return array - } - // Not a common type, it then uses reflection for conversion. - originValueAndKind := reflection.OriginValueAndKind(any) - switch originValueAndKind.OriginKind { - case reflect.Slice, reflect.Array: - var ( - length = originValueAndKind.OriginValue.Len() - slice = make([]uint, length) - ) - for i := 0; i < length; i++ { - slice[i] = Uint(originValueAndKind.OriginValue.Index(i).Interface()) - } - return slice - - default: - if originValueAndKind.OriginValue.IsZero() { - return []uint{} - } - return []uint{Uint(any)} - } -} - -// Uint32s converts `any` to []uint32. 
-func Uint32s(any interface{}) []uint32 { - if any == nil { - return nil - } - var ( - array []uint32 = nil - ) - switch value := any.(type) { - case string: - value = strings.TrimSpace(value) - if value == "" { - return []uint32{} - } - if utils.IsNumeric(value) { - return []uint32{Uint32(value)} - } - case []string: - array = make([]uint32, len(value)) - for k, v := range value { - array[k] = Uint32(v) - } - case []int8: - array = make([]uint32, len(value)) - for k, v := range value { - array[k] = uint32(v) - } - case []int16: - array = make([]uint32, len(value)) - for k, v := range value { - array[k] = uint32(v) - } - case []int32: - array = make([]uint32, len(value)) - for k, v := range value { - array[k] = uint32(v) - } - case []int64: - array = make([]uint32, len(value)) - for k, v := range value { - array[k] = uint32(v) - } - case []uint: - array = make([]uint32, len(value)) - for k, v := range value { - array[k] = uint32(v) - } - case []uint8: - if json.Valid(value) { - _ = json.UnmarshalUseNumber(value, &array) - } else { - array = make([]uint32, len(value)) - for k, v := range value { - array[k] = uint32(v) - } - } - case []uint16: - array = make([]uint32, len(value)) - for k, v := range value { - array[k] = uint32(v) - } - case []uint32: - array = value - case []uint64: - array = make([]uint32, len(value)) - for k, v := range value { - array[k] = uint32(v) - } - case []bool: - array = make([]uint32, len(value)) - for k, v := range value { - if v { - array[k] = 1 - } else { - array[k] = 0 - } - } - case []float32: - array = make([]uint32, len(value)) - for k, v := range value { - array[k] = Uint32(v) - } - case []float64: - array = make([]uint32, len(value)) - for k, v := range value { - array[k] = Uint32(v) - } - case []interface{}: - array = make([]uint32, len(value)) - for k, v := range value { - array[k] = Uint32(v) - } - case [][]byte: - array = make([]uint32, len(value)) - for k, v := range value { - array[k] = Uint32(v) - } - } - if array != nil { 
- return array - } - - // Default handler. - if v, ok := any.(iUints); ok { - return Uint32s(v.Uints()) - } - if v, ok := any.(iInterfaces); ok { - return Uint32s(v.Interfaces()) - } - // JSON format string value converting. - if checkJsonAndUnmarshalUseNumber(any, &array) { - return array - } - // Not a common type, it then uses reflection for conversion. - originValueAndKind := reflection.OriginValueAndKind(any) - switch originValueAndKind.OriginKind { - case reflect.Slice, reflect.Array: - var ( - length = originValueAndKind.OriginValue.Len() - slice = make([]uint32, length) - ) - for i := 0; i < length; i++ { - slice[i] = Uint32(originValueAndKind.OriginValue.Index(i).Interface()) - } - return slice - - default: - if originValueAndKind.OriginValue.IsZero() { - return []uint32{} - } - return []uint32{Uint32(any)} - } -} - -// Uint64s converts `any` to []uint64. -func Uint64s(any interface{}) []uint64 { - if any == nil { - return nil - } - var ( - array []uint64 = nil - ) - switch value := any.(type) { - case string: - value = strings.TrimSpace(value) - if value == "" { - return []uint64{} - } - if utils.IsNumeric(value) { - return []uint64{Uint64(value)} - } - - case []string: - array = make([]uint64, len(value)) - for k, v := range value { - array[k] = Uint64(v) - } - case []int8: - array = make([]uint64, len(value)) - for k, v := range value { - array[k] = uint64(v) - } - case []int16: - array = make([]uint64, len(value)) - for k, v := range value { - array[k] = uint64(v) - } - case []int32: - array = make([]uint64, len(value)) - for k, v := range value { - array[k] = uint64(v) - } - case []int64: - array = make([]uint64, len(value)) - for k, v := range value { - array[k] = uint64(v) - } - case []uint: - array = make([]uint64, len(value)) - for k, v := range value { - array[k] = uint64(v) - } - case []uint8: - if json.Valid(value) { - _ = json.UnmarshalUseNumber(value, &array) - } else { - array = make([]uint64, len(value)) - for k, v := range value { - 
array[k] = uint64(v) - } - } - case []uint16: - array = make([]uint64, len(value)) - for k, v := range value { - array[k] = uint64(v) - } - case []uint32: - array = make([]uint64, len(value)) - for k, v := range value { - array[k] = uint64(v) - } - case []uint64: - array = value - case []bool: - array = make([]uint64, len(value)) - for k, v := range value { - if v { - array[k] = 1 - } else { - array[k] = 0 - } - } - case []float32: - array = make([]uint64, len(value)) - for k, v := range value { - array[k] = Uint64(v) - } - case []float64: - array = make([]uint64, len(value)) - for k, v := range value { - array[k] = Uint64(v) - } - case []interface{}: - array = make([]uint64, len(value)) - for k, v := range value { - array[k] = Uint64(v) - } - case [][]byte: - array = make([]uint64, len(value)) - for k, v := range value { - array[k] = Uint64(v) - } - } - if array != nil { - return array - } - // Default handler. - if v, ok := any.(iUints); ok { - return Uint64s(v.Uints()) - } - if v, ok := any.(iInterfaces); ok { - return Uint64s(v.Interfaces()) - } - // JSON format string value converting. - if checkJsonAndUnmarshalUseNumber(any, &array) { - return array - } - // Not a common type, it then uses reflection for conversion. 
- originValueAndKind := reflection.OriginValueAndKind(any) - switch originValueAndKind.OriginKind { - case reflect.Slice, reflect.Array: - var ( - length = originValueAndKind.OriginValue.Len() - slice = make([]uint64, length) - ) - for i := 0; i < length; i++ { - slice[i] = Uint64(originValueAndKind.OriginValue.Index(i).Interface()) - } - return slice - - default: - if originValueAndKind.OriginValue.IsZero() { - return []uint64{} - } - return []uint64{Uint64(any)} - } -} diff --git a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_struct.go b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_struct.go deleted file mode 100644 index 0acfb264..00000000 --- a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_struct.go +++ /dev/null @@ -1,656 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gconv - -import ( - "reflect" - "strings" - - "github.com/gogf/gf/v2/errors/gcode" - "github.com/gogf/gf/v2/errors/gerror" - "github.com/gogf/gf/v2/internal/empty" - "github.com/gogf/gf/v2/internal/json" - "github.com/gogf/gf/v2/internal/utils" - "github.com/gogf/gf/v2/os/gstructs" -) - -// Struct maps the params key-value pairs to the corresponding struct object's attributes. -// The third parameter `mapping` is unnecessary, indicating the mapping rules between the -// custom key name and the attribute name(case-sensitive). -// -// Note: -// 1. The `params` can be any type of map/struct, usually a map. -// 2. The `pointer` should be type of *struct/**struct, which is a pointer to struct object -// or struct pointer. -// 3. Only the public attributes of struct object can be mapped. -// 4. If `params` is a map, the key of the map `params` can be lowercase. 
-// It will automatically convert the first letter of the key to uppercase -// in mapping procedure to do the matching. -// It ignores the map key, if it does not match. -func Struct(params interface{}, pointer interface{}, mapping ...map[string]string) (err error) { - return Scan(params, pointer, mapping...) -} - -// StructTag acts as Struct but also with support for priority tag feature, which retrieves the -// specified tags for `params` key-value items to struct attribute names mapping. -// The parameter `priorityTag` supports multiple tags that can be joined with char ','. -func StructTag(params interface{}, pointer interface{}, priorityTag string) (err error) { - return doStruct(params, pointer, nil, priorityTag) -} - -// doStructWithJsonCheck checks if given `params` is JSON, it then uses json.Unmarshal doing the converting. -func doStructWithJsonCheck(params interface{}, pointer interface{}) (err error, ok bool) { - switch r := params.(type) { - case []byte: - if json.Valid(r) { - if rv, ok := pointer.(reflect.Value); ok { - if rv.Kind() == reflect.Ptr { - if rv.IsNil() { - return nil, false - } - return json.UnmarshalUseNumber(r, rv.Interface()), true - } else if rv.CanAddr() { - return json.UnmarshalUseNumber(r, rv.Addr().Interface()), true - } - } else { - return json.UnmarshalUseNumber(r, pointer), true - } - } - case string: - if paramsBytes := []byte(r); json.Valid(paramsBytes) { - if rv, ok := pointer.(reflect.Value); ok { - if rv.Kind() == reflect.Ptr { - if rv.IsNil() { - return nil, false - } - return json.UnmarshalUseNumber(paramsBytes, rv.Interface()), true - } else if rv.CanAddr() { - return json.UnmarshalUseNumber(paramsBytes, rv.Addr().Interface()), true - } - } else { - return json.UnmarshalUseNumber(paramsBytes, pointer), true - } - } - default: - // The `params` might be struct that implements interface function Interface, eg: gvar.Var. 
- if v, ok := params.(iInterface); ok { - return doStructWithJsonCheck(v.Interface(), pointer) - } - } - return nil, false -} - -// doStruct is the core internal converting function for any data to struct. -func doStruct(params interface{}, pointer interface{}, mapping map[string]string, priorityTag string) (err error) { - if params == nil { - // If `params` is nil, no conversion. - return nil - } - if pointer == nil { - return gerror.NewCode(gcode.CodeInvalidParameter, "object pointer cannot be nil") - } - - defer func() { - // Catch the panic, especially the reflection operation panics. - if exception := recover(); exception != nil { - if v, ok := exception.(error); ok && gerror.HasStack(v) { - err = v - } else { - err = gerror.NewCodeSkipf(gcode.CodeInternalPanic, 1, "%+v", exception) - } - } - }() - - // JSON content converting. - err, ok := doStructWithJsonCheck(params, pointer) - if err != nil { - return err - } - if ok { - return nil - } - - var ( - paramsReflectValue reflect.Value - paramsInterface interface{} // DO NOT use `params` directly as it might be type `reflect.Value` - pointerReflectValue reflect.Value - pointerReflectKind reflect.Kind - pointerElemReflectValue reflect.Value // The pointed element. - ) - if v, ok := params.(reflect.Value); ok { - paramsReflectValue = v - } else { - paramsReflectValue = reflect.ValueOf(params) - } - paramsInterface = paramsReflectValue.Interface() - if v, ok := pointer.(reflect.Value); ok { - pointerReflectValue = v - pointerElemReflectValue = v - } else { - pointerReflectValue = reflect.ValueOf(pointer) - pointerReflectKind = pointerReflectValue.Kind() - if pointerReflectKind != reflect.Ptr { - return gerror.NewCodef(gcode.CodeInvalidParameter, "object pointer should be type of '*struct', but got '%v'", pointerReflectKind) - } - // Using IsNil on reflect.Ptr variable is OK. 
- if !pointerReflectValue.IsValid() || pointerReflectValue.IsNil() { - return gerror.NewCode(gcode.CodeInvalidParameter, "object pointer cannot be nil") - } - pointerElemReflectValue = pointerReflectValue.Elem() - } - - // custom convert try first - if ok, err = callCustomConverter(paramsReflectValue, pointerReflectValue); ok { - return err - } - - // If `params` and `pointer` are the same type, the do directly assignment. - // For performance enhancement purpose. - if pointerElemReflectValue.IsValid() { - switch { - // Eg: - // UploadFile => UploadFile - // *UploadFile => *UploadFile - case pointerElemReflectValue.Type() == paramsReflectValue.Type(): - pointerElemReflectValue.Set(paramsReflectValue) - return nil - - // Eg: - // UploadFile => *UploadFile - case pointerElemReflectValue.Kind() == reflect.Ptr && pointerElemReflectValue.Elem().IsValid() && - pointerElemReflectValue.Elem().Type() == paramsReflectValue.Type(): - pointerElemReflectValue.Elem().Set(paramsReflectValue) - return nil - - // Eg: - // *UploadFile => UploadFile - case paramsReflectValue.Kind() == reflect.Ptr && paramsReflectValue.Elem().IsValid() && - pointerElemReflectValue.Type() == paramsReflectValue.Elem().Type(): - pointerElemReflectValue.Set(paramsReflectValue.Elem()) - return nil - } - } - - // Normal unmarshalling interfaces checks. - if err, ok = bindVarToReflectValueWithInterfaceCheck(pointerReflectValue, paramsInterface); ok { - return err - } - - // It automatically creates struct object if necessary. - // For example, if `pointer` is **User, then `elem` is *User, which is a pointer to User. - if pointerElemReflectValue.Kind() == reflect.Ptr { - if !pointerElemReflectValue.IsValid() || pointerElemReflectValue.IsNil() { - e := reflect.New(pointerElemReflectValue.Type().Elem()) - pointerElemReflectValue.Set(e) - defer func() { - if err != nil { - // If it is converted failed, it reset the `pointer` to nil. 
- pointerReflectValue.Elem().Set(reflect.Zero(pointerReflectValue.Type().Elem())) - } - }() - } - // if v, ok := pointerElemReflectValue.Interface().(iUnmarshalValue); ok { - // return v.UnmarshalValue(params) - // } - // Note that it's `pointerElemReflectValue` here not `pointerReflectValue`. - if err, ok = bindVarToReflectValueWithInterfaceCheck(pointerElemReflectValue, paramsInterface); ok { - return err - } - // Retrieve its element, may be struct at last. - pointerElemReflectValue = pointerElemReflectValue.Elem() - } - - // paramsMap is the map[string]interface{} type variable for params. - // DO NOT use MapDeep here. - paramsMap := doMapConvert(paramsInterface, recursiveTypeAuto, true) - if paramsMap == nil { - return gerror.NewCodef( - gcode.CodeInvalidParameter, - `convert params from "%#v" to "map[string]interface{}" failed`, - params, - ) - } - - // Nothing to be done as the parameters are empty. - if len(paramsMap) == 0 { - return nil - } - - // It only performs one converting to the same attribute. - // doneMap is used to check repeated converting, its key is the real attribute name - // of the struct. - doneMap := make(map[string]struct{}) - - // The key of the attrMap is the attribute name of the struct, - // and the value is its replaced name for later comparison to improve performance. - var ( - tempName string - elemFieldType reflect.StructField - elemFieldValue reflect.Value - elemType = pointerElemReflectValue.Type() - attrToCheckNameMap = make(map[string]string) - ) - for i := 0; i < pointerElemReflectValue.NumField(); i++ { - elemFieldType = elemType.Field(i) - // Only do converting to public attributes. - if !utils.IsLetterUpper(elemFieldType.Name[0]) { - continue - } - // Maybe it's struct/*struct embedded. - if elemFieldType.Anonymous { - elemFieldValue = pointerElemReflectValue.Field(i) - // Ignore the interface attribute if it's nil. 
- if elemFieldValue.Kind() == reflect.Interface { - elemFieldValue = elemFieldValue.Elem() - if !elemFieldValue.IsValid() { - continue - } - } - if err = doStruct(paramsMap, elemFieldValue, mapping, priorityTag); err != nil { - return err - } - } else { - tempName = elemFieldType.Name - attrToCheckNameMap[tempName] = utils.RemoveSymbols(tempName) - } - } - if len(attrToCheckNameMap) == 0 { - return nil - } - - // The key of the tagMap is the attribute name of the struct, - // and the value is its replaced tag name for later comparison to improve performance. - var ( - attrToTagCheckNameMap = make(map[string]string) - priorityTagArray []string - ) - if priorityTag != "" { - priorityTagArray = append(utils.SplitAndTrim(priorityTag, ","), StructTagPriority...) - } else { - priorityTagArray = StructTagPriority - } - tagToAttrNameMap, err := gstructs.TagMapName(pointerElemReflectValue, priorityTagArray) - if err != nil { - return err - } - for tagName, attributeName := range tagToAttrNameMap { - // If there's something else in the tag string, - // it uses the first part which is split using char ','. - // Eg: - // orm:"id, priority" - // orm:"name, with:uid=id" - attrToTagCheckNameMap[attributeName] = utils.RemoveSymbols(strings.Split(tagName, ",")[0]) - // If tag and attribute values both exist in `paramsMap`, - // it then uses the tag value overwriting the attribute value in `paramsMap`. - if paramsMap[tagName] != nil && paramsMap[attributeName] != nil { - paramsMap[attributeName] = paramsMap[tagName] - } - } - - var ( - attrName string - checkName string - ) - for paramName, paramValue := range paramsMap { - attrName = "" - // It firstly checks the passed mapping rules. - if len(mapping) > 0 { - if passedAttrKey, ok := mapping[paramName]; ok { - attrName = passedAttrKey - } - } - // It secondly checks the predefined tags and matching rules. 
- if attrName == "" { - // It firstly considers `paramName` as accurate tag name, - // and retrieve attribute name from `tagToAttrNameMap` . - attrName = tagToAttrNameMap[paramName] - if attrName == "" { - checkName = utils.RemoveSymbols(paramName) - // Loop to find the matched attribute name with or without - // string cases and chars like '-'/'_'/'.'/' '. - - // Matching the parameters to struct tag names. - // The `attrKey` is the attribute name of the struct. - for attrKey, cmpKey := range attrToTagCheckNameMap { - if strings.EqualFold(checkName, cmpKey) { - attrName = attrKey - break - } - } - } - - // Matching the parameters to struct attributes. - if attrName == "" { - for attrKey, cmpKey := range attrToCheckNameMap { - // Eg: - // UserName eq user_name - // User-Name eq username - // username eq userName - // etc. - if strings.EqualFold(checkName, cmpKey) { - attrName = attrKey - break - } - } - } - } - - // No matching, it gives up this attribute converting. - if attrName == "" { - continue - } - // If the attribute name is already checked converting, then skip it. - if _, ok = doneMap[attrName]; ok { - continue - } - // Mark it done. - doneMap[attrName] = struct{}{} - if err = bindVarToStructAttr(pointerElemReflectValue, attrName, paramValue, mapping); err != nil { - return err - } - } - return nil -} - -// bindVarToStructAttr sets value to struct object attribute by name. -func bindVarToStructAttr(structReflectValue reflect.Value, attrName string, value interface{}, mapping map[string]string) (err error) { - structFieldValue := structReflectValue.FieldByName(attrName) - if !structFieldValue.IsValid() { - return nil - } - // CanSet checks whether attribute is public accessible. 
- if !structFieldValue.CanSet() { - return nil - } - defer func() { - if exception := recover(); exception != nil { - if err = bindVarToReflectValue(structFieldValue, value, mapping); err != nil { - err = gerror.Wrapf(err, `error binding value to attribute "%s"`, attrName) - } - } - }() - // Directly converting. - if empty.IsNil(value) { - structFieldValue.Set(reflect.Zero(structFieldValue.Type())) - } else { - // Special handling for certain types: - // - Overwrite the default type converting logic of stdlib for time.Time/*time.Time. - var structFieldTypeName = structFieldValue.Type().String() - switch structFieldTypeName { - case "time.Time", "*time.Time": - doConvertWithReflectValueSet(structFieldValue, doConvertInput{ - FromValue: value, - ToTypeName: structFieldTypeName, - ReferValue: structFieldValue, - }) - return - } - - // Try to call custom converter. - if ok, err := callCustomConverter(reflect.ValueOf(value), structFieldValue); ok { - return err - } - - // Common interface check. - var ok bool - if err, ok = bindVarToReflectValueWithInterfaceCheck(structFieldValue, value); ok { - return err - } - - // Default converting. - doConvertWithReflectValueSet(structFieldValue, doConvertInput{ - FromValue: value, - ToTypeName: structFieldTypeName, - ReferValue: structFieldValue, - }) - } - return nil -} - -// bindVarToReflectValueWithInterfaceCheck does bind using common interfaces checks. -func bindVarToReflectValueWithInterfaceCheck(reflectValue reflect.Value, value interface{}) (error, bool) { - var pointer interface{} - if reflectValue.Kind() != reflect.Ptr && reflectValue.CanAddr() { - reflectValueAddr := reflectValue.Addr() - if reflectValueAddr.IsNil() || !reflectValueAddr.IsValid() { - return nil, false - } - // Not a pointer, but can token address, that makes it can be unmarshalled. 
- pointer = reflectValue.Addr().Interface() - } else { - if reflectValue.IsNil() || !reflectValue.IsValid() { - return nil, false - } - pointer = reflectValue.Interface() - } - // UnmarshalValue. - if v, ok := pointer.(iUnmarshalValue); ok { - return v.UnmarshalValue(value), ok - } - // UnmarshalText. - if v, ok := pointer.(iUnmarshalText); ok { - var valueBytes []byte - if b, ok := value.([]byte); ok { - valueBytes = b - } else if s, ok := value.(string); ok { - valueBytes = []byte(s) - } else if f, ok := value.(iString); ok { - valueBytes = []byte(f.String()) - } - if len(valueBytes) > 0 { - return v.UnmarshalText(valueBytes), ok - } - } - // UnmarshalJSON. - if v, ok := pointer.(iUnmarshalJSON); ok { - var valueBytes []byte - if b, ok := value.([]byte); ok { - valueBytes = b - } else if s, ok := value.(string); ok { - valueBytes = []byte(s) - } else if f, ok := value.(iString); ok { - valueBytes = []byte(f.String()) - } - - if len(valueBytes) > 0 { - // If it is not a valid JSON string, it then adds char `"` on its both sides to make it is. - if !json.Valid(valueBytes) { - newValueBytes := make([]byte, len(valueBytes)+2) - newValueBytes[0] = '"' - newValueBytes[len(newValueBytes)-1] = '"' - copy(newValueBytes[1:], valueBytes) - valueBytes = newValueBytes - } - return v.UnmarshalJSON(valueBytes), ok - } - } - if v, ok := pointer.(iSet); ok { - v.Set(value) - return nil, ok - } - return nil, false -} - -// bindVarToReflectValue sets `value` to reflect value object `structFieldValue`. -func bindVarToReflectValue(structFieldValue reflect.Value, value interface{}, mapping map[string]string) (err error) { - // JSON content converting. - err, ok := doStructWithJsonCheck(value, structFieldValue) - if err != nil { - return err - } - if ok { - return nil - } - - kind := structFieldValue.Kind() - // Converting using `Set` interface implements, for some types. 
- switch kind { - case reflect.Slice, reflect.Array, reflect.Ptr, reflect.Interface: - if !structFieldValue.IsNil() { - if v, ok := structFieldValue.Interface().(iSet); ok { - v.Set(value) - return nil - } - } - } - - // Converting using reflection by kind. - switch kind { - case reflect.Map: - return doMapToMap(value, structFieldValue, mapping) - - case reflect.Struct: - // Recursively converting for struct attribute. - if err = doStruct(value, structFieldValue, nil, ""); err != nil { - // Note there's reflect conversion mechanism here. - structFieldValue.Set(reflect.ValueOf(value).Convert(structFieldValue.Type())) - } - - // Note that the slice element might be type of struct, - // so it uses Struct function doing the converting internally. - case reflect.Slice, reflect.Array: - var ( - reflectArray reflect.Value - reflectValue = reflect.ValueOf(value) - ) - if reflectValue.Kind() == reflect.Slice || reflectValue.Kind() == reflect.Array { - reflectArray = reflect.MakeSlice(structFieldValue.Type(), reflectValue.Len(), reflectValue.Len()) - if reflectValue.Len() > 0 { - var ( - elemType = reflectArray.Index(0).Type() - elemTypeName string - converted bool - ) - for i := 0; i < reflectValue.Len(); i++ { - converted = false - elemTypeName = elemType.Name() - if elemTypeName == "" { - elemTypeName = elemType.String() - } - var elem reflect.Value - if elemType.Kind() == reflect.Ptr { - elem = reflect.New(elemType.Elem()).Elem() - } else { - elem = reflect.New(elemType).Elem() - } - if elem.Kind() == reflect.Struct { - if err = doStruct(reflectValue.Index(i).Interface(), elem, nil, ""); err == nil { - converted = true - } - } - if !converted { - doConvertWithReflectValueSet(elem, doConvertInput{ - FromValue: reflectValue.Index(i).Interface(), - ToTypeName: elemTypeName, - ReferValue: elem, - }) - } - if elemType.Kind() == reflect.Ptr { - // Before it sets the `elem` to array, do pointer converting if necessary. 
- elem = elem.Addr() - } - reflectArray.Index(i).Set(elem) - } - } - } else { - var ( - elem reflect.Value - elemType = structFieldValue.Type().Elem() - elemTypeName = elemType.Name() - converted bool - ) - switch reflectValue.Kind() { - case reflect.String: - // Value is empty string. - if reflectValue.IsZero() { - var elemKind = elemType.Kind() - // Try to find the original type kind of the slice element. - if elemKind == reflect.Ptr { - elemKind = elemType.Elem().Kind() - } - switch elemKind { - case reflect.String: - // Empty string cannot be assigned to string slice. - return nil - } - } - } - if elemTypeName == "" { - elemTypeName = elemType.String() - } - if elemType.Kind() == reflect.Ptr { - elem = reflect.New(elemType.Elem()).Elem() - } else { - elem = reflect.New(elemType).Elem() - } - if elem.Kind() == reflect.Struct { - if err = doStruct(value, elem, nil, ""); err == nil { - converted = true - } - } - if !converted { - doConvertWithReflectValueSet(elem, doConvertInput{ - FromValue: value, - ToTypeName: elemTypeName, - ReferValue: elem, - }) - } - if elemType.Kind() == reflect.Ptr { - // Before it sets the `elem` to array, do pointer converting if necessary. - elem = elem.Addr() - } - reflectArray = reflect.MakeSlice(structFieldValue.Type(), 1, 1) - reflectArray.Index(0).Set(elem) - } - structFieldValue.Set(reflectArray) - - case reflect.Ptr: - if structFieldValue.IsNil() || structFieldValue.IsZero() { - // Nil or empty pointer, it creates a new one. - item := reflect.New(structFieldValue.Type().Elem()) - if err, ok = bindVarToReflectValueWithInterfaceCheck(item, value); ok { - structFieldValue.Set(item) - return err - } - elem := item.Elem() - if err = bindVarToReflectValue(elem, value, mapping); err == nil { - structFieldValue.Set(elem.Addr()) - } - } else { - // Not empty pointer, it assigns values to it. - return bindVarToReflectValue(structFieldValue.Elem(), value, mapping) - } - - // It mainly and specially handles the interface of nil value. 
- case reflect.Interface: - if value == nil { - // Specially. - structFieldValue.Set(reflect.ValueOf((*interface{})(nil))) - } else { - // Note there's reflect conversion mechanism here. - structFieldValue.Set(reflect.ValueOf(value).Convert(structFieldValue.Type())) - } - - default: - defer func() { - if exception := recover(); exception != nil { - err = gerror.NewCodef( - gcode.CodeInternalPanic, - `cannot convert value "%+v" to type "%s":%+v`, - value, - structFieldValue.Type().String(), - exception, - ) - } - }() - // It here uses reflect converting `value` to type of the attribute and assigns - // the result value to the attribute. It might fail and panic if the usual Go - // conversion rules do not allow conversion. - structFieldValue.Set(reflect.ValueOf(value).Convert(structFieldValue.Type())) - } - return nil -} diff --git a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_structs.go b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_structs.go deleted file mode 100644 index b8c04ff6..00000000 --- a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_structs.go +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gconv - -import ( - "reflect" - - "github.com/gogf/gf/v2/errors/gcode" - "github.com/gogf/gf/v2/errors/gerror" - "github.com/gogf/gf/v2/internal/json" -) - -// Structs converts any slice to given struct slice. -// Also see Scan, Struct. -func Structs(params interface{}, pointer interface{}, mapping ...map[string]string) (err error) { - return Scan(params, pointer, mapping...) -} - -// StructsTag acts as Structs but also with support for priority tag feature, which retrieves the -// specified tags for `params` key-value items to struct attribute names mapping. 
-// The parameter `priorityTag` supports multiple tags that can be joined with char ','. -func StructsTag(params interface{}, pointer interface{}, priorityTag string) (err error) { - return doStructs(params, pointer, nil, priorityTag) -} - -// doStructs converts any slice to given struct slice. -// -// It automatically checks and converts json string to []map if `params` is string/[]byte. -// -// The parameter `pointer` should be type of pointer to slice of struct. -// Note that if `pointer` is a pointer to another pointer of type of slice of struct, -// it will create the struct/pointer internally. -func doStructs(params interface{}, pointer interface{}, mapping map[string]string, priorityTag string) (err error) { - if params == nil { - // If `params` is nil, no conversion. - return nil - } - if pointer == nil { - return gerror.NewCode(gcode.CodeInvalidParameter, "object pointer cannot be nil") - } - - if doStructsByDirectReflectSet(params, pointer) { - return nil - } - - defer func() { - // Catch the panic, especially the reflection operation panics. - if exception := recover(); exception != nil { - if v, ok := exception.(error); ok && gerror.HasStack(v) { - err = v - } else { - err = gerror.NewCodeSkipf(gcode.CodeInternalPanic, 1, "%+v", exception) - } - } - }() - // If given `params` is JSON, it then uses json.Unmarshal doing the converting. - switch r := params.(type) { - case []byte: - if json.Valid(r) { - if rv, ok := pointer.(reflect.Value); ok { - if rv.Kind() == reflect.Ptr { - return json.UnmarshalUseNumber(r, rv.Interface()) - } - } else { - return json.UnmarshalUseNumber(r, pointer) - } - } - case string: - if paramsBytes := []byte(r); json.Valid(paramsBytes) { - if rv, ok := pointer.(reflect.Value); ok { - if rv.Kind() == reflect.Ptr { - return json.UnmarshalUseNumber(paramsBytes, rv.Interface()) - } - } else { - return json.UnmarshalUseNumber(paramsBytes, pointer) - } - } - } - // Pointer type check. 
- pointerRv, ok := pointer.(reflect.Value) - if !ok { - pointerRv = reflect.ValueOf(pointer) - if kind := pointerRv.Kind(); kind != reflect.Ptr { - return gerror.NewCodef(gcode.CodeInvalidParameter, "pointer should be type of pointer, but got: %v", kind) - } - } - // Converting `params` to map slice. - var ( - paramsList []interface{} - paramsRv = reflect.ValueOf(params) - paramsKind = paramsRv.Kind() - ) - for paramsKind == reflect.Ptr { - paramsRv = paramsRv.Elem() - paramsKind = paramsRv.Kind() - } - switch paramsKind { - case reflect.Slice, reflect.Array: - paramsList = make([]interface{}, paramsRv.Len()) - for i := 0; i < paramsRv.Len(); i++ { - paramsList[i] = paramsRv.Index(i).Interface() - } - default: - var paramsMaps = Maps(params) - paramsList = make([]interface{}, len(paramsMaps)) - for i := 0; i < len(paramsMaps); i++ { - paramsList[i] = paramsMaps[i] - } - } - // If `params` is an empty slice, no conversion. - if len(paramsList) == 0 { - return nil - } - var ( - reflectElemArray = reflect.MakeSlice(pointerRv.Type().Elem(), len(paramsList), len(paramsList)) - itemType = reflectElemArray.Index(0).Type() - itemTypeKind = itemType.Kind() - pointerRvElem = pointerRv.Elem() - pointerRvLength = pointerRvElem.Len() - ) - if itemTypeKind == reflect.Ptr { - // Pointer element. - for i := 0; i < len(paramsList); i++ { - var tempReflectValue reflect.Value - if i < pointerRvLength { - // Might be nil. - tempReflectValue = pointerRvElem.Index(i).Elem() - } - if !tempReflectValue.IsValid() { - tempReflectValue = reflect.New(itemType.Elem()).Elem() - } - if err = doStruct(paramsList[i], tempReflectValue, mapping, priorityTag); err != nil { - return err - } - reflectElemArray.Index(i).Set(tempReflectValue.Addr()) - } - } else { - // Struct element. 
- for i := 0; i < len(paramsList); i++ { - var tempReflectValue reflect.Value - if i < pointerRvLength { - tempReflectValue = pointerRvElem.Index(i) - } else { - tempReflectValue = reflect.New(itemType).Elem() - } - if err = doStruct(paramsList[i], tempReflectValue, mapping, priorityTag); err != nil { - return err - } - reflectElemArray.Index(i).Set(tempReflectValue) - } - } - pointerRv.Elem().Set(reflectElemArray) - return nil -} - -// doStructsByDirectReflectSet do the converting directly using reflect Set. -// It returns true if success, or else false. -func doStructsByDirectReflectSet(params interface{}, pointer interface{}) (ok bool) { - v1 := reflect.ValueOf(pointer) - v2 := reflect.ValueOf(params) - if v1.Kind() == reflect.Ptr { - if elem := v1.Elem(); elem.IsValid() && elem.Type() == v2.Type() { - elem.Set(v2) - ok = true - } - } - return ok -} diff --git a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_time.go b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_time.go deleted file mode 100644 index a5269cad..00000000 --- a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_time.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gconv - -import ( - "time" - - "github.com/gogf/gf/v2/internal/utils" - "github.com/gogf/gf/v2/os/gtime" -) - -// Time converts `any` to time.Time. -func Time(any interface{}, format ...string) time.Time { - // It's already this type. - if len(format) == 0 { - if v, ok := any.(time.Time); ok { - return v - } - } - if t := GTime(any, format...); t != nil { - return t.Time - } - return time.Time{} -} - -// Duration converts `any` to time.Duration. -// If `any` is string, then it uses time.ParseDuration to convert it. -// If `any` is numeric, then it converts `any` as nanoseconds. 
-func Duration(any interface{}) time.Duration { - // It's already this type. - if v, ok := any.(time.Duration); ok { - return v - } - s := String(any) - if !utils.IsNumeric(s) { - d, _ := gtime.ParseDuration(s) - return d - } - return time.Duration(Int64(any)) -} - -// GTime converts `any` to *gtime.Time. -// The parameter `format` can be used to specify the format of `any`. -// If no `format` given, it converts `any` using gtime.NewFromTimeStamp if `any` is numeric, -// or using gtime.StrToTime if `any` is string. -func GTime(any interface{}, format ...string) *gtime.Time { - if any == nil { - return nil - } - if v, ok := any.(iGTime); ok { - return v.GTime(format...) - } - // It's already this type. - if len(format) == 0 { - if v, ok := any.(*gtime.Time); ok { - return v - } - if t, ok := any.(time.Time); ok { - return gtime.New(t) - } - if t, ok := any.(*time.Time); ok { - return gtime.New(t) - } - } - s := String(any) - if len(s) == 0 { - return gtime.New() - } - // Priority conversion using given format. - if len(format) > 0 { - t, _ := gtime.StrToTimeFormat(s, format[0]) - return t - } - if utils.IsNumeric(s) { - return gtime.NewFromTimeStamp(Int64(s)) - } else { - t, _ := gtime.StrToTime(s) - return t - } -} diff --git a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_uint.go b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_uint.go deleted file mode 100644 index 028a14de..00000000 --- a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_uint.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gconv - -import ( - "math" - "strconv" - - "github.com/gogf/gf/v2/encoding/gbinary" -) - -// Uint converts `any` to uint. 
-func Uint(any interface{}) uint { - if any == nil { - return 0 - } - if v, ok := any.(uint); ok { - return v - } - return uint(Uint64(any)) -} - -// Uint8 converts `any` to uint8. -func Uint8(any interface{}) uint8 { - if any == nil { - return 0 - } - if v, ok := any.(uint8); ok { - return v - } - return uint8(Uint64(any)) -} - -// Uint16 converts `any` to uint16. -func Uint16(any interface{}) uint16 { - if any == nil { - return 0 - } - if v, ok := any.(uint16); ok { - return v - } - return uint16(Uint64(any)) -} - -// Uint32 converts `any` to uint32. -func Uint32(any interface{}) uint32 { - if any == nil { - return 0 - } - if v, ok := any.(uint32); ok { - return v - } - return uint32(Uint64(any)) -} - -// Uint64 converts `any` to uint64. -func Uint64(any interface{}) uint64 { - if any == nil { - return 0 - } - switch value := any.(type) { - case int: - return uint64(value) - case int8: - return uint64(value) - case int16: - return uint64(value) - case int32: - return uint64(value) - case int64: - return uint64(value) - case uint: - return uint64(value) - case uint8: - return uint64(value) - case uint16: - return uint64(value) - case uint32: - return uint64(value) - case uint64: - return value - case float32: - return uint64(value) - case float64: - return uint64(value) - case bool: - if value { - return 1 - } - return 0 - case []byte: - return gbinary.DecodeToUint64(value) - default: - if f, ok := value.(iUint64); ok { - return f.Uint64() - } - s := String(value) - // Hexadecimal - if len(s) > 2 && s[0] == '0' && (s[1] == 'x' || s[1] == 'X') { - if v, e := strconv.ParseUint(s[2:], 16, 64); e == nil { - return v - } - } - // Decimal - if v, e := strconv.ParseUint(s, 10, 64); e == nil { - return v - } - // Float64 - if valueFloat64 := Float64(value); math.IsNaN(valueFloat64) { - return 0 - } else { - return uint64(valueFloat64) - } - } -} diff --git a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_unsafe.go b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_unsafe.go 
deleted file mode 100644 index e4b24fdf..00000000 --- a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_unsafe.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gconv - -import "unsafe" - -// UnsafeStrToBytes converts string to []byte without memory copy. -// Note that, if you completely sure you will never use `s` variable in the feature, -// you can use this unsafe function to implement type conversion in high performance. -func UnsafeStrToBytes(s string) []byte { - return *(*[]byte)(unsafe.Pointer(&s)) -} - -// UnsafeBytesToStr converts []byte to string without memory copy. -// Note that, if you completely sure you will never use `b` variable in the feature, -// you can use this unsafe function to implement type conversion in high performance. -func UnsafeBytesToStr(b []byte) string { - return *(*string)(unsafe.Pointer(&b)) -} diff --git a/vendor/github.com/gogf/gf/v2/util/grand/grand.go b/vendor/github.com/gogf/gf/v2/util/grand/grand.go deleted file mode 100644 index 90fd93e6..00000000 --- a/vendor/github.com/gogf/gf/v2/util/grand/grand.go +++ /dev/null @@ -1,195 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -// Package grand provides high performance random bytes/number/string generation functionality. 
-package grand - -import ( - "encoding/binary" - "time" -) - -var ( - letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" // 52 - symbols = "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~" // 32 - digits = "0123456789" // 10 - characters = letters + digits + symbols // 94 -) - -// Intn returns an int number which is between 0 and max: [0, max). -// -// Note that: -// 1. The `max` can only be greater than 0, or else it returns `max` directly; -// 2. The result is greater than or equal to 0, but less than `max`; -// 3. The result number is 32bit and less than math.MaxUint32. -func Intn(max int) int { - if max <= 0 { - return max - } - n := int(binary.LittleEndian.Uint32(<-bufferChan)) % max - if (max > 0 && n < 0) || (max < 0 && n > 0) { - return -n - } - return n -} - -// B retrieves and returns random bytes of given length `n`. -func B(n int) []byte { - if n <= 0 { - return nil - } - i := 0 - b := make([]byte, n) - for { - copy(b[i:], <-bufferChan) - i += 4 - if i >= n { - break - } - } - return b -} - -// N returns a random int between min and max: [min, max]. -// The `min` and `max` also support negative numbers. -func N(min, max int) int { - if min >= max { - return min - } - if min >= 0 { - return Intn(max-min+1) + min - } - // As `Intn` dose not support negative number, - // so we should first shift the value to right, - // then call `Intn` to produce the random number, - // and finally shift the result back to left. - return Intn(max+(0-min)+1) - (0 - min) -} - -// S returns a random string which contains digits and letters, and its length is `n`. -// The optional parameter `symbols` specifies whether the result could contain symbols, -// which is false in default. 
-func S(n int, symbols ...bool) string { - if n <= 0 { - return "" - } - var ( - b = make([]byte, n) - numberBytes = B(n) - ) - for i := range b { - if len(symbols) > 0 && symbols[0] { - b[i] = characters[numberBytes[i]%94] - } else { - b[i] = characters[numberBytes[i]%62] - } - } - return string(b) -} - -// D returns a random time.Duration between min and max: [min, max]. -func D(min, max time.Duration) time.Duration { - multiple := int64(1) - if min != 0 { - for min%10 == 0 { - multiple *= 10 - min /= 10 - max /= 10 - } - } - n := int64(N(int(min), int(max))) - return time.Duration(n * multiple) -} - -// Str randomly picks and returns `n` count of chars from given string `s`. -// It also supports unicode string like Chinese/Russian/Japanese, etc. -func Str(s string, n int) string { - if n <= 0 { - return "" - } - var ( - b = make([]rune, n) - runes = []rune(s) - ) - if len(runes) <= 255 { - numberBytes := B(n) - for i := range b { - b[i] = runes[int(numberBytes[i])%len(runes)] - } - } else { - for i := range b { - b[i] = runes[Intn(len(runes))] - } - } - return string(b) -} - -// Digits returns a random string which contains only digits, and its length is `n`. -func Digits(n int) string { - if n <= 0 { - return "" - } - var ( - b = make([]byte, n) - numberBytes = B(n) - ) - for i := range b { - b[i] = digits[numberBytes[i]%10] - } - return string(b) -} - -// Letters returns a random string which contains only letters, and its length is `n`. -func Letters(n int) string { - if n <= 0 { - return "" - } - var ( - b = make([]byte, n) - numberBytes = B(n) - ) - for i := range b { - b[i] = letters[numberBytes[i]%52] - } - return string(b) -} - -// Symbols returns a random string which contains only symbols, and its length is `n`. 
-func Symbols(n int) string { - if n <= 0 { - return "" - } - var ( - b = make([]byte, n) - numberBytes = B(n) - ) - for i := range b { - b[i] = symbols[numberBytes[i]%32] - } - return string(b) -} - -// Perm returns, as a slice of n int numbers, a pseudo-random permutation of the integers [0,n). -// TODO performance improving for large slice producing. -func Perm(n int) []int { - m := make([]int, n) - for i := 0; i < n; i++ { - j := Intn(i + 1) - m[i] = m[j] - m[j] = i - } - return m -} - -// Meet randomly calculate whether the given probability `num`/`total` is met. -func Meet(num, total int) bool { - return Intn(total) < num -} - -// MeetProb randomly calculate whether the given probability is met. -func MeetProb(prob float32) bool { - return Intn(1e7) < int(prob*1e7) -} diff --git a/vendor/github.com/gogf/gf/v2/util/grand/grand_buffer.go b/vendor/github.com/gogf/gf/v2/util/grand/grand_buffer.go deleted file mode 100644 index 4527c25a..00000000 --- a/vendor/github.com/gogf/gf/v2/util/grand/grand_buffer.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package grand - -import ( - "crypto/rand" - - "github.com/gogf/gf/v2/errors/gcode" - "github.com/gogf/gf/v2/errors/gerror" -) - -const ( - // Buffer size for uint32 random number. - bufferChanSize = 10000 -) - -var ( - // bufferChan is the buffer for random bytes, - // every item storing 4 bytes. - bufferChan = make(chan []byte, bufferChanSize) -) - -func init() { - go asyncProducingRandomBufferBytesLoop() -} - -// asyncProducingRandomBufferBytes is a named goroutine, which uses an asynchronous goroutine -// to produce the random bytes, and a buffer chan to store the random bytes. -// So it has high performance to generate random numbers. 
-func asyncProducingRandomBufferBytesLoop() { - var step int - for { - buffer := make([]byte, 1024) - if n, err := rand.Read(buffer); err != nil { - panic(gerror.WrapCode(gcode.CodeInternalError, err, `error reading random buffer from system`)) - } else { - // The random buffer from system is very expensive, - // so fully reuse the random buffer by changing - // the step with a different number can - // improve the performance a lot. - // for _, step = range []int{4, 5, 6, 7} { - for _, step = range []int{4} { - for i := 0; i <= n-4; i += step { - bufferChan <- buffer[i : i+4] - } - } - } - } -} diff --git a/vendor/github.com/gogf/gf/v2/util/gtag/gtag.go b/vendor/github.com/gogf/gf/v2/util/gtag/gtag.go deleted file mode 100644 index 757f59df..00000000 --- a/vendor/github.com/gogf/gf/v2/util/gtag/gtag.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -// Package gtag providing tag content storing for struct. -// -// Note that calling functions of this package is not concurrently safe, -// which means you cannot call them in runtime but in boot procedure. -package gtag - -const ( - Default = "default" // Default value tag of struct field for receiving parameters from HTTP request. - DefaultShort = "d" // Short name of Default. - Param = "param" // Parameter name for converting certain parameter to specified struct field. - ParamShort = "p" // Short name of Param. - Valid = "valid" // Validation rule tag for struct of field. - ValidShort = "v" // Short name of Valid. - NoValidation = "nv" // No validation for specified struct/field. - ORM = "orm" // ORM tag for ORM feature, which performs different features according scenarios. - Arg = "arg" // Arg tag for struct, usually for command argument option. 
- Brief = "brief" // Brief tag for struct, usually be considered as summary. - Root = "root" // Root tag for struct, usually for nested commands management. - Additional = "additional" // Additional tag for struct, usually for additional description of command. - AdditionalShort = "ad" // Short name of Additional. - Path = `path` // Route path for HTTP request. - Method = `method` // Route method for HTTP request. - Domain = `domain` // Route domain for HTTP request. - Mime = `mime` // MIME type for HTTP request/response. - Consumes = `consumes` // MIME type for HTTP request. - Summary = `summary` // Summary for struct, usually for OpenAPI in request struct. - SummaryShort = `sm` // Short name of Summary. - SummaryShort2 = `sum` // Short name of Summary. - Description = `description` // Description for struct, usually for OpenAPI in request struct. - DescriptionShort = `dc` // Short name of Description. - DescriptionShort2 = `des` // Short name of Description. - Example = `example` // Example for struct, usually for OpenAPI in request struct. - ExampleShort = `eg` // Short name of Example. - Examples = `examples` // Examples for struct, usually for OpenAPI in request struct. - ExamplesShort = `egs` // Short name of Examples. - ExternalDocs = `externalDocs` // External docs for struct, always for OpenAPI in request struct. - ExternalDocsShort = `ed` // Short name of ExternalDocs. - GConv = "gconv" // GConv defines the converting target name for specified struct field. - GConvShort = "c" // GConv defines the converting target name for specified struct field. - Json = "json" // Json tag is supported by stdlib. - Security = "security" // Security defines scheme for authentication. Detail to see https://swagger.io/docs/specification/authentication/ - In = "in" // Swagger distinguishes between the following parameter types based on the parameter location. 
Detail to see https://swagger.io/docs/specification/describing-parameters/ -) diff --git a/vendor/github.com/gogf/gf/v2/util/gtag/gtag_enums.go b/vendor/github.com/gogf/gf/v2/util/gtag/gtag_enums.go deleted file mode 100644 index 2c9d65c3..00000000 --- a/vendor/github.com/gogf/gf/v2/util/gtag/gtag_enums.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gtag - -import ( - "github.com/gogf/gf/v2/internal/json" -) - -var ( - // Type name => enums json. - enumsMap = make(map[string]json.RawMessage) -) - -// SetGlobalEnums sets the global enums into package. -// Note that this operation is not concurrent safety. -func SetGlobalEnums(enumsJson string) error { - return json.Unmarshal([]byte(enumsJson), &enumsMap) -} - -// GetGlobalEnums retrieves and returns the global enums. -func GetGlobalEnums() (string, error) { - enumsBytes, err := json.Marshal(enumsMap) - if err != nil { - return "", err - } - return string(enumsBytes), nil -} - -// GetEnumsByType retrieves and returns the stored enums json by type name. -// The type name is like: github.com/gogf/gf/v2/encoding/gjson.ContentType -func GetEnumsByType(typeName string) string { - return string(enumsMap[typeName]) -} diff --git a/vendor/github.com/gogf/gf/v2/util/gtag/gtag_func.go b/vendor/github.com/gogf/gf/v2/util/gtag/gtag_func.go deleted file mode 100644 index 5085c788..00000000 --- a/vendor/github.com/gogf/gf/v2/util/gtag/gtag_func.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. 
- -package gtag - -import ( - "regexp" - - "github.com/gogf/gf/v2/errors/gerror" -) - -var ( - data = make(map[string]string) - regex = regexp.MustCompile(`\{(.+?)\}`) -) - -// Set sets tag content for specified name. -// Note that it panics if `name` already exists. -func Set(name, value string) { - if _, ok := data[name]; ok { - panic(gerror.Newf(`value for tag name "%s" already exists`, name)) - } - data[name] = value -} - -// SetOver performs as Set, but it overwrites the old value if `name` already exists. -func SetOver(name, value string) { - data[name] = value -} - -// Sets sets multiple tag content by map. -func Sets(m map[string]string) { - for k, v := range m { - Set(k, v) - } -} - -// SetsOver performs as Sets, but it overwrites the old value if `name` already exists. -func SetsOver(m map[string]string) { - for k, v := range m { - SetOver(k, v) - } -} - -// Get retrieves and returns the stored tag content for specified name. -func Get(name string) string { - return data[name] -} - -// Parse parses and returns the content by replacing all tag name variable to -// its content for given `content`. -// Eg: -// gtag.Set("demo", "content") -// Parse(`This is {demo}`) -> `This is content`. -func Parse(content string) string { - return regex.ReplaceAllStringFunc(content, func(s string) string { - if v, ok := data[s[1:len(s)-1]]; ok { - return v - } - return s - }) -} diff --git a/vendor/github.com/gogf/gf/v2/util/gutil/gutil.go b/vendor/github.com/gogf/gf/v2/util/gutil/gutil.go deleted file mode 100644 index b24f1b69..00000000 --- a/vendor/github.com/gogf/gf/v2/util/gutil/gutil.go +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -// Package gutil provides utility functions. 
-package gutil - -import ( - "context" - "github.com/gogf/gf/v2/errors/gcode" - "reflect" - - "github.com/gogf/gf/v2/errors/gerror" - "github.com/gogf/gf/v2/internal/empty" - "github.com/gogf/gf/v2/util/gconv" -) - -const ( - dumpIndent = ` ` -) - -// Throw throws out an exception, which can be caught be TryCatch or recover. -func Throw(exception interface{}) { - panic(exception) -} - -// Try implements try... logistics using internal panic...recover. -// It returns error if any exception occurs, or else it returns nil. -func Try(ctx context.Context, try func(ctx context.Context)) (err error) { - defer func() { - if exception := recover(); exception != nil { - if v, ok := exception.(error); ok && gerror.HasStack(v) { - err = v - } else { - err = gerror.NewCodef(gcode.CodeInternalPanic, "%+v", exception) - } - } - }() - try(ctx) - return -} - -// TryCatch implements try...catch... logistics using internal panic...recover. -// It automatically calls function `catch` if any exception occurs and passes the exception as an error. -func TryCatch(ctx context.Context, try func(ctx context.Context), catch ...func(ctx context.Context, exception error)) { - defer func() { - if exception := recover(); exception != nil && len(catch) > 0 { - if v, ok := exception.(error); ok && gerror.HasStack(v) { - catch[0](ctx, v) - } else { - catch[0](ctx, gerror.NewCodef(gcode.CodeInternalPanic, "%+v", exception)) - } - } - }() - try(ctx) -} - -// IsEmpty checks given `value` empty or not. -// It returns false if `value` is: integer(0), bool(false), slice/map(len=0), nil; -// or else returns true. -func IsEmpty(value interface{}) bool { - return empty.IsEmpty(value) -} - -// Keys retrieves and returns the keys from given map or struct. 
-func Keys(mapOrStruct interface{}) (keysOrAttrs []string) { - keysOrAttrs = make([]string, 0) - if m, ok := mapOrStruct.(map[string]interface{}); ok { - for k := range m { - keysOrAttrs = append(keysOrAttrs, k) - } - return - } - var ( - reflectValue reflect.Value - reflectKind reflect.Kind - ) - if v, ok := mapOrStruct.(reflect.Value); ok { - reflectValue = v - } else { - reflectValue = reflect.ValueOf(mapOrStruct) - } - reflectKind = reflectValue.Kind() - for reflectKind == reflect.Ptr { - if !reflectValue.IsValid() || reflectValue.IsNil() { - reflectValue = reflect.New(reflectValue.Type().Elem()).Elem() - reflectKind = reflectValue.Kind() - } else { - reflectValue = reflectValue.Elem() - reflectKind = reflectValue.Kind() - } - } - switch reflectKind { - case reflect.Map: - for _, k := range reflectValue.MapKeys() { - keysOrAttrs = append(keysOrAttrs, gconv.String(k.Interface())) - } - case reflect.Struct: - var ( - fieldType reflect.StructField - reflectType = reflectValue.Type() - ) - for i := 0; i < reflectValue.NumField(); i++ { - fieldType = reflectType.Field(i) - if fieldType.Anonymous { - keysOrAttrs = append(keysOrAttrs, Keys(reflectValue.Field(i))...) - } else { - keysOrAttrs = append(keysOrAttrs, fieldType.Name) - } - } - } - return -} - -// Values retrieves and returns the values from given map or struct. 
-func Values(mapOrStruct interface{}) (values []interface{}) { - values = make([]interface{}, 0) - if m, ok := mapOrStruct.(map[string]interface{}); ok { - for _, v := range m { - values = append(values, v) - } - return - } - var ( - reflectValue reflect.Value - reflectKind reflect.Kind - ) - if v, ok := mapOrStruct.(reflect.Value); ok { - reflectValue = v - } else { - reflectValue = reflect.ValueOf(mapOrStruct) - } - reflectKind = reflectValue.Kind() - for reflectKind == reflect.Ptr { - reflectValue = reflectValue.Elem() - reflectKind = reflectValue.Kind() - } - switch reflectKind { - case reflect.Map: - for _, k := range reflectValue.MapKeys() { - values = append(values, reflectValue.MapIndex(k).Interface()) - } - case reflect.Struct: - var ( - fieldType reflect.StructField - reflectType = reflectValue.Type() - ) - for i := 0; i < reflectValue.NumField(); i++ { - fieldType = reflectType.Field(i) - if fieldType.Anonymous { - values = append(values, Values(reflectValue.Field(i))...) - } else { - values = append(values, reflectValue.Field(i).Interface()) - } - } - } - return -} diff --git a/vendor/github.com/gogf/gf/v2/util/gutil/gutil_comparator.go b/vendor/github.com/gogf/gf/v2/util/gutil/gutil_comparator.go deleted file mode 100644 index c454cf92..00000000 --- a/vendor/github.com/gogf/gf/v2/util/gutil/gutil_comparator.go +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gutil - -import ( - "strings" - - "github.com/gogf/gf/v2/util/gconv" -) - -// Comparator is a function that compare a and b, and returns the result as int. 
-// -// Should return a number: -// -// negative , if a < b -// zero , if a == b -// positive , if a > b -type Comparator func(a, b interface{}) int - -// ComparatorString provides a fast comparison on strings. -func ComparatorString(a, b interface{}) int { - return strings.Compare(gconv.String(a), gconv.String(b)) -} - -// ComparatorInt provides a basic comparison on int. -func ComparatorInt(a, b interface{}) int { - return gconv.Int(a) - gconv.Int(b) -} - -// ComparatorInt8 provides a basic comparison on int8. -func ComparatorInt8(a, b interface{}) int { - return int(gconv.Int8(a) - gconv.Int8(b)) -} - -// ComparatorInt16 provides a basic comparison on int16. -func ComparatorInt16(a, b interface{}) int { - return int(gconv.Int16(a) - gconv.Int16(b)) -} - -// ComparatorInt32 provides a basic comparison on int32. -func ComparatorInt32(a, b interface{}) int { - return int(gconv.Int32(a) - gconv.Int32(b)) -} - -// ComparatorInt64 provides a basic comparison on int64. -func ComparatorInt64(a, b interface{}) int { - return int(gconv.Int64(a) - gconv.Int64(b)) -} - -// ComparatorUint provides a basic comparison on uint. -func ComparatorUint(a, b interface{}) int { - return int(gconv.Uint(a) - gconv.Uint(b)) -} - -// ComparatorUint8 provides a basic comparison on uint8. -func ComparatorUint8(a, b interface{}) int { - return int(gconv.Uint8(a) - gconv.Uint8(b)) -} - -// ComparatorUint16 provides a basic comparison on uint16. -func ComparatorUint16(a, b interface{}) int { - return int(gconv.Uint16(a) - gconv.Uint16(b)) -} - -// ComparatorUint32 provides a basic comparison on uint32. -func ComparatorUint32(a, b interface{}) int { - return int(gconv.Uint32(a) - gconv.Uint32(b)) -} - -// ComparatorUint64 provides a basic comparison on uint64. -func ComparatorUint64(a, b interface{}) int { - return int(gconv.Uint64(a) - gconv.Uint64(b)) -} - -// ComparatorFloat32 provides a basic comparison on float32. 
-func ComparatorFloat32(a, b interface{}) int { - aFloat := gconv.Float32(a) - bFloat := gconv.Float32(b) - if aFloat == bFloat { - return 0 - } - if aFloat > bFloat { - return 1 - } - return -1 -} - -// ComparatorFloat64 provides a basic comparison on float64. -func ComparatorFloat64(a, b interface{}) int { - aFloat := gconv.Float64(a) - bFloat := gconv.Float64(b) - if aFloat == bFloat { - return 0 - } - if aFloat > bFloat { - return 1 - } - return -1 -} - -// ComparatorByte provides a basic comparison on byte. -func ComparatorByte(a, b interface{}) int { - return int(gconv.Byte(a) - gconv.Byte(b)) -} - -// ComparatorRune provides a basic comparison on rune. -func ComparatorRune(a, b interface{}) int { - return int(gconv.Rune(a) - gconv.Rune(b)) -} - -// ComparatorTime provides a basic comparison on time.Time. -func ComparatorTime(a, b interface{}) int { - aTime := gconv.Time(a) - bTime := gconv.Time(b) - switch { - case aTime.After(bTime): - return 1 - case aTime.Before(bTime): - return -1 - default: - return 0 - } -} diff --git a/vendor/github.com/gogf/gf/v2/util/gutil/gutil_copy.go b/vendor/github.com/gogf/gf/v2/util/gutil/gutil_copy.go deleted file mode 100644 index 24398b38..00000000 --- a/vendor/github.com/gogf/gf/v2/util/gutil/gutil_copy.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gutil - -import ( - "github.com/gogf/gf/v2/internal/deepcopy" -) - -// Copy returns a deep copy of v. -// -// Copy is unable to copy unexported fields in a struct (lowercase field names). -// Unexported fields can't be reflected by the Go runtime and therefore -// they can't perform any data copies. 
-func Copy(src interface{}) (dst interface{}) { - return deepcopy.Copy(src) -} diff --git a/vendor/github.com/gogf/gf/v2/util/gutil/gutil_default.go b/vendor/github.com/gogf/gf/v2/util/gutil/gutil_default.go deleted file mode 100644 index 881b0374..00000000 --- a/vendor/github.com/gogf/gf/v2/util/gutil/gutil_default.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gutil - -// GetOrDefaultStr checks and returns value according whether parameter `param` available. -// It returns `param[0]` if it is available, or else it returns `def`. -func GetOrDefaultStr(def string, param ...string) string { - value := def - if len(param) > 0 && param[0] != "" { - value = param[0] - } - return value -} - -// GetOrDefaultAny checks and returns value according whether parameter `param` available. -// It returns `param[0]` if it is available, or else it returns `def`. -func GetOrDefaultAny(def interface{}, param ...interface{}) interface{} { - value := def - if len(param) > 0 && param[0] != "" { - value = param[0] - } - return value -} diff --git a/vendor/github.com/gogf/gf/v2/util/gutil/gutil_dump.go b/vendor/github.com/gogf/gf/v2/util/gutil/gutil_dump.go deleted file mode 100644 index e25509a4..00000000 --- a/vendor/github.com/gogf/gf/v2/util/gutil/gutil_dump.go +++ /dev/null @@ -1,484 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. 
- -package gutil - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "reflect" - "strings" - - "github.com/gogf/gf/v2/internal/reflection" - "github.com/gogf/gf/v2/os/gstructs" - "github.com/gogf/gf/v2/text/gstr" -) - -// iString is used for type assert api for String(). -type iString interface { - String() string -} - -// iError is used for type assert api for Error(). -type iError interface { - Error() string -} - -// iMarshalJSON is the interface for custom Json marshaling. -type iMarshalJSON interface { - MarshalJSON() ([]byte, error) -} - -// DumpOption specifies the behavior of function Export. -type DumpOption struct { - WithType bool // WithType specifies dumping content with type information. - ExportedOnly bool // Only dump Exported fields for structs. -} - -// Dump prints variables `values` to stdout with more manually readable. -func Dump(values ...interface{}) { - for _, value := range values { - DumpWithOption(value, DumpOption{ - WithType: false, - ExportedOnly: false, - }) - } -} - -// DumpWithType acts like Dump, but with type information. -// Also see Dump. -func DumpWithType(values ...interface{}) { - for _, value := range values { - DumpWithOption(value, DumpOption{ - WithType: true, - ExportedOnly: false, - }) - } -} - -// DumpWithOption returns variables `values` as a string with more manually readable. 
-func DumpWithOption(value interface{}, option DumpOption) { - buffer := bytes.NewBuffer(nil) - DumpTo(buffer, value, DumpOption{ - WithType: option.WithType, - ExportedOnly: option.ExportedOnly, - }) - fmt.Println(buffer.String()) -} - -// DumpTo writes variables `values` as a string in to `writer` with more manually readable -func DumpTo(writer io.Writer, value interface{}, option DumpOption) { - buffer := bytes.NewBuffer(nil) - doDump(value, "", buffer, doDumpOption{ - WithType: option.WithType, - ExportedOnly: option.ExportedOnly, - }) - _, _ = writer.Write(buffer.Bytes()) -} - -type doDumpOption struct { - WithType bool - ExportedOnly bool - DumpedPointerSet map[string]struct{} -} - -func doDump(value interface{}, indent string, buffer *bytes.Buffer, option doDumpOption) { - if option.DumpedPointerSet == nil { - option.DumpedPointerSet = map[string]struct{}{} - } - - if value == nil { - buffer.WriteString(``) - return - } - var reflectValue reflect.Value - if v, ok := value.(reflect.Value); ok { - reflectValue = v - if v.IsValid() && v.CanInterface() { - value = v.Interface() - } else { - if convertedValue, ok := reflection.ValueToInterface(v); ok { - value = convertedValue - } - } - } else { - reflectValue = reflect.ValueOf(value) - } - var reflectKind = reflectValue.Kind() - // Double check nil value. 
- if value == nil || reflectKind == reflect.Invalid { - buffer.WriteString(``) - return - } - var ( - reflectTypeName = reflectValue.Type().String() - ptrAddress string - newIndent = indent + dumpIndent - ) - reflectTypeName = strings.ReplaceAll(reflectTypeName, `[]uint8`, `[]byte`) - for reflectKind == reflect.Ptr { - if ptrAddress == "" { - ptrAddress = fmt.Sprintf(`0x%x`, reflectValue.Pointer()) - } - reflectValue = reflectValue.Elem() - reflectKind = reflectValue.Kind() - } - var ( - exportInternalInput = doDumpInternalInput{ - Value: value, - Indent: indent, - NewIndent: newIndent, - Buffer: buffer, - Option: option, - PtrAddress: ptrAddress, - ReflectValue: reflectValue, - ReflectTypeName: reflectTypeName, - ExportedOnly: option.ExportedOnly, - DumpedPointerSet: option.DumpedPointerSet, - } - ) - switch reflectKind { - case reflect.Slice, reflect.Array: - doDumpSlice(exportInternalInput) - - case reflect.Map: - doDumpMap(exportInternalInput) - - case reflect.Struct: - doDumpStruct(exportInternalInput) - - case reflect.String: - doDumpString(exportInternalInput) - - case reflect.Bool: - doDumpBool(exportInternalInput) - - case - reflect.Int, - reflect.Int8, - reflect.Int16, - reflect.Int32, - reflect.Int64, - reflect.Uint, - reflect.Uint8, - reflect.Uint16, - reflect.Uint32, - reflect.Uint64, - reflect.Float32, - reflect.Float64, - reflect.Complex64, - reflect.Complex128: - doDumpNumber(exportInternalInput) - - case reflect.Chan: - buffer.WriteString(fmt.Sprintf(`<%s>`, reflectValue.Type().String())) - - case reflect.Func: - if reflectValue.IsNil() || !reflectValue.IsValid() { - buffer.WriteString(``) - } else { - buffer.WriteString(fmt.Sprintf(`<%s>`, reflectValue.Type().String())) - } - - case reflect.Interface: - doDump(exportInternalInput.ReflectValue.Elem(), indent, buffer, option) - - default: - doDumpDefault(exportInternalInput) - } -} - -type doDumpInternalInput struct { - Value interface{} - Indent string - NewIndent string - Buffer *bytes.Buffer - 
Option doDumpOption - ReflectValue reflect.Value - ReflectTypeName string - PtrAddress string - ExportedOnly bool - DumpedPointerSet map[string]struct{} -} - -func doDumpSlice(in doDumpInternalInput) { - if b, ok := in.Value.([]byte); ok { - if !in.Option.WithType { - in.Buffer.WriteString(fmt.Sprintf(`"%s"`, addSlashesForString(string(b)))) - } else { - in.Buffer.WriteString(fmt.Sprintf( - `%s(%d) "%s"`, - in.ReflectTypeName, - len(string(b)), - string(b), - )) - } - return - } - if in.ReflectValue.Len() == 0 { - if !in.Option.WithType { - in.Buffer.WriteString("[]") - } else { - in.Buffer.WriteString(fmt.Sprintf("%s(0) []", in.ReflectTypeName)) - } - return - } - if !in.Option.WithType { - in.Buffer.WriteString("[\n") - } else { - in.Buffer.WriteString(fmt.Sprintf("%s(%d) [\n", in.ReflectTypeName, in.ReflectValue.Len())) - } - for i := 0; i < in.ReflectValue.Len(); i++ { - in.Buffer.WriteString(in.NewIndent) - doDump(in.ReflectValue.Index(i), in.NewIndent, in.Buffer, in.Option) - in.Buffer.WriteString(",\n") - } - in.Buffer.WriteString(fmt.Sprintf("%s]", in.Indent)) -} - -func doDumpMap(in doDumpInternalInput) { - var mapKeys = make([]reflect.Value, 0) - for _, key := range in.ReflectValue.MapKeys() { - if !key.CanInterface() { - continue - } - mapKey := key - mapKeys = append(mapKeys, mapKey) - } - if len(mapKeys) == 0 { - if !in.Option.WithType { - in.Buffer.WriteString("{}") - } else { - in.Buffer.WriteString(fmt.Sprintf("%s(0) {}", in.ReflectTypeName)) - } - return - } - var ( - maxSpaceNum = 0 - tmpSpaceNum = 0 - mapKeyStr = "" - ) - for _, key := range mapKeys { - tmpSpaceNum = len(fmt.Sprintf(`%v`, key.Interface())) - if tmpSpaceNum > maxSpaceNum { - maxSpaceNum = tmpSpaceNum - } - } - if !in.Option.WithType { - in.Buffer.WriteString("{\n") - } else { - in.Buffer.WriteString(fmt.Sprintf("%s(%d) {\n", in.ReflectTypeName, len(mapKeys))) - } - for _, mapKey := range mapKeys { - tmpSpaceNum = len(fmt.Sprintf(`%v`, mapKey.Interface())) - if mapKey.Kind() == 
reflect.String { - mapKeyStr = fmt.Sprintf(`"%v"`, mapKey.Interface()) - } else { - mapKeyStr = fmt.Sprintf(`%v`, mapKey.Interface()) - } - // Map key and indent string dump. - if !in.Option.WithType { - in.Buffer.WriteString(fmt.Sprintf( - "%s%v:%s", - in.NewIndent, - mapKeyStr, - strings.Repeat(" ", maxSpaceNum-tmpSpaceNum+1), - )) - } else { - in.Buffer.WriteString(fmt.Sprintf( - "%s%s(%v):%s", - in.NewIndent, - mapKey.Type().String(), - mapKeyStr, - strings.Repeat(" ", maxSpaceNum-tmpSpaceNum+1), - )) - } - // Map value dump. - doDump(in.ReflectValue.MapIndex(mapKey), in.NewIndent, in.Buffer, in.Option) - in.Buffer.WriteString(",\n") - } - in.Buffer.WriteString(fmt.Sprintf("%s}", in.Indent)) -} - -func doDumpStruct(in doDumpInternalInput) { - if in.PtrAddress != "" { - if _, ok := in.DumpedPointerSet[in.PtrAddress]; ok { - in.Buffer.WriteString(fmt.Sprintf(``, in.PtrAddress)) - return - } - } - in.DumpedPointerSet[in.PtrAddress] = struct{}{} - - structFields, _ := gstructs.Fields(gstructs.FieldsInput{ - Pointer: in.Value, - RecursiveOption: gstructs.RecursiveOptionEmbedded, - }) - var ( - hasNoExportedFields = true - _, isReflectValue = in.Value.(reflect.Value) - ) - for _, field := range structFields { - if field.IsExported() { - hasNoExportedFields = false - break - } - } - if !isReflectValue && (len(structFields) == 0 || hasNoExportedFields) { - var ( - structContentStr = "" - attributeCountStr = "0" - ) - if v, ok := in.Value.(iString); ok { - structContentStr = v.String() - } else if v, ok := in.Value.(iError); ok { - structContentStr = v.Error() - } else if v, ok := in.Value.(iMarshalJSON); ok { - b, _ := v.MarshalJSON() - structContentStr = string(b) - } else { - // Has no common interface implements. 
- if len(structFields) != 0 { - goto dumpStructFields - } - } - if structContentStr == "" { - structContentStr = "{}" - } else { - structContentStr = fmt.Sprintf(`"%s"`, addSlashesForString(structContentStr)) - attributeCountStr = fmt.Sprintf(`%d`, len(structContentStr)-2) - } - if !in.Option.WithType { - in.Buffer.WriteString(structContentStr) - } else { - in.Buffer.WriteString(fmt.Sprintf( - "%s(%s) %s", - in.ReflectTypeName, - attributeCountStr, - structContentStr, - )) - } - return - } - -dumpStructFields: - var ( - maxSpaceNum = 0 - tmpSpaceNum = 0 - ) - for _, field := range structFields { - if in.ExportedOnly && !field.IsExported() { - continue - } - tmpSpaceNum = len(field.Name()) - if tmpSpaceNum > maxSpaceNum { - maxSpaceNum = tmpSpaceNum - } - } - if !in.Option.WithType { - in.Buffer.WriteString("{\n") - } else { - in.Buffer.WriteString(fmt.Sprintf("%s(%d) {\n", in.ReflectTypeName, len(structFields))) - } - for _, field := range structFields { - if in.ExportedOnly && !field.IsExported() { - continue - } - tmpSpaceNum = len(fmt.Sprintf(`%v`, field.Name())) - in.Buffer.WriteString(fmt.Sprintf( - "%s%s:%s", - in.NewIndent, - field.Name(), - strings.Repeat(" ", maxSpaceNum-tmpSpaceNum+1), - )) - doDump(field.Value, in.NewIndent, in.Buffer, in.Option) - in.Buffer.WriteString(",\n") - } - in.Buffer.WriteString(fmt.Sprintf("%s}", in.Indent)) -} - -func doDumpNumber(in doDumpInternalInput) { - if v, ok := in.Value.(iString); ok { - s := v.String() - if !in.Option.WithType { - in.Buffer.WriteString(fmt.Sprintf(`"%v"`, addSlashesForString(s))) - } else { - in.Buffer.WriteString(fmt.Sprintf( - `%s(%d) "%v"`, - in.ReflectTypeName, - len(s), - addSlashesForString(s), - )) - } - } else { - doDumpDefault(in) - } -} - -func doDumpString(in doDumpInternalInput) { - s := in.ReflectValue.String() - if !in.Option.WithType { - in.Buffer.WriteString(fmt.Sprintf(`"%v"`, addSlashesForString(s))) - } else { - in.Buffer.WriteString(fmt.Sprintf( - `%s(%d) "%v"`, - 
in.ReflectTypeName, - len(s), - addSlashesForString(s), - )) - } -} - -func doDumpBool(in doDumpInternalInput) { - var s string - if in.ReflectValue.Bool() { - s = `true` - } else { - s = `false` - } - if in.Option.WithType { - s = fmt.Sprintf(`bool(%s)`, s) - } - in.Buffer.WriteString(s) -} - -func doDumpDefault(in doDumpInternalInput) { - var s string - if in.ReflectValue.IsValid() && in.ReflectValue.CanInterface() { - s = fmt.Sprintf("%v", in.ReflectValue.Interface()) - } - if s == "" { - s = fmt.Sprintf("%v", in.Value) - } - s = gstr.Trim(s, `<>`) - if !in.Option.WithType { - in.Buffer.WriteString(s) - } else { - in.Buffer.WriteString(fmt.Sprintf("%s(%s)", in.ReflectTypeName, s)) - } -} - -func addSlashesForString(s string) string { - return gstr.ReplaceByMap(s, map[string]string{ - `"`: `\"`, - "\r": `\r`, - "\t": `\t`, - "\n": `\n`, - }) -} - -// DumpJson pretty dumps json content to stdout. -func DumpJson(jsonContent string) { - var ( - buffer = bytes.NewBuffer(nil) - jsonBytes = []byte(jsonContent) - ) - if err := json.Indent(buffer, jsonBytes, "", "\t"); err != nil { - fmt.Println(err.Error()) - } - fmt.Println(buffer.String()) -} diff --git a/vendor/github.com/gogf/gf/v2/util/gutil/gutil_list.go b/vendor/github.com/gogf/gf/v2/util/gutil/gutil_list.go deleted file mode 100644 index 9a60a318..00000000 --- a/vendor/github.com/gogf/gf/v2/util/gutil/gutil_list.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gutil - -import ( - "reflect" - - "github.com/gogf/gf/v2/internal/utils" -) - -// ListItemValues retrieves and returns the elements of all item struct/map with key `key`. 
-// Note that the parameter `list` should be type of slice which contains elements of map or struct, -// or else it returns an empty slice. -// -// The parameter `list` supports types like: -// []map[string]interface{} -// []map[string]sub-map -// []struct -// []struct:sub-struct -// Note that the sub-map/sub-struct makes sense only if the optional parameter `subKey` is given. -func ListItemValues(list interface{}, key interface{}, subKey ...interface{}) (values []interface{}) { - var reflectValue reflect.Value - if v, ok := list.(reflect.Value); ok { - reflectValue = v - } else { - reflectValue = reflect.ValueOf(list) - } - reflectKind := reflectValue.Kind() - for reflectKind == reflect.Ptr { - reflectValue = reflectValue.Elem() - reflectKind = reflectValue.Kind() - } - switch reflectKind { - case reflect.Slice, reflect.Array: - if reflectValue.Len() == 0 { - return - } - values = []interface{}{} - for i := 0; i < reflectValue.Len(); i++ { - if value, ok := ItemValue(reflectValue.Index(i), key); ok { - if len(subKey) > 0 && subKey[0] != nil { - if subValue, ok := ItemValue(value, subKey[0]); ok { - value = subValue - } else { - continue - } - } - if array, ok := value.([]interface{}); ok { - values = append(values, array...) - } else { - values = append(values, value) - } - } - } - } - return -} - -// ItemValue retrieves and returns its value of which name/attribute specified by `key`. -// The parameter `item` can be type of map/*map/struct/*struct. 
-func ItemValue(item interface{}, key interface{}) (value interface{}, found bool) { - var reflectValue reflect.Value - if v, ok := item.(reflect.Value); ok { - reflectValue = v - } else { - reflectValue = reflect.ValueOf(item) - } - reflectKind := reflectValue.Kind() - if reflectKind == reflect.Interface { - reflectValue = reflectValue.Elem() - reflectKind = reflectValue.Kind() - } - for reflectKind == reflect.Ptr { - reflectValue = reflectValue.Elem() - reflectKind = reflectValue.Kind() - } - var keyValue reflect.Value - if v, ok := key.(reflect.Value); ok { - keyValue = v - } else { - keyValue = reflect.ValueOf(key) - } - switch reflectKind { - case reflect.Array, reflect.Slice: - // The `key` must be type of string. - values := ListItemValues(reflectValue, keyValue.String()) - if values == nil { - return nil, false - } - return values, true - - case reflect.Map: - v := reflectValue.MapIndex(keyValue) - if v.IsValid() { - found = true - value = v.Interface() - } - - case reflect.Struct: - // The `mapKey` must be type of string. - v := reflectValue.FieldByName(keyValue.String()) - if v.IsValid() { - found = true - value = v.Interface() - } - } - return -} - -// ListItemValuesUnique retrieves and returns the unique elements of all struct/map with key `key`. -// Note that the parameter `list` should be type of slice which contains elements of map or struct, -// or else it returns an empty slice. -func ListItemValuesUnique(list interface{}, key string, subKey ...interface{}) []interface{} { - values := ListItemValues(list, key, subKey...) - if len(values) > 0 { - var ( - ok bool - m = make(map[interface{}]struct{}, len(values)) - ) - for i := 0; i < len(values); { - if _, ok = m[values[i]]; ok { - values = SliceDelete(values, i) - } else { - m[values[i]] = struct{}{} - i++ - } - } - } - return values -} - -// ListToMapByKey converts `list` to a map[string]interface{} of which key is specified by `key`. -// Note that the item value may be type of slice. 
-func ListToMapByKey(list []map[string]interface{}, key string) map[string]interface{} { - return utils.ListToMapByKey(list, key) -} diff --git a/vendor/github.com/gogf/gf/v2/util/gutil/gutil_map.go b/vendor/github.com/gogf/gf/v2/util/gutil/gutil_map.go deleted file mode 100644 index 92e47043..00000000 --- a/vendor/github.com/gogf/gf/v2/util/gutil/gutil_map.go +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gutil - -import ( - "reflect" - - "github.com/gogf/gf/v2/internal/utils" -) - -// MapCopy does a shallow copy from map `data` to `copy` for most commonly used map type -// map[string]interface{}. -func MapCopy(data map[string]interface{}) (copy map[string]interface{}) { - copy = make(map[string]interface{}, len(data)) - for k, v := range data { - copy[k] = v - } - return -} - -// MapContains checks whether map `data` contains `key`. -func MapContains(data map[string]interface{}, key string) (ok bool) { - if len(data) == 0 { - return - } - _, ok = data[key] - return -} - -// MapDelete deletes all `keys` from map `data`. -func MapDelete(data map[string]interface{}, keys ...string) { - if len(data) == 0 { - return - } - for _, key := range keys { - delete(data, key) - } -} - -// MapMerge merges all map from `src` to map `dst`. -func MapMerge(dst map[string]interface{}, src ...map[string]interface{}) { - if dst == nil { - return - } - for _, m := range src { - for k, v := range m { - dst[k] = v - } - } -} - -// MapMergeCopy creates and returns a new map which merges all map from `src`. 
-func MapMergeCopy(src ...map[string]interface{}) (copy map[string]interface{}) { - copy = make(map[string]interface{}) - for _, m := range src { - for k, v := range m { - copy[k] = v - } - } - return -} - -// MapPossibleItemByKey tries to find the possible key-value pair for given key ignoring cases and symbols. -// -// Note that this function might be of low performance. -func MapPossibleItemByKey(data map[string]interface{}, key string) (foundKey string, foundValue interface{}) { - return utils.MapPossibleItemByKey(data, key) -} - -// MapContainsPossibleKey checks if the given `key` is contained in given map `data`. -// It checks the key ignoring cases and symbols. -// -// Note that this function might be of low performance. -func MapContainsPossibleKey(data map[string]interface{}, key string) bool { - return utils.MapContainsPossibleKey(data, key) -} - -// MapOmitEmpty deletes all empty values from given map. -func MapOmitEmpty(data map[string]interface{}) { - if len(data) == 0 { - return - } - for k, v := range data { - if IsEmpty(v) { - delete(data, k) - } - } -} - -// MapToSlice converts map to slice of which all keys and values are its items. 
-// Eg: {"K1": "v1", "K2": "v2"} => ["K1", "v1", "K2", "v2"] -func MapToSlice(data interface{}) []interface{} { - var ( - reflectValue = reflect.ValueOf(data) - reflectKind = reflectValue.Kind() - ) - for reflectKind == reflect.Ptr { - reflectValue = reflectValue.Elem() - reflectKind = reflectValue.Kind() - } - switch reflectKind { - case reflect.Map: - array := make([]interface{}, 0) - for _, key := range reflectValue.MapKeys() { - array = append(array, key.Interface()) - array = append(array, reflectValue.MapIndex(key).Interface()) - } - return array - } - return nil -} diff --git a/vendor/github.com/gogf/gf/v2/util/gutil/gutil_reflect.go b/vendor/github.com/gogf/gf/v2/util/gutil/gutil_reflect.go deleted file mode 100644 index 87fdb781..00000000 --- a/vendor/github.com/gogf/gf/v2/util/gutil/gutil_reflect.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gutil - -import ( - "github.com/gogf/gf/v2/internal/reflection" -) - -type ( - OriginValueAndKindOutput = reflection.OriginValueAndKindOutput - OriginTypeAndKindOutput = reflection.OriginTypeAndKindOutput -) - -// OriginValueAndKind retrieves and returns the original reflect value and kind. -func OriginValueAndKind(value interface{}) (out OriginValueAndKindOutput) { - return reflection.OriginValueAndKind(value) -} - -// OriginTypeAndKind retrieves and returns the original reflect type and kind. 
-func OriginTypeAndKind(value interface{}) (out OriginTypeAndKindOutput) { - return reflection.OriginTypeAndKind(value) -} diff --git a/vendor/github.com/gogf/gf/v2/util/gutil/gutil_slice.go b/vendor/github.com/gogf/gf/v2/util/gutil/gutil_slice.go deleted file mode 100644 index fa8d71e5..00000000 --- a/vendor/github.com/gogf/gf/v2/util/gutil/gutil_slice.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gutil - -import ( - "reflect" - - "github.com/gogf/gf/v2/util/gconv" -) - -// SliceCopy does a shallow copy of slice `data` for most commonly used slice type -// []interface{}. -func SliceCopy(slice []interface{}) []interface{} { - newSlice := make([]interface{}, len(slice)) - copy(newSlice, slice) - return newSlice -} - -// SliceInsertBefore inserts the `values` to the front of `index` and returns a new slice. -func SliceInsertBefore(slice []interface{}, index int, values ...interface{}) (newSlice []interface{}) { - if index < 0 || index >= len(slice) { - return slice - } - newSlice = make([]interface{}, len(slice)+len(values)) - copy(newSlice, slice[0:index]) - copy(newSlice[index:], values) - copy(newSlice[index+len(values):], slice[index:]) - return -} - -// SliceInsertAfter inserts the `values` to the back of `index` and returns a new slice. -func SliceInsertAfter(slice []interface{}, index int, values ...interface{}) (newSlice []interface{}) { - if index < 0 || index >= len(slice) { - return slice - } - newSlice = make([]interface{}, len(slice)+len(values)) - copy(newSlice, slice[0:index+1]) - copy(newSlice[index+1:], values) - copy(newSlice[index+1+len(values):], slice[index+1:]) - return -} - -// SliceDelete deletes an element at `index` and returns the new slice. 
-// It does nothing if the given `index` is invalid. -func SliceDelete(slice []interface{}, index int) (newSlice []interface{}) { - if index < 0 || index >= len(slice) { - return slice - } - // Determine array boundaries when deleting to improve deletion efficiency. - if index == 0 { - return slice[1:] - } else if index == len(slice)-1 { - return slice[:index] - } - // If it is a non-boundary delete, - // it will involve the creation of an array, - // then the deletion is less efficient. - return append(slice[:index], slice[index+1:]...) -} - -// SliceToMap converts slice type variable `slice` to `map[string]interface{}`. -// Note that if the length of `slice` is not an even number, it returns nil. -// Eg: -// ["K1", "v1", "K2", "v2"] => {"K1": "v1", "K2": "v2"} -// ["K1", "v1", "K2"] => nil -func SliceToMap(slice interface{}) map[string]interface{} { - var ( - reflectValue = reflect.ValueOf(slice) - reflectKind = reflectValue.Kind() - ) - for reflectKind == reflect.Ptr { - reflectValue = reflectValue.Elem() - reflectKind = reflectValue.Kind() - } - switch reflectKind { - case reflect.Slice, reflect.Array: - length := reflectValue.Len() - if length%2 != 0 { - return nil - } - data := make(map[string]interface{}) - for i := 0; i < reflectValue.Len(); i += 2 { - data[gconv.String(reflectValue.Index(i).Interface())] = reflectValue.Index(i + 1).Interface() - } - return data - } - return nil -} - -// SliceToMapWithColumnAsKey converts slice type variable `slice` to `map[interface{}]interface{}` -// The value of specified column use as the key for returned map. 
-// Eg: -// SliceToMapWithColumnAsKey([{"K1": "v1", "K2": 1}, {"K1": "v2", "K2": 2}], "K1") => {"v1": {"K1": "v1", "K2": 1}, "v2": {"K1": "v2", "K2": 2}} -// SliceToMapWithColumnAsKey([{"K1": "v1", "K2": 1}, {"K1": "v2", "K2": 2}], "K2") => {1: {"K1": "v1", "K2": 1}, 2: {"K1": "v2", "K2": 2}} -func SliceToMapWithColumnAsKey(slice interface{}, key interface{}) map[interface{}]interface{} { - var ( - reflectValue = reflect.ValueOf(slice) - reflectKind = reflectValue.Kind() - ) - for reflectKind == reflect.Ptr { - reflectValue = reflectValue.Elem() - reflectKind = reflectValue.Kind() - } - data := make(map[interface{}]interface{}) - switch reflectKind { - case reflect.Slice, reflect.Array: - for i := 0; i < reflectValue.Len(); i++ { - if k, ok := ItemValue(reflectValue.Index(i), key); ok { - data[k] = reflectValue.Index(i).Interface() - } - } - } - return data -} diff --git a/vendor/github.com/gogf/gf/v2/util/gutil/gutil_struct.go b/vendor/github.com/gogf/gf/v2/util/gutil/gutil_struct.go deleted file mode 100644 index bd856fc4..00000000 --- a/vendor/github.com/gogf/gf/v2/util/gutil/gutil_struct.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. -// -// This Source Code Form is subject to the terms of the MIT License. -// If a copy of the MIT was not distributed with this file, -// You can obtain one at https://github.com/gogf/gf. - -package gutil - -import ( - "reflect" - - "github.com/gogf/gf/v2/util/gconv" -) - -// StructToSlice converts struct to slice of which all keys and values are its items. 
-// Eg: {"K1": "v1", "K2": "v2"} => ["K1", "v1", "K2", "v2"] -func StructToSlice(data interface{}) []interface{} { - var ( - reflectValue = reflect.ValueOf(data) - reflectKind = reflectValue.Kind() - ) - for reflectKind == reflect.Ptr { - reflectValue = reflectValue.Elem() - reflectKind = reflectValue.Kind() - } - switch reflectKind { - case reflect.Struct: - array := make([]interface{}, 0) - // Note that, it uses the gconv tag name instead of the attribute name if - // the gconv tag is fined in the struct attributes. - for k, v := range gconv.Map(reflectValue) { - array = append(array, k) - array = append(array, v) - } - return array - } - return nil -} diff --git a/vendor/github.com/lib/pq/.gitignore b/vendor/github.com/lib/pq/.gitignore deleted file mode 100644 index 3243952a..00000000 --- a/vendor/github.com/lib/pq/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -.db -*.test -*~ -*.swp -.idea -.vscode \ No newline at end of file diff --git a/vendor/github.com/lib/pq/LICENSE.md b/vendor/github.com/lib/pq/LICENSE.md deleted file mode 100644 index 5773904a..00000000 --- a/vendor/github.com/lib/pq/LICENSE.md +++ /dev/null @@ -1,8 +0,0 @@ -Copyright (c) 2011-2013, 'pq' Contributors -Portions Copyright (C) 2011 Blake Mizerany - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/lib/pq/README.md b/vendor/github.com/lib/pq/README.md deleted file mode 100644 index 126ee5d3..00000000 --- a/vendor/github.com/lib/pq/README.md +++ /dev/null @@ -1,36 +0,0 @@ -# pq - A pure Go postgres driver for Go's database/sql package - -[![GoDoc](https://godoc.org/github.com/lib/pq?status.svg)](https://pkg.go.dev/github.com/lib/pq?tab=doc) - -## Install - - go get github.com/lib/pq - -## Features - -* SSL -* Handles bad connections for `database/sql` -* Scan `time.Time` correctly (i.e. `timestamp[tz]`, `time[tz]`, `date`) -* Scan binary blobs correctly (i.e. `bytea`) -* Package for `hstore` support -* COPY FROM support -* pq.ParseURL for converting urls to connection strings for sql.Open. -* Many libpq compatible environment variables -* Unix socket support -* Notifications: `LISTEN`/`NOTIFY` -* pgpass support -* GSS (Kerberos) auth - -## Tests - -`go test` is used for testing. See [TESTS.md](TESTS.md) for more details. - -## Status - -This package is currently in maintenance mode, which means: -1. It generally does not accept new features. -2. It does accept bug fixes and version compatability changes provided by the community. -3. Maintainers usually do not resolve reported issues. -4. Community members are encouraged to help each other with reported issues. - -For users that require new features or reliable resolution of reported bugs, we recommend using [pgx](https://github.com/jackc/pgx) which is under active development. 
diff --git a/vendor/github.com/lib/pq/TESTS.md b/vendor/github.com/lib/pq/TESTS.md deleted file mode 100644 index f0502111..00000000 --- a/vendor/github.com/lib/pq/TESTS.md +++ /dev/null @@ -1,33 +0,0 @@ -# Tests - -## Running Tests - -`go test` is used for testing. A running PostgreSQL -server is required, with the ability to log in. The -database to connect to test with is "pqgotest," on -"localhost" but these can be overridden using [environment -variables](https://www.postgresql.org/docs/9.3/static/libpq-envars.html). - -Example: - - PGHOST=/run/postgresql go test - -## Benchmarks - -A benchmark suite can be run as part of the tests: - - go test -bench . - -## Example setup (Docker) - -Run a postgres container: - -``` -docker run --expose 5432:5432 postgres -``` - -Run tests: - -``` -PGHOST=localhost PGPORT=5432 PGUSER=postgres PGSSLMODE=disable PGDATABASE=postgres go test -``` diff --git a/vendor/github.com/lib/pq/array.go b/vendor/github.com/lib/pq/array.go deleted file mode 100644 index 39c8f7e2..00000000 --- a/vendor/github.com/lib/pq/array.go +++ /dev/null @@ -1,895 +0,0 @@ -package pq - -import ( - "bytes" - "database/sql" - "database/sql/driver" - "encoding/hex" - "fmt" - "reflect" - "strconv" - "strings" -) - -var typeByteSlice = reflect.TypeOf([]byte{}) -var typeDriverValuer = reflect.TypeOf((*driver.Valuer)(nil)).Elem() -var typeSQLScanner = reflect.TypeOf((*sql.Scanner)(nil)).Elem() - -// Array returns the optimal driver.Valuer and sql.Scanner for an array or -// slice of any dimension. -// -// For example: -// db.Query(`SELECT * FROM t WHERE id = ANY($1)`, pq.Array([]int{235, 401})) -// -// var x []sql.NullInt64 -// db.QueryRow(`SELECT ARRAY[235, 401]`).Scan(pq.Array(&x)) -// -// Scanning multi-dimensional arrays is not supported. Arrays where the lower -// bound is not one (such as `[0:0]={1}') are not supported. 
-func Array(a interface{}) interface { - driver.Valuer - sql.Scanner -} { - switch a := a.(type) { - case []bool: - return (*BoolArray)(&a) - case []float64: - return (*Float64Array)(&a) - case []float32: - return (*Float32Array)(&a) - case []int64: - return (*Int64Array)(&a) - case []int32: - return (*Int32Array)(&a) - case []string: - return (*StringArray)(&a) - case [][]byte: - return (*ByteaArray)(&a) - - case *[]bool: - return (*BoolArray)(a) - case *[]float64: - return (*Float64Array)(a) - case *[]float32: - return (*Float32Array)(a) - case *[]int64: - return (*Int64Array)(a) - case *[]int32: - return (*Int32Array)(a) - case *[]string: - return (*StringArray)(a) - case *[][]byte: - return (*ByteaArray)(a) - } - - return GenericArray{a} -} - -// ArrayDelimiter may be optionally implemented by driver.Valuer or sql.Scanner -// to override the array delimiter used by GenericArray. -type ArrayDelimiter interface { - // ArrayDelimiter returns the delimiter character(s) for this element's type. - ArrayDelimiter() string -} - -// BoolArray represents a one-dimensional array of the PostgreSQL boolean type. -type BoolArray []bool - -// Scan implements the sql.Scanner interface. 
-func (a *BoolArray) Scan(src interface{}) error { - switch src := src.(type) { - case []byte: - return a.scanBytes(src) - case string: - return a.scanBytes([]byte(src)) - case nil: - *a = nil - return nil - } - - return fmt.Errorf("pq: cannot convert %T to BoolArray", src) -} - -func (a *BoolArray) scanBytes(src []byte) error { - elems, err := scanLinearArray(src, []byte{','}, "BoolArray") - if err != nil { - return err - } - if *a != nil && len(elems) == 0 { - *a = (*a)[:0] - } else { - b := make(BoolArray, len(elems)) - for i, v := range elems { - if len(v) != 1 { - return fmt.Errorf("pq: could not parse boolean array index %d: invalid boolean %q", i, v) - } - switch v[0] { - case 't': - b[i] = true - case 'f': - b[i] = false - default: - return fmt.Errorf("pq: could not parse boolean array index %d: invalid boolean %q", i, v) - } - } - *a = b - } - return nil -} - -// Value implements the driver.Valuer interface. -func (a BoolArray) Value() (driver.Value, error) { - if a == nil { - return nil, nil - } - - if n := len(a); n > 0 { - // There will be exactly two curly brackets, N bytes of values, - // and N-1 bytes of delimiters. - b := make([]byte, 1+2*n) - - for i := 0; i < n; i++ { - b[2*i] = ',' - if a[i] { - b[1+2*i] = 't' - } else { - b[1+2*i] = 'f' - } - } - - b[0] = '{' - b[2*n] = '}' - - return string(b), nil - } - - return "{}", nil -} - -// ByteaArray represents a one-dimensional array of the PostgreSQL bytea type. -type ByteaArray [][]byte - -// Scan implements the sql.Scanner interface. 
-func (a *ByteaArray) Scan(src interface{}) error { - switch src := src.(type) { - case []byte: - return a.scanBytes(src) - case string: - return a.scanBytes([]byte(src)) - case nil: - *a = nil - return nil - } - - return fmt.Errorf("pq: cannot convert %T to ByteaArray", src) -} - -func (a *ByteaArray) scanBytes(src []byte) error { - elems, err := scanLinearArray(src, []byte{','}, "ByteaArray") - if err != nil { - return err - } - if *a != nil && len(elems) == 0 { - *a = (*a)[:0] - } else { - b := make(ByteaArray, len(elems)) - for i, v := range elems { - b[i], err = parseBytea(v) - if err != nil { - return fmt.Errorf("could not parse bytea array index %d: %s", i, err.Error()) - } - } - *a = b - } - return nil -} - -// Value implements the driver.Valuer interface. It uses the "hex" format which -// is only supported on PostgreSQL 9.0 or newer. -func (a ByteaArray) Value() (driver.Value, error) { - if a == nil { - return nil, nil - } - - if n := len(a); n > 0 { - // There will be at least two curly brackets, 2*N bytes of quotes, - // 3*N bytes of hex formatting, and N-1 bytes of delimiters. - size := 1 + 6*n - for _, x := range a { - size += hex.EncodedLen(len(x)) - } - - b := make([]byte, size) - - for i, s := 0, b; i < n; i++ { - o := copy(s, `,"\\x`) - o += hex.Encode(s[o:], a[i]) - s[o] = '"' - s = s[o+1:] - } - - b[0] = '{' - b[size-1] = '}' - - return string(b), nil - } - - return "{}", nil -} - -// Float64Array represents a one-dimensional array of the PostgreSQL double -// precision type. -type Float64Array []float64 - -// Scan implements the sql.Scanner interface. 
-func (a *Float64Array) Scan(src interface{}) error { - switch src := src.(type) { - case []byte: - return a.scanBytes(src) - case string: - return a.scanBytes([]byte(src)) - case nil: - *a = nil - return nil - } - - return fmt.Errorf("pq: cannot convert %T to Float64Array", src) -} - -func (a *Float64Array) scanBytes(src []byte) error { - elems, err := scanLinearArray(src, []byte{','}, "Float64Array") - if err != nil { - return err - } - if *a != nil && len(elems) == 0 { - *a = (*a)[:0] - } else { - b := make(Float64Array, len(elems)) - for i, v := range elems { - if b[i], err = strconv.ParseFloat(string(v), 64); err != nil { - return fmt.Errorf("pq: parsing array element index %d: %v", i, err) - } - } - *a = b - } - return nil -} - -// Value implements the driver.Valuer interface. -func (a Float64Array) Value() (driver.Value, error) { - if a == nil { - return nil, nil - } - - if n := len(a); n > 0 { - // There will be at least two curly brackets, N bytes of values, - // and N-1 bytes of delimiters. - b := make([]byte, 1, 1+2*n) - b[0] = '{' - - b = strconv.AppendFloat(b, a[0], 'f', -1, 64) - for i := 1; i < n; i++ { - b = append(b, ',') - b = strconv.AppendFloat(b, a[i], 'f', -1, 64) - } - - return string(append(b, '}')), nil - } - - return "{}", nil -} - -// Float32Array represents a one-dimensional array of the PostgreSQL double -// precision type. -type Float32Array []float32 - -// Scan implements the sql.Scanner interface. 
-func (a *Float32Array) Scan(src interface{}) error { - switch src := src.(type) { - case []byte: - return a.scanBytes(src) - case string: - return a.scanBytes([]byte(src)) - case nil: - *a = nil - return nil - } - - return fmt.Errorf("pq: cannot convert %T to Float32Array", src) -} - -func (a *Float32Array) scanBytes(src []byte) error { - elems, err := scanLinearArray(src, []byte{','}, "Float32Array") - if err != nil { - return err - } - if *a != nil && len(elems) == 0 { - *a = (*a)[:0] - } else { - b := make(Float32Array, len(elems)) - for i, v := range elems { - var x float64 - if x, err = strconv.ParseFloat(string(v), 32); err != nil { - return fmt.Errorf("pq: parsing array element index %d: %v", i, err) - } - b[i] = float32(x) - } - *a = b - } - return nil -} - -// Value implements the driver.Valuer interface. -func (a Float32Array) Value() (driver.Value, error) { - if a == nil { - return nil, nil - } - - if n := len(a); n > 0 { - // There will be at least two curly brackets, N bytes of values, - // and N-1 bytes of delimiters. - b := make([]byte, 1, 1+2*n) - b[0] = '{' - - b = strconv.AppendFloat(b, float64(a[0]), 'f', -1, 32) - for i := 1; i < n; i++ { - b = append(b, ',') - b = strconv.AppendFloat(b, float64(a[i]), 'f', -1, 32) - } - - return string(append(b, '}')), nil - } - - return "{}", nil -} - -// GenericArray implements the driver.Valuer and sql.Scanner interfaces for -// an array or slice of any dimension. -type GenericArray struct{ A interface{} } - -func (GenericArray) evaluateDestination(rt reflect.Type) (reflect.Type, func([]byte, reflect.Value) error, string) { - var assign func([]byte, reflect.Value) error - var del = "," - - // TODO calculate the assign function for other types - // TODO repeat this section on the element type of arrays or slices (multidimensional) - { - if reflect.PtrTo(rt).Implements(typeSQLScanner) { - // dest is always addressable because it is an element of a slice. 
- assign = func(src []byte, dest reflect.Value) (err error) { - ss := dest.Addr().Interface().(sql.Scanner) - if src == nil { - err = ss.Scan(nil) - } else { - err = ss.Scan(src) - } - return - } - goto FoundType - } - - assign = func([]byte, reflect.Value) error { - return fmt.Errorf("pq: scanning to %s is not implemented; only sql.Scanner", rt) - } - } - -FoundType: - - if ad, ok := reflect.Zero(rt).Interface().(ArrayDelimiter); ok { - del = ad.ArrayDelimiter() - } - - return rt, assign, del -} - -// Scan implements the sql.Scanner interface. -func (a GenericArray) Scan(src interface{}) error { - dpv := reflect.ValueOf(a.A) - switch { - case dpv.Kind() != reflect.Ptr: - return fmt.Errorf("pq: destination %T is not a pointer to array or slice", a.A) - case dpv.IsNil(): - return fmt.Errorf("pq: destination %T is nil", a.A) - } - - dv := dpv.Elem() - switch dv.Kind() { - case reflect.Slice: - case reflect.Array: - default: - return fmt.Errorf("pq: destination %T is not a pointer to array or slice", a.A) - } - - switch src := src.(type) { - case []byte: - return a.scanBytes(src, dv) - case string: - return a.scanBytes([]byte(src), dv) - case nil: - if dv.Kind() == reflect.Slice { - dv.Set(reflect.Zero(dv.Type())) - return nil - } - } - - return fmt.Errorf("pq: cannot convert %T to %s", src, dv.Type()) -} - -func (a GenericArray) scanBytes(src []byte, dv reflect.Value) error { - dtype, assign, del := a.evaluateDestination(dv.Type().Elem()) - dims, elems, err := parseArray(src, []byte(del)) - if err != nil { - return err - } - - // TODO allow multidimensional - - if len(dims) > 1 { - return fmt.Errorf("pq: scanning from multidimensional ARRAY%s is not implemented", - strings.Replace(fmt.Sprint(dims), " ", "][", -1)) - } - - // Treat a zero-dimensional array like an array with a single dimension of zero. 
- if len(dims) == 0 { - dims = append(dims, 0) - } - - for i, rt := 0, dv.Type(); i < len(dims); i, rt = i+1, rt.Elem() { - switch rt.Kind() { - case reflect.Slice: - case reflect.Array: - if rt.Len() != dims[i] { - return fmt.Errorf("pq: cannot convert ARRAY%s to %s", - strings.Replace(fmt.Sprint(dims), " ", "][", -1), dv.Type()) - } - default: - // TODO handle multidimensional - } - } - - values := reflect.MakeSlice(reflect.SliceOf(dtype), len(elems), len(elems)) - for i, e := range elems { - if err := assign(e, values.Index(i)); err != nil { - return fmt.Errorf("pq: parsing array element index %d: %v", i, err) - } - } - - // TODO handle multidimensional - - switch dv.Kind() { - case reflect.Slice: - dv.Set(values.Slice(0, dims[0])) - case reflect.Array: - for i := 0; i < dims[0]; i++ { - dv.Index(i).Set(values.Index(i)) - } - } - - return nil -} - -// Value implements the driver.Valuer interface. -func (a GenericArray) Value() (driver.Value, error) { - if a.A == nil { - return nil, nil - } - - rv := reflect.ValueOf(a.A) - - switch rv.Kind() { - case reflect.Slice: - if rv.IsNil() { - return nil, nil - } - case reflect.Array: - default: - return nil, fmt.Errorf("pq: Unable to convert %T to array", a.A) - } - - if n := rv.Len(); n > 0 { - // There will be at least two curly brackets, N bytes of values, - // and N-1 bytes of delimiters. - b := make([]byte, 0, 1+2*n) - - b, _, err := appendArray(b, rv, n) - return string(b), err - } - - return "{}", nil -} - -// Int64Array represents a one-dimensional array of the PostgreSQL integer types. -type Int64Array []int64 - -// Scan implements the sql.Scanner interface. 
-func (a *Int64Array) Scan(src interface{}) error { - switch src := src.(type) { - case []byte: - return a.scanBytes(src) - case string: - return a.scanBytes([]byte(src)) - case nil: - *a = nil - return nil - } - - return fmt.Errorf("pq: cannot convert %T to Int64Array", src) -} - -func (a *Int64Array) scanBytes(src []byte) error { - elems, err := scanLinearArray(src, []byte{','}, "Int64Array") - if err != nil { - return err - } - if *a != nil && len(elems) == 0 { - *a = (*a)[:0] - } else { - b := make(Int64Array, len(elems)) - for i, v := range elems { - if b[i], err = strconv.ParseInt(string(v), 10, 64); err != nil { - return fmt.Errorf("pq: parsing array element index %d: %v", i, err) - } - } - *a = b - } - return nil -} - -// Value implements the driver.Valuer interface. -func (a Int64Array) Value() (driver.Value, error) { - if a == nil { - return nil, nil - } - - if n := len(a); n > 0 { - // There will be at least two curly brackets, N bytes of values, - // and N-1 bytes of delimiters. - b := make([]byte, 1, 1+2*n) - b[0] = '{' - - b = strconv.AppendInt(b, a[0], 10) - for i := 1; i < n; i++ { - b = append(b, ',') - b = strconv.AppendInt(b, a[i], 10) - } - - return string(append(b, '}')), nil - } - - return "{}", nil -} - -// Int32Array represents a one-dimensional array of the PostgreSQL integer types. -type Int32Array []int32 - -// Scan implements the sql.Scanner interface. 
-func (a *Int32Array) Scan(src interface{}) error { - switch src := src.(type) { - case []byte: - return a.scanBytes(src) - case string: - return a.scanBytes([]byte(src)) - case nil: - *a = nil - return nil - } - - return fmt.Errorf("pq: cannot convert %T to Int32Array", src) -} - -func (a *Int32Array) scanBytes(src []byte) error { - elems, err := scanLinearArray(src, []byte{','}, "Int32Array") - if err != nil { - return err - } - if *a != nil && len(elems) == 0 { - *a = (*a)[:0] - } else { - b := make(Int32Array, len(elems)) - for i, v := range elems { - x, err := strconv.ParseInt(string(v), 10, 32) - if err != nil { - return fmt.Errorf("pq: parsing array element index %d: %v", i, err) - } - b[i] = int32(x) - } - *a = b - } - return nil -} - -// Value implements the driver.Valuer interface. -func (a Int32Array) Value() (driver.Value, error) { - if a == nil { - return nil, nil - } - - if n := len(a); n > 0 { - // There will be at least two curly brackets, N bytes of values, - // and N-1 bytes of delimiters. - b := make([]byte, 1, 1+2*n) - b[0] = '{' - - b = strconv.AppendInt(b, int64(a[0]), 10) - for i := 1; i < n; i++ { - b = append(b, ',') - b = strconv.AppendInt(b, int64(a[i]), 10) - } - - return string(append(b, '}')), nil - } - - return "{}", nil -} - -// StringArray represents a one-dimensional array of the PostgreSQL character types. -type StringArray []string - -// Scan implements the sql.Scanner interface. 
-func (a *StringArray) Scan(src interface{}) error { - switch src := src.(type) { - case []byte: - return a.scanBytes(src) - case string: - return a.scanBytes([]byte(src)) - case nil: - *a = nil - return nil - } - - return fmt.Errorf("pq: cannot convert %T to StringArray", src) -} - -func (a *StringArray) scanBytes(src []byte) error { - elems, err := scanLinearArray(src, []byte{','}, "StringArray") - if err != nil { - return err - } - if *a != nil && len(elems) == 0 { - *a = (*a)[:0] - } else { - b := make(StringArray, len(elems)) - for i, v := range elems { - if b[i] = string(v); v == nil { - return fmt.Errorf("pq: parsing array element index %d: cannot convert nil to string", i) - } - } - *a = b - } - return nil -} - -// Value implements the driver.Valuer interface. -func (a StringArray) Value() (driver.Value, error) { - if a == nil { - return nil, nil - } - - if n := len(a); n > 0 { - // There will be at least two curly brackets, 2*N bytes of quotes, - // and N-1 bytes of delimiters. - b := make([]byte, 1, 1+3*n) - b[0] = '{' - - b = appendArrayQuotedBytes(b, []byte(a[0])) - for i := 1; i < n; i++ { - b = append(b, ',') - b = appendArrayQuotedBytes(b, []byte(a[i])) - } - - return string(append(b, '}')), nil - } - - return "{}", nil -} - -// appendArray appends rv to the buffer, returning the extended buffer and -// the delimiter used between elements. -// -// It panics when n <= 0 or rv's Kind is not reflect.Array nor reflect.Slice. -func appendArray(b []byte, rv reflect.Value, n int) ([]byte, string, error) { - var del string - var err error - - b = append(b, '{') - - if b, del, err = appendArrayElement(b, rv.Index(0)); err != nil { - return b, del, err - } - - for i := 1; i < n; i++ { - b = append(b, del...) 
- if b, del, err = appendArrayElement(b, rv.Index(i)); err != nil { - return b, del, err - } - } - - return append(b, '}'), del, nil -} - -// appendArrayElement appends rv to the buffer, returning the extended buffer -// and the delimiter to use before the next element. -// -// When rv's Kind is neither reflect.Array nor reflect.Slice, it is converted -// using driver.DefaultParameterConverter and the resulting []byte or string -// is double-quoted. -// -// See http://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO -func appendArrayElement(b []byte, rv reflect.Value) ([]byte, string, error) { - if k := rv.Kind(); k == reflect.Array || k == reflect.Slice { - if t := rv.Type(); t != typeByteSlice && !t.Implements(typeDriverValuer) { - if n := rv.Len(); n > 0 { - return appendArray(b, rv, n) - } - - return b, "", nil - } - } - - var del = "," - var err error - var iv interface{} = rv.Interface() - - if ad, ok := iv.(ArrayDelimiter); ok { - del = ad.ArrayDelimiter() - } - - if iv, err = driver.DefaultParameterConverter.ConvertValue(iv); err != nil { - return b, del, err - } - - switch v := iv.(type) { - case nil: - return append(b, "NULL"...), del, nil - case []byte: - return appendArrayQuotedBytes(b, v), del, nil - case string: - return appendArrayQuotedBytes(b, []byte(v)), del, nil - } - - b, err = appendValue(b, iv) - return b, del, err -} - -func appendArrayQuotedBytes(b, v []byte) []byte { - b = append(b, '"') - for { - i := bytes.IndexAny(v, `"\`) - if i < 0 { - b = append(b, v...) - break - } - if i > 0 { - b = append(b, v[:i]...) - } - b = append(b, '\\', v[i]) - v = v[i+1:] - } - return append(b, '"') -} - -func appendValue(b []byte, v driver.Value) ([]byte, error) { - return append(b, encode(nil, v, 0)...), nil -} - -// parseArray extracts the dimensions and elements of an array represented in -// text format. Only representations emitted by the backend are supported. 
-// Notably, whitespace around brackets and delimiters is significant, and NULL -// is case-sensitive. -// -// See http://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO -func parseArray(src, del []byte) (dims []int, elems [][]byte, err error) { - var depth, i int - - if len(src) < 1 || src[0] != '{' { - return nil, nil, fmt.Errorf("pq: unable to parse array; expected %q at offset %d", '{', 0) - } - -Open: - for i < len(src) { - switch src[i] { - case '{': - depth++ - i++ - case '}': - elems = make([][]byte, 0) - goto Close - default: - break Open - } - } - dims = make([]int, i) - -Element: - for i < len(src) { - switch src[i] { - case '{': - if depth == len(dims) { - break Element - } - depth++ - dims[depth-1] = 0 - i++ - case '"': - var elem = []byte{} - var escape bool - for i++; i < len(src); i++ { - if escape { - elem = append(elem, src[i]) - escape = false - } else { - switch src[i] { - default: - elem = append(elem, src[i]) - case '\\': - escape = true - case '"': - elems = append(elems, elem) - i++ - break Element - } - } - } - default: - for start := i; i < len(src); i++ { - if bytes.HasPrefix(src[i:], del) || src[i] == '}' { - elem := src[start:i] - if len(elem) == 0 { - return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i) - } - if bytes.Equal(elem, []byte("NULL")) { - elem = nil - } - elems = append(elems, elem) - break Element - } - } - } - } - - for i < len(src) { - if bytes.HasPrefix(src[i:], del) && depth > 0 { - dims[depth-1]++ - i += len(del) - goto Element - } else if src[i] == '}' && depth > 0 { - dims[depth-1]++ - depth-- - i++ - } else { - return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i) - } - } - -Close: - for i < len(src) { - if src[i] == '}' && depth > 0 { - depth-- - i++ - } else { - return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i) - } - } - if depth > 0 { - err = fmt.Errorf("pq: unable to 
parse array; expected %q at offset %d", '}', i) - } - if err == nil { - for _, d := range dims { - if (len(elems) % d) != 0 { - err = fmt.Errorf("pq: multidimensional arrays must have elements with matching dimensions") - } - } - } - return -} - -func scanLinearArray(src, del []byte, typ string) (elems [][]byte, err error) { - dims, elems, err := parseArray(src, del) - if err != nil { - return nil, err - } - if len(dims) > 1 { - return nil, fmt.Errorf("pq: cannot convert ARRAY%s to %s", strings.Replace(fmt.Sprint(dims), " ", "][", -1), typ) - } - return elems, err -} diff --git a/vendor/github.com/lib/pq/buf.go b/vendor/github.com/lib/pq/buf.go deleted file mode 100644 index 4b0a0a8f..00000000 --- a/vendor/github.com/lib/pq/buf.go +++ /dev/null @@ -1,91 +0,0 @@ -package pq - -import ( - "bytes" - "encoding/binary" - - "github.com/lib/pq/oid" -) - -type readBuf []byte - -func (b *readBuf) int32() (n int) { - n = int(int32(binary.BigEndian.Uint32(*b))) - *b = (*b)[4:] - return -} - -func (b *readBuf) oid() (n oid.Oid) { - n = oid.Oid(binary.BigEndian.Uint32(*b)) - *b = (*b)[4:] - return -} - -// N.B: this is actually an unsigned 16-bit integer, unlike int32 -func (b *readBuf) int16() (n int) { - n = int(binary.BigEndian.Uint16(*b)) - *b = (*b)[2:] - return -} - -func (b *readBuf) string() string { - i := bytes.IndexByte(*b, 0) - if i < 0 { - errorf("invalid message format; expected string terminator") - } - s := (*b)[:i] - *b = (*b)[i+1:] - return string(s) -} - -func (b *readBuf) next(n int) (v []byte) { - v = (*b)[:n] - *b = (*b)[n:] - return -} - -func (b *readBuf) byte() byte { - return b.next(1)[0] -} - -type writeBuf struct { - buf []byte - pos int -} - -func (b *writeBuf) int32(n int) { - x := make([]byte, 4) - binary.BigEndian.PutUint32(x, uint32(n)) - b.buf = append(b.buf, x...) -} - -func (b *writeBuf) int16(n int) { - x := make([]byte, 2) - binary.BigEndian.PutUint16(x, uint16(n)) - b.buf = append(b.buf, x...) 
-} - -func (b *writeBuf) string(s string) { - b.buf = append(append(b.buf, s...), '\000') -} - -func (b *writeBuf) byte(c byte) { - b.buf = append(b.buf, c) -} - -func (b *writeBuf) bytes(v []byte) { - b.buf = append(b.buf, v...) -} - -func (b *writeBuf) wrap() []byte { - p := b.buf[b.pos:] - binary.BigEndian.PutUint32(p, uint32(len(p))) - return b.buf -} - -func (b *writeBuf) next(c byte) { - p := b.buf[b.pos:] - binary.BigEndian.PutUint32(p, uint32(len(p))) - b.pos = len(b.buf) + 1 - b.buf = append(b.buf, c, 0, 0, 0, 0) -} diff --git a/vendor/github.com/lib/pq/conn.go b/vendor/github.com/lib/pq/conn.go deleted file mode 100644 index da4ff9de..00000000 --- a/vendor/github.com/lib/pq/conn.go +++ /dev/null @@ -1,2112 +0,0 @@ -package pq - -import ( - "bufio" - "bytes" - "context" - "crypto/md5" - "crypto/sha256" - "database/sql" - "database/sql/driver" - "encoding/binary" - "errors" - "fmt" - "io" - "net" - "os" - "os/user" - "path" - "path/filepath" - "strconv" - "strings" - "sync" - "time" - "unicode" - - "github.com/lib/pq/oid" - "github.com/lib/pq/scram" -) - -// Common error types -var ( - ErrNotSupported = errors.New("pq: Unsupported command") - ErrInFailedTransaction = errors.New("pq: Could not complete operation in a failed transaction") - ErrSSLNotSupported = errors.New("pq: SSL is not enabled on the server") - ErrSSLKeyUnknownOwnership = errors.New("pq: Could not get owner information for private key, may not be properly protected") - ErrSSLKeyHasWorldPermissions = errors.New("pq: Private key has world access. Permissions should be u=rw,g=r (0640) if owned by root, or u=rw (0600), or less") - - ErrCouldNotDetectUsername = errors.New("pq: Could not detect default username. 
Please provide one explicitly") - - errUnexpectedReady = errors.New("unexpected ReadyForQuery") - errNoRowsAffected = errors.New("no RowsAffected available after the empty statement") - errNoLastInsertID = errors.New("no LastInsertId available after the empty statement") -) - -// Compile time validation that our types implement the expected interfaces -var ( - _ driver.Driver = Driver{} -) - -// Driver is the Postgres database driver. -type Driver struct{} - -// Open opens a new connection to the database. name is a connection string. -// Most users should only use it through database/sql package from the standard -// library. -func (d Driver) Open(name string) (driver.Conn, error) { - return Open(name) -} - -func init() { - sql.Register("postgres", &Driver{}) -} - -type parameterStatus struct { - // server version in the same format as server_version_num, or 0 if - // unavailable - serverVersion int - - // the current location based on the TimeZone value of the session, if - // available - currentLocation *time.Location -} - -type transactionStatus byte - -const ( - txnStatusIdle transactionStatus = 'I' - txnStatusIdleInTransaction transactionStatus = 'T' - txnStatusInFailedTransaction transactionStatus = 'E' -) - -func (s transactionStatus) String() string { - switch s { - case txnStatusIdle: - return "idle" - case txnStatusIdleInTransaction: - return "idle in transaction" - case txnStatusInFailedTransaction: - return "in a failed transaction" - default: - errorf("unknown transactionStatus %d", s) - } - - panic("not reached") -} - -// Dialer is the dialer interface. It can be used to obtain more control over -// how pq creates network connections. -type Dialer interface { - Dial(network, address string) (net.Conn, error) - DialTimeout(network, address string, timeout time.Duration) (net.Conn, error) -} - -// DialerContext is the context-aware dialer interface. 
-type DialerContext interface { - DialContext(ctx context.Context, network, address string) (net.Conn, error) -} - -type defaultDialer struct { - d net.Dialer -} - -func (d defaultDialer) Dial(network, address string) (net.Conn, error) { - return d.d.Dial(network, address) -} -func (d defaultDialer) DialTimeout( - network, address string, timeout time.Duration, -) (net.Conn, error) { - ctx, cancel := context.WithTimeout(context.Background(), timeout) - defer cancel() - return d.DialContext(ctx, network, address) -} -func (d defaultDialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) { - return d.d.DialContext(ctx, network, address) -} - -type conn struct { - c net.Conn - buf *bufio.Reader - namei int - scratch [512]byte - txnStatus transactionStatus - txnFinish func() - - // Save connection arguments to use during CancelRequest. - dialer Dialer - opts values - - // Cancellation key data for use with CancelRequest messages. - processID int - secretKey int - - parameterStatus parameterStatus - - saveMessageType byte - saveMessageBuffer []byte - - // If an error is set, this connection is bad and all public-facing - // functions should return the appropriate error by calling get() - // (ErrBadConn) or getForNext(). - err syncErr - - // If set, this connection should never use the binary format when - // receiving query results from prepared statements. Only provided for - // debugging. - disablePreparedBinaryResult bool - - // Whether to always send []byte parameters over as binary. Enables single - // round-trip mode for non-prepared Query calls. 
- binaryParameters bool - - // If true this connection is in the middle of a COPY - inCopy bool - - // If not nil, notices will be synchronously sent here - noticeHandler func(*Error) - - // If not nil, notifications will be synchronously sent here - notificationHandler func(*Notification) - - // GSSAPI context - gss GSS -} - -type syncErr struct { - err error - sync.Mutex -} - -// Return ErrBadConn if connection is bad. -func (e *syncErr) get() error { - e.Lock() - defer e.Unlock() - if e.err != nil { - return driver.ErrBadConn - } - return nil -} - -// Return the error set on the connection. Currently only used by rows.Next. -func (e *syncErr) getForNext() error { - e.Lock() - defer e.Unlock() - return e.err -} - -// Set error, only if it isn't set yet. -func (e *syncErr) set(err error) { - if err == nil { - panic("attempt to set nil err") - } - e.Lock() - defer e.Unlock() - if e.err == nil { - e.err = err - } -} - -// Handle driver-side settings in parsed connection string. -func (cn *conn) handleDriverSettings(o values) (err error) { - boolSetting := func(key string, val *bool) error { - if value, ok := o[key]; ok { - if value == "yes" { - *val = true - } else if value == "no" { - *val = false - } else { - return fmt.Errorf("unrecognized value %q for %s", value, key) - } - } - return nil - } - - err = boolSetting("disable_prepared_binary_result", &cn.disablePreparedBinaryResult) - if err != nil { - return err - } - return boolSetting("binary_parameters", &cn.binaryParameters) -} - -func (cn *conn) handlePgpass(o values) { - // if a password was supplied, do not process .pgpass - if _, ok := o["password"]; ok { - return - } - filename := os.Getenv("PGPASSFILE") - if filename == "" { - // XXX this code doesn't work on Windows where the default filename is - // XXX %APPDATA%\postgresql\pgpass.conf - // Prefer $HOME over user.Current due to glibc bug: golang.org/issue/13470 - userHome := os.Getenv("HOME") - if userHome == "" { - user, err := user.Current() - if err 
!= nil { - return - } - userHome = user.HomeDir - } - filename = filepath.Join(userHome, ".pgpass") - } - fileinfo, err := os.Stat(filename) - if err != nil { - return - } - mode := fileinfo.Mode() - if mode&(0x77) != 0 { - // XXX should warn about incorrect .pgpass permissions as psql does - return - } - file, err := os.Open(filename) - if err != nil { - return - } - defer file.Close() - scanner := bufio.NewScanner(io.Reader(file)) - // From: https://github.com/tg/pgpass/blob/master/reader.go - for scanner.Scan() { - if scanText(scanner.Text(), o) { - break - } - } -} - -// GetFields is a helper function for scanText. -func getFields(s string) []string { - fs := make([]string, 0, 5) - f := make([]rune, 0, len(s)) - - var esc bool - for _, c := range s { - switch { - case esc: - f = append(f, c) - esc = false - case c == '\\': - esc = true - case c == ':': - fs = append(fs, string(f)) - f = f[:0] - default: - f = append(f, c) - } - } - return append(fs, string(f)) -} - -// ScanText assists HandlePgpass in it's objective. -func scanText(line string, o values) bool { - hostname := o["host"] - ntw, _ := network(o) - port := o["port"] - db := o["dbname"] - username := o["user"] - if len(line) == 0 || line[0] == '#' { - return false - } - split := getFields(line) - if len(split) != 5 { - return false - } - if (split[0] == "*" || split[0] == hostname || (split[0] == "localhost" && (hostname == "" || ntw == "unix"))) && (split[1] == "*" || split[1] == port) && (split[2] == "*" || split[2] == db) && (split[3] == "*" || split[3] == username) { - o["password"] = split[4] - return true - } - return false -} - -func (cn *conn) writeBuf(b byte) *writeBuf { - cn.scratch[0] = b - return &writeBuf{ - buf: cn.scratch[:5], - pos: 1, - } -} - -// Open opens a new connection to the database. dsn is a connection string. -// Most users should only use it through database/sql package from the standard -// library. 
-func Open(dsn string) (_ driver.Conn, err error) { - return DialOpen(defaultDialer{}, dsn) -} - -// DialOpen opens a new connection to the database using a dialer. -func DialOpen(d Dialer, dsn string) (_ driver.Conn, err error) { - c, err := NewConnector(dsn) - if err != nil { - return nil, err - } - c.Dialer(d) - return c.open(context.Background()) -} - -func (c *Connector) open(ctx context.Context) (cn *conn, err error) { - // Handle any panics during connection initialization. Note that we - // specifically do *not* want to use errRecover(), as that would turn any - // connection errors into ErrBadConns, hiding the real error message from - // the user. - defer errRecoverNoErrBadConn(&err) - - // Create a new values map (copy). This makes it so maps in different - // connections do not reference the same underlying data structure, so it - // is safe for multiple connections to concurrently write to their opts. - o := make(values) - for k, v := range c.opts { - o[k] = v - } - - cn = &conn{ - opts: o, - dialer: c.dialer, - } - err = cn.handleDriverSettings(o) - if err != nil { - return nil, err - } - cn.handlePgpass(o) - - cn.c, err = dial(ctx, c.dialer, o) - if err != nil { - return nil, err - } - - err = cn.ssl(o) - if err != nil { - if cn.c != nil { - cn.c.Close() - } - return nil, err - } - - // cn.startup panics on error. Make sure we don't leak cn.c. - panicking := true - defer func() { - if panicking { - cn.c.Close() - } - }() - - cn.buf = bufio.NewReader(cn.c) - cn.startup(o) - - // reset the deadline, in case one was set (see dial) - if timeout, ok := o["connect_timeout"]; ok && timeout != "0" { - err = cn.c.SetDeadline(time.Time{}) - } - panicking = false - return cn, err -} - -func dial(ctx context.Context, d Dialer, o values) (net.Conn, error) { - network, address := network(o) - - // Zero or not specified means wait indefinitely. 
- if timeout, ok := o["connect_timeout"]; ok && timeout != "0" { - seconds, err := strconv.ParseInt(timeout, 10, 0) - if err != nil { - return nil, fmt.Errorf("invalid value for parameter connect_timeout: %s", err) - } - duration := time.Duration(seconds) * time.Second - - // connect_timeout should apply to the entire connection establishment - // procedure, so we both use a timeout for the TCP connection - // establishment and set a deadline for doing the initial handshake. - // The deadline is then reset after startup() is done. - deadline := time.Now().Add(duration) - var conn net.Conn - if dctx, ok := d.(DialerContext); ok { - ctx, cancel := context.WithTimeout(ctx, duration) - defer cancel() - conn, err = dctx.DialContext(ctx, network, address) - } else { - conn, err = d.DialTimeout(network, address, duration) - } - if err != nil { - return nil, err - } - err = conn.SetDeadline(deadline) - return conn, err - } - if dctx, ok := d.(DialerContext); ok { - return dctx.DialContext(ctx, network, address) - } - return d.Dial(network, address) -} - -func network(o values) (string, string) { - host := o["host"] - - if strings.HasPrefix(host, "/") { - sockPath := path.Join(host, ".s.PGSQL."+o["port"]) - return "unix", sockPath - } - - return "tcp", net.JoinHostPort(host, o["port"]) -} - -type values map[string]string - -// scanner implements a tokenizer for libpq-style option strings. -type scanner struct { - s []rune - i int -} - -// newScanner returns a new scanner initialized with the option string s. -func newScanner(s string) *scanner { - return &scanner{[]rune(s), 0} -} - -// Next returns the next rune. -// It returns 0, false if the end of the text has been reached. -func (s *scanner) Next() (rune, bool) { - if s.i >= len(s.s) { - return 0, false - } - r := s.s[s.i] - s.i++ - return r, true -} - -// SkipSpaces returns the next non-whitespace rune. -// It returns 0, false if the end of the text has been reached. 
-func (s *scanner) SkipSpaces() (rune, bool) { - r, ok := s.Next() - for unicode.IsSpace(r) && ok { - r, ok = s.Next() - } - return r, ok -} - -// parseOpts parses the options from name and adds them to the values. -// -// The parsing code is based on conninfo_parse from libpq's fe-connect.c -func parseOpts(name string, o values) error { - s := newScanner(name) - - for { - var ( - keyRunes, valRunes []rune - r rune - ok bool - ) - - if r, ok = s.SkipSpaces(); !ok { - break - } - - // Scan the key - for !unicode.IsSpace(r) && r != '=' { - keyRunes = append(keyRunes, r) - if r, ok = s.Next(); !ok { - break - } - } - - // Skip any whitespace if we're not at the = yet - if r != '=' { - r, ok = s.SkipSpaces() - } - - // The current character should be = - if r != '=' || !ok { - return fmt.Errorf(`missing "=" after %q in connection info string"`, string(keyRunes)) - } - - // Skip any whitespace after the = - if r, ok = s.SkipSpaces(); !ok { - // If we reach the end here, the last value is just an empty string as per libpq. 
- o[string(keyRunes)] = "" - break - } - - if r != '\'' { - for !unicode.IsSpace(r) { - if r == '\\' { - if r, ok = s.Next(); !ok { - return fmt.Errorf(`missing character after backslash`) - } - } - valRunes = append(valRunes, r) - - if r, ok = s.Next(); !ok { - break - } - } - } else { - quote: - for { - if r, ok = s.Next(); !ok { - return fmt.Errorf(`unterminated quoted string literal in connection string`) - } - switch r { - case '\'': - break quote - case '\\': - r, _ = s.Next() - fallthrough - default: - valRunes = append(valRunes, r) - } - } - } - - o[string(keyRunes)] = string(valRunes) - } - - return nil -} - -func (cn *conn) isInTransaction() bool { - return cn.txnStatus == txnStatusIdleInTransaction || - cn.txnStatus == txnStatusInFailedTransaction -} - -func (cn *conn) checkIsInTransaction(intxn bool) { - if cn.isInTransaction() != intxn { - cn.err.set(driver.ErrBadConn) - errorf("unexpected transaction status %v", cn.txnStatus) - } -} - -func (cn *conn) Begin() (_ driver.Tx, err error) { - return cn.begin("") -} - -func (cn *conn) begin(mode string) (_ driver.Tx, err error) { - if err := cn.err.get(); err != nil { - return nil, err - } - defer cn.errRecover(&err) - - cn.checkIsInTransaction(false) - _, commandTag, err := cn.simpleExec("BEGIN" + mode) - if err != nil { - return nil, err - } - if commandTag != "BEGIN" { - cn.err.set(driver.ErrBadConn) - return nil, fmt.Errorf("unexpected command tag %s", commandTag) - } - if cn.txnStatus != txnStatusIdleInTransaction { - cn.err.set(driver.ErrBadConn) - return nil, fmt.Errorf("unexpected transaction status %v", cn.txnStatus) - } - return cn, nil -} - -func (cn *conn) closeTxn() { - if finish := cn.txnFinish; finish != nil { - finish() - } -} - -func (cn *conn) Commit() (err error) { - defer cn.closeTxn() - if err := cn.err.get(); err != nil { - return err - } - defer cn.errRecover(&err) - - cn.checkIsInTransaction(true) - // We don't want the client to think that everything is okay if it tries - // to 
commit a failed transaction. However, no matter what we return, - // database/sql will release this connection back into the free connection - // pool so we have to abort the current transaction here. Note that you - // would get the same behaviour if you issued a COMMIT in a failed - // transaction, so it's also the least surprising thing to do here. - if cn.txnStatus == txnStatusInFailedTransaction { - if err := cn.rollback(); err != nil { - return err - } - return ErrInFailedTransaction - } - - _, commandTag, err := cn.simpleExec("COMMIT") - if err != nil { - if cn.isInTransaction() { - cn.err.set(driver.ErrBadConn) - } - return err - } - if commandTag != "COMMIT" { - cn.err.set(driver.ErrBadConn) - return fmt.Errorf("unexpected command tag %s", commandTag) - } - cn.checkIsInTransaction(false) - return nil -} - -func (cn *conn) Rollback() (err error) { - defer cn.closeTxn() - if err := cn.err.get(); err != nil { - return err - } - defer cn.errRecover(&err) - return cn.rollback() -} - -func (cn *conn) rollback() (err error) { - cn.checkIsInTransaction(true) - _, commandTag, err := cn.simpleExec("ROLLBACK") - if err != nil { - if cn.isInTransaction() { - cn.err.set(driver.ErrBadConn) - } - return err - } - if commandTag != "ROLLBACK" { - return fmt.Errorf("unexpected command tag %s", commandTag) - } - cn.checkIsInTransaction(false) - return nil -} - -func (cn *conn) gname() string { - cn.namei++ - return strconv.FormatInt(int64(cn.namei), 10) -} - -func (cn *conn) simpleExec(q string) (res driver.Result, commandTag string, err error) { - b := cn.writeBuf('Q') - b.string(q) - cn.send(b) - - for { - t, r := cn.recv1() - switch t { - case 'C': - res, commandTag = cn.parseComplete(r.string()) - case 'Z': - cn.processReadyForQuery(r) - if res == nil && err == nil { - err = errUnexpectedReady - } - // done - return - case 'E': - err = parseError(r) - case 'I': - res = emptyRows - case 'T', 'D': - // ignore any results - default: - cn.err.set(driver.ErrBadConn) - 
errorf("unknown response for simple query: %q", t) - } - } -} - -func (cn *conn) simpleQuery(q string) (res *rows, err error) { - defer cn.errRecover(&err) - - b := cn.writeBuf('Q') - b.string(q) - cn.send(b) - - for { - t, r := cn.recv1() - switch t { - case 'C', 'I': - // We allow queries which don't return any results through Query as - // well as Exec. We still have to give database/sql a rows object - // the user can close, though, to avoid connections from being - // leaked. A "rows" with done=true works fine for that purpose. - if err != nil { - cn.err.set(driver.ErrBadConn) - errorf("unexpected message %q in simple query execution", t) - } - if res == nil { - res = &rows{ - cn: cn, - } - } - // Set the result and tag to the last command complete if there wasn't a - // query already run. Although queries usually return from here and cede - // control to Next, a query with zero results does not. - if t == 'C' { - res.result, res.tag = cn.parseComplete(r.string()) - if res.colNames != nil { - return - } - } - res.done = true - case 'Z': - cn.processReadyForQuery(r) - // done - return - case 'E': - res = nil - err = parseError(r) - case 'D': - if res == nil { - cn.err.set(driver.ErrBadConn) - errorf("unexpected DataRow in simple query execution") - } - // the query didn't fail; kick off to Next - cn.saveMessage(t, r) - return - case 'T': - // res might be non-nil here if we received a previous - // CommandComplete, but that's fine; just overwrite it - res = &rows{cn: cn} - res.rowsHeader = parsePortalRowDescribe(r) - - // To work around a bug in QueryRow in Go 1.2 and earlier, wait - // until the first DataRow has been received. 
- default: - cn.err.set(driver.ErrBadConn) - errorf("unknown response for simple query: %q", t) - } - } -} - -type noRows struct{} - -var emptyRows noRows - -var _ driver.Result = noRows{} - -func (noRows) LastInsertId() (int64, error) { - return 0, errNoLastInsertID -} - -func (noRows) RowsAffected() (int64, error) { - return 0, errNoRowsAffected -} - -// Decides which column formats to use for a prepared statement. The input is -// an array of type oids, one element per result column. -func decideColumnFormats( - colTyps []fieldDesc, forceText bool, -) (colFmts []format, colFmtData []byte) { - if len(colTyps) == 0 { - return nil, colFmtDataAllText - } - - colFmts = make([]format, len(colTyps)) - if forceText { - return colFmts, colFmtDataAllText - } - - allBinary := true - allText := true - for i, t := range colTyps { - switch t.OID { - // This is the list of types to use binary mode for when receiving them - // through a prepared statement. If a type appears in this list, it - // must also be implemented in binaryDecode in encode.go. 
- case oid.T_bytea: - fallthrough - case oid.T_int8: - fallthrough - case oid.T_int4: - fallthrough - case oid.T_int2: - fallthrough - case oid.T_uuid: - colFmts[i] = formatBinary - allText = false - - default: - allBinary = false - } - } - - if allBinary { - return colFmts, colFmtDataAllBinary - } else if allText { - return colFmts, colFmtDataAllText - } else { - colFmtData = make([]byte, 2+len(colFmts)*2) - binary.BigEndian.PutUint16(colFmtData, uint16(len(colFmts))) - for i, v := range colFmts { - binary.BigEndian.PutUint16(colFmtData[2+i*2:], uint16(v)) - } - return colFmts, colFmtData - } -} - -func (cn *conn) prepareTo(q, stmtName string) *stmt { - st := &stmt{cn: cn, name: stmtName} - - b := cn.writeBuf('P') - b.string(st.name) - b.string(q) - b.int16(0) - - b.next('D') - b.byte('S') - b.string(st.name) - - b.next('S') - cn.send(b) - - cn.readParseResponse() - st.paramTyps, st.colNames, st.colTyps = cn.readStatementDescribeResponse() - st.colFmts, st.colFmtData = decideColumnFormats(st.colTyps, cn.disablePreparedBinaryResult) - cn.readReadyForQuery() - return st -} - -func (cn *conn) Prepare(q string) (_ driver.Stmt, err error) { - if err := cn.err.get(); err != nil { - return nil, err - } - defer cn.errRecover(&err) - - if len(q) >= 4 && strings.EqualFold(q[:4], "COPY") { - s, err := cn.prepareCopyIn(q) - if err == nil { - cn.inCopy = true - } - return s, err - } - return cn.prepareTo(q, cn.gname()), nil -} - -func (cn *conn) Close() (err error) { - // Skip cn.bad return here because we always want to close a connection. - defer cn.errRecover(&err) - - // Ensure that cn.c.Close is always run. Since error handling is done with - // panics and cn.errRecover, the Close must be in a defer. - defer func() { - cerr := cn.c.Close() - if err == nil { - err = cerr - } - }() - - // Don't go through send(); ListenerConn relies on us not scribbling on the - // scratch buffer of this connection. 
- return cn.sendSimpleMessage('X') -} - -// Implement the "Queryer" interface -func (cn *conn) Query(query string, args []driver.Value) (driver.Rows, error) { - return cn.query(query, args) -} - -func (cn *conn) query(query string, args []driver.Value) (_ *rows, err error) { - if err := cn.err.get(); err != nil { - return nil, err - } - if cn.inCopy { - return nil, errCopyInProgress - } - defer cn.errRecover(&err) - - // Check to see if we can use the "simpleQuery" interface, which is - // *much* faster than going through prepare/exec - if len(args) == 0 { - return cn.simpleQuery(query) - } - - if cn.binaryParameters { - cn.sendBinaryModeQuery(query, args) - - cn.readParseResponse() - cn.readBindResponse() - rows := &rows{cn: cn} - rows.rowsHeader = cn.readPortalDescribeResponse() - cn.postExecuteWorkaround() - return rows, nil - } - st := cn.prepareTo(query, "") - st.exec(args) - return &rows{ - cn: cn, - rowsHeader: st.rowsHeader, - }, nil -} - -// Implement the optional "Execer" interface for one-shot queries -func (cn *conn) Exec(query string, args []driver.Value) (res driver.Result, err error) { - if err := cn.err.get(); err != nil { - return nil, err - } - defer cn.errRecover(&err) - - // Check to see if we can use the "simpleExec" interface, which is - // *much* faster than going through prepare/exec - if len(args) == 0 { - // ignore commandTag, our caller doesn't care - r, _, err := cn.simpleExec(query) - return r, err - } - - if cn.binaryParameters { - cn.sendBinaryModeQuery(query, args) - - cn.readParseResponse() - cn.readBindResponse() - cn.readPortalDescribeResponse() - cn.postExecuteWorkaround() - res, _, err = cn.readExecuteResponse("Execute") - return res, err - } - // Use the unnamed statement to defer planning until bind - // time, or else value-based selectivity estimates cannot be - // used. 
- st := cn.prepareTo(query, "") - r, err := st.Exec(args) - if err != nil { - panic(err) - } - return r, err -} - -type safeRetryError struct { - Err error -} - -func (se *safeRetryError) Error() string { - return se.Err.Error() -} - -func (cn *conn) send(m *writeBuf) { - n, err := cn.c.Write(m.wrap()) - if err != nil { - if n == 0 { - err = &safeRetryError{Err: err} - } - panic(err) - } -} - -func (cn *conn) sendStartupPacket(m *writeBuf) error { - _, err := cn.c.Write((m.wrap())[1:]) - return err -} - -// Send a message of type typ to the server on the other end of cn. The -// message should have no payload. This method does not use the scratch -// buffer. -func (cn *conn) sendSimpleMessage(typ byte) (err error) { - _, err = cn.c.Write([]byte{typ, '\x00', '\x00', '\x00', '\x04'}) - return err -} - -// saveMessage memorizes a message and its buffer in the conn struct. -// recvMessage will then return these values on the next call to it. This -// method is useful in cases where you have to see what the next message is -// going to be (e.g. to see whether it's an error or not) but you can't handle -// the message yourself. -func (cn *conn) saveMessage(typ byte, buf *readBuf) { - if cn.saveMessageType != 0 { - cn.err.set(driver.ErrBadConn) - errorf("unexpected saveMessageType %d", cn.saveMessageType) - } - cn.saveMessageType = typ - cn.saveMessageBuffer = *buf -} - -// recvMessage receives any message from the backend, or returns an error if -// a problem occurred while reading the message. 
-func (cn *conn) recvMessage(r *readBuf) (byte, error) { - // workaround for a QueryRow bug, see exec - if cn.saveMessageType != 0 { - t := cn.saveMessageType - *r = cn.saveMessageBuffer - cn.saveMessageType = 0 - cn.saveMessageBuffer = nil - return t, nil - } - - x := cn.scratch[:5] - _, err := io.ReadFull(cn.buf, x) - if err != nil { - return 0, err - } - - // read the type and length of the message that follows - t := x[0] - n := int(binary.BigEndian.Uint32(x[1:])) - 4 - var y []byte - if n <= len(cn.scratch) { - y = cn.scratch[:n] - } else { - y = make([]byte, n) - } - _, err = io.ReadFull(cn.buf, y) - if err != nil { - return 0, err - } - *r = y - return t, nil -} - -// recv receives a message from the backend, but if an error happened while -// reading the message or the received message was an ErrorResponse, it panics. -// NoticeResponses are ignored. This function should generally be used only -// during the startup sequence. -func (cn *conn) recv() (t byte, r *readBuf) { - for { - var err error - r = &readBuf{} - t, err = cn.recvMessage(r) - if err != nil { - panic(err) - } - switch t { - case 'E': - panic(parseError(r)) - case 'N': - if n := cn.noticeHandler; n != nil { - n(parseError(r)) - } - case 'A': - if n := cn.notificationHandler; n != nil { - n(recvNotification(r)) - } - default: - return - } - } -} - -// recv1Buf is exactly equivalent to recv1, except it uses a buffer supplied by -// the caller to avoid an allocation. -func (cn *conn) recv1Buf(r *readBuf) byte { - for { - t, err := cn.recvMessage(r) - if err != nil { - panic(err) - } - - switch t { - case 'A': - if n := cn.notificationHandler; n != nil { - n(recvNotification(r)) - } - case 'N': - if n := cn.noticeHandler; n != nil { - n(parseError(r)) - } - case 'S': - cn.processParameterStatus(r) - default: - return t - } - } -} - -// recv1 receives a message from the backend, panicking if an error occurs -// while attempting to read it. 
All asynchronous messages are ignored, with -// the exception of ErrorResponse. -func (cn *conn) recv1() (t byte, r *readBuf) { - r = &readBuf{} - t = cn.recv1Buf(r) - return t, r -} - -func (cn *conn) ssl(o values) error { - upgrade, err := ssl(o) - if err != nil { - return err - } - - if upgrade == nil { - // Nothing to do - return nil - } - - w := cn.writeBuf(0) - w.int32(80877103) - if err = cn.sendStartupPacket(w); err != nil { - return err - } - - b := cn.scratch[:1] - _, err = io.ReadFull(cn.c, b) - if err != nil { - return err - } - - if b[0] != 'S' { - return ErrSSLNotSupported - } - - cn.c, err = upgrade(cn.c) - return err -} - -// isDriverSetting returns true iff a setting is purely for configuring the -// driver's options and should not be sent to the server in the connection -// startup packet. -func isDriverSetting(key string) bool { - switch key { - case "host", "port": - return true - case "password": - return true - case "sslmode", "sslcert", "sslkey", "sslrootcert", "sslinline", "sslsni": - return true - case "fallback_application_name": - return true - case "connect_timeout": - return true - case "disable_prepared_binary_result": - return true - case "binary_parameters": - return true - case "krbsrvname": - return true - case "krbspn": - return true - default: - return false - } -} - -func (cn *conn) startup(o values) { - w := cn.writeBuf(0) - w.int32(196608) - // Send the backend the name of the database we want to connect to, and the - // user we want to connect as. Additionally, we send over any run-time - // parameters potentially included in the connection string. If the server - // doesn't recognize any of them, it will reply with an error. - for k, v := range o { - if isDriverSetting(k) { - // skip options which can't be run-time parameters - continue - } - // The protocol requires us to supply the database name as "database" - // instead of "dbname". 
- if k == "dbname" { - k = "database" - } - w.string(k) - w.string(v) - } - w.string("") - if err := cn.sendStartupPacket(w); err != nil { - panic(err) - } - - for { - t, r := cn.recv() - switch t { - case 'K': - cn.processBackendKeyData(r) - case 'S': - cn.processParameterStatus(r) - case 'R': - cn.auth(r, o) - case 'Z': - cn.processReadyForQuery(r) - return - default: - errorf("unknown response for startup: %q", t) - } - } -} - -func (cn *conn) auth(r *readBuf, o values) { - switch code := r.int32(); code { - case 0: - // OK - case 3: - w := cn.writeBuf('p') - w.string(o["password"]) - cn.send(w) - - t, r := cn.recv() - if t != 'R' { - errorf("unexpected password response: %q", t) - } - - if r.int32() != 0 { - errorf("unexpected authentication response: %q", t) - } - case 5: - s := string(r.next(4)) - w := cn.writeBuf('p') - w.string("md5" + md5s(md5s(o["password"]+o["user"])+s)) - cn.send(w) - - t, r := cn.recv() - if t != 'R' { - errorf("unexpected password response: %q", t) - } - - if r.int32() != 0 { - errorf("unexpected authentication response: %q", t) - } - case 7: // GSSAPI, startup - if newGss == nil { - errorf("kerberos error: no GSSAPI provider registered (import github.com/lib/pq/auth/kerberos if you need Kerberos support)") - } - cli, err := newGss() - if err != nil { - errorf("kerberos error: %s", err.Error()) - } - - var token []byte - - if spn, ok := o["krbspn"]; ok { - // Use the supplied SPN if provided.. 
- token, err = cli.GetInitTokenFromSpn(spn) - } else { - // Allow the kerberos service name to be overridden - service := "postgres" - if val, ok := o["krbsrvname"]; ok { - service = val - } - - token, err = cli.GetInitToken(o["host"], service) - } - - if err != nil { - errorf("failed to get Kerberos ticket: %q", err) - } - - w := cn.writeBuf('p') - w.bytes(token) - cn.send(w) - - // Store for GSSAPI continue message - cn.gss = cli - - case 8: // GSSAPI continue - - if cn.gss == nil { - errorf("GSSAPI protocol error") - } - - b := []byte(*r) - - done, tokOut, err := cn.gss.Continue(b) - if err == nil && !done { - w := cn.writeBuf('p') - w.bytes(tokOut) - cn.send(w) - } - - // Errors fall through and read the more detailed message - // from the server.. - - case 10: - sc := scram.NewClient(sha256.New, o["user"], o["password"]) - sc.Step(nil) - if sc.Err() != nil { - errorf("SCRAM-SHA-256 error: %s", sc.Err().Error()) - } - scOut := sc.Out() - - w := cn.writeBuf('p') - w.string("SCRAM-SHA-256") - w.int32(len(scOut)) - w.bytes(scOut) - cn.send(w) - - t, r := cn.recv() - if t != 'R' { - errorf("unexpected password response: %q", t) - } - - if r.int32() != 11 { - errorf("unexpected authentication response: %q", t) - } - - nextStep := r.next(len(*r)) - sc.Step(nextStep) - if sc.Err() != nil { - errorf("SCRAM-SHA-256 error: %s", sc.Err().Error()) - } - - scOut = sc.Out() - w = cn.writeBuf('p') - w.bytes(scOut) - cn.send(w) - - t, r = cn.recv() - if t != 'R' { - errorf("unexpected password response: %q", t) - } - - if r.int32() != 12 { - errorf("unexpected authentication response: %q", t) - } - - nextStep = r.next(len(*r)) - sc.Step(nextStep) - if sc.Err() != nil { - errorf("SCRAM-SHA-256 error: %s", sc.Err().Error()) - } - - default: - errorf("unknown authentication response: %d", code) - } -} - -type format int - -const formatText format = 0 -const formatBinary format = 1 - -// One result-column format code with the value 1 (i.e. all binary). 
-var colFmtDataAllBinary = []byte{0, 1, 0, 1} - -// No result-column format codes (i.e. all text). -var colFmtDataAllText = []byte{0, 0} - -type stmt struct { - cn *conn - name string - rowsHeader - colFmtData []byte - paramTyps []oid.Oid - closed bool -} - -func (st *stmt) Close() (err error) { - if st.closed { - return nil - } - if err := st.cn.err.get(); err != nil { - return err - } - defer st.cn.errRecover(&err) - - w := st.cn.writeBuf('C') - w.byte('S') - w.string(st.name) - st.cn.send(w) - - st.cn.send(st.cn.writeBuf('S')) - - t, _ := st.cn.recv1() - if t != '3' { - st.cn.err.set(driver.ErrBadConn) - errorf("unexpected close response: %q", t) - } - st.closed = true - - t, r := st.cn.recv1() - if t != 'Z' { - st.cn.err.set(driver.ErrBadConn) - errorf("expected ready for query, but got: %q", t) - } - st.cn.processReadyForQuery(r) - - return nil -} - -func (st *stmt) Query(v []driver.Value) (r driver.Rows, err error) { - return st.query(v) -} - -func (st *stmt) query(v []driver.Value) (r *rows, err error) { - if err := st.cn.err.get(); err != nil { - return nil, err - } - defer st.cn.errRecover(&err) - - st.exec(v) - return &rows{ - cn: st.cn, - rowsHeader: st.rowsHeader, - }, nil -} - -func (st *stmt) Exec(v []driver.Value) (res driver.Result, err error) { - if err := st.cn.err.get(); err != nil { - return nil, err - } - defer st.cn.errRecover(&err) - - st.exec(v) - res, _, err = st.cn.readExecuteResponse("simple query") - return res, err -} - -func (st *stmt) exec(v []driver.Value) { - if len(v) >= 65536 { - errorf("got %d parameters but PostgreSQL only supports 65535 parameters", len(v)) - } - if len(v) != len(st.paramTyps) { - errorf("got %d parameters but the statement requires %d", len(v), len(st.paramTyps)) - } - - cn := st.cn - w := cn.writeBuf('B') - w.byte(0) // unnamed portal - w.string(st.name) - - if cn.binaryParameters { - cn.sendBinaryParameters(w, v) - } else { - w.int16(0) - w.int16(len(v)) - for i, x := range v { - if x == nil { - w.int32(-1) 
- } else { - b := encode(&cn.parameterStatus, x, st.paramTyps[i]) - w.int32(len(b)) - w.bytes(b) - } - } - } - w.bytes(st.colFmtData) - - w.next('E') - w.byte(0) - w.int32(0) - - w.next('S') - cn.send(w) - - cn.readBindResponse() - cn.postExecuteWorkaround() - -} - -func (st *stmt) NumInput() int { - return len(st.paramTyps) -} - -// parseComplete parses the "command tag" from a CommandComplete message, and -// returns the number of rows affected (if applicable) and a string -// identifying only the command that was executed, e.g. "ALTER TABLE". If the -// command tag could not be parsed, parseComplete panics. -func (cn *conn) parseComplete(commandTag string) (driver.Result, string) { - commandsWithAffectedRows := []string{ - "SELECT ", - // INSERT is handled below - "UPDATE ", - "DELETE ", - "FETCH ", - "MOVE ", - "COPY ", - } - - var affectedRows *string - for _, tag := range commandsWithAffectedRows { - if strings.HasPrefix(commandTag, tag) { - t := commandTag[len(tag):] - affectedRows = &t - commandTag = tag[:len(tag)-1] - break - } - } - // INSERT also includes the oid of the inserted row in its command tag. - // Oids in user tables are deprecated, and the oid is only returned when - // exactly one row is inserted, so it's unlikely to be of value to any - // real-world application and we can ignore it. 
- if affectedRows == nil && strings.HasPrefix(commandTag, "INSERT ") { - parts := strings.Split(commandTag, " ") - if len(parts) != 3 { - cn.err.set(driver.ErrBadConn) - errorf("unexpected INSERT command tag %s", commandTag) - } - affectedRows = &parts[len(parts)-1] - commandTag = "INSERT" - } - // There should be no affected rows attached to the tag, just return it - if affectedRows == nil { - return driver.RowsAffected(0), commandTag - } - n, err := strconv.ParseInt(*affectedRows, 10, 64) - if err != nil { - cn.err.set(driver.ErrBadConn) - errorf("could not parse commandTag: %s", err) - } - return driver.RowsAffected(n), commandTag -} - -type rowsHeader struct { - colNames []string - colTyps []fieldDesc - colFmts []format -} - -type rows struct { - cn *conn - finish func() - rowsHeader - done bool - rb readBuf - result driver.Result - tag string - - next *rowsHeader -} - -func (rs *rows) Close() error { - if finish := rs.finish; finish != nil { - defer finish() - } - // no need to look at cn.bad as Next() will - for { - err := rs.Next(nil) - switch err { - case nil: - case io.EOF: - // rs.Next can return io.EOF on both 'Z' (ready for query) and 'T' (row - // description, used with HasNextResultSet). We need to fetch messages until - // we hit a 'Z', which is done by waiting for done to be set. 
- if rs.done { - return nil - } - default: - return err - } - } -} - -func (rs *rows) Columns() []string { - return rs.colNames -} - -func (rs *rows) Result() driver.Result { - if rs.result == nil { - return emptyRows - } - return rs.result -} - -func (rs *rows) Tag() string { - return rs.tag -} - -func (rs *rows) Next(dest []driver.Value) (err error) { - if rs.done { - return io.EOF - } - - conn := rs.cn - if err := conn.err.getForNext(); err != nil { - return err - } - defer conn.errRecover(&err) - - for { - t := conn.recv1Buf(&rs.rb) - switch t { - case 'E': - err = parseError(&rs.rb) - case 'C', 'I': - if t == 'C' { - rs.result, rs.tag = conn.parseComplete(rs.rb.string()) - } - continue - case 'Z': - conn.processReadyForQuery(&rs.rb) - rs.done = true - if err != nil { - return err - } - return io.EOF - case 'D': - n := rs.rb.int16() - if err != nil { - conn.err.set(driver.ErrBadConn) - errorf("unexpected DataRow after error %s", err) - } - if n < len(dest) { - dest = dest[:n] - } - for i := range dest { - l := rs.rb.int32() - if l == -1 { - dest[i] = nil - continue - } - dest[i] = decode(&conn.parameterStatus, rs.rb.next(l), rs.colTyps[i].OID, rs.colFmts[i]) - } - return - case 'T': - next := parsePortalRowDescribe(&rs.rb) - rs.next = &next - return io.EOF - default: - errorf("unexpected message after execute: %q", t) - } - } -} - -func (rs *rows) HasNextResultSet() bool { - hasNext := rs.next != nil && !rs.done - return hasNext -} - -func (rs *rows) NextResultSet() error { - if rs.next == nil { - return io.EOF - } - rs.rowsHeader = *rs.next - rs.next = nil - return nil -} - -// QuoteIdentifier quotes an "identifier" (e.g. a table or a column name) to be -// used as part of an SQL statement. For example: -// -// tblname := "my_table" -// data := "my_data" -// quoted := pq.QuoteIdentifier(tblname) -// err := db.Exec(fmt.Sprintf("INSERT INTO %s VALUES ($1)", quoted), data) -// -// Any double quotes in name will be escaped. 
The quoted identifier will be -// case sensitive when used in a query. If the input string contains a zero -// byte, the result will be truncated immediately before it. -func QuoteIdentifier(name string) string { - end := strings.IndexRune(name, 0) - if end > -1 { - name = name[:end] - } - return `"` + strings.Replace(name, `"`, `""`, -1) + `"` -} - -// BufferQuoteIdentifier satisfies the same purpose as QuoteIdentifier, but backed by a -// byte buffer. -func BufferQuoteIdentifier(name string, buffer *bytes.Buffer) { - end := strings.IndexRune(name, 0) - if end > -1 { - name = name[:end] - } - buffer.WriteRune('"') - buffer.WriteString(strings.Replace(name, `"`, `""`, -1)) - buffer.WriteRune('"') -} - -// QuoteLiteral quotes a 'literal' (e.g. a parameter, often used to pass literal -// to DDL and other statements that do not accept parameters) to be used as part -// of an SQL statement. For example: -// -// exp_date := pq.QuoteLiteral("2023-01-05 15:00:00Z") -// err := db.Exec(fmt.Sprintf("CREATE ROLE my_user VALID UNTIL %s", exp_date)) -// -// Any single quotes in name will be escaped. Any backslashes (i.e. "\") will be -// replaced by two backslashes (i.e. "\\") and the C-style escape identifier -// that PostgreSQL provides ('E') will be prepended to the string. -func QuoteLiteral(literal string) string { - // This follows the PostgreSQL internal algorithm for handling quoted literals - // from libpq, which can be found in the "PQEscapeStringInternal" function, - // which is found in the libpq/fe-exec.c source file: - // https://git.postgresql.org/gitweb/?p=postgresql.git;a=blob;f=src/interfaces/libpq/fe-exec.c - // - // substitute any single-quotes (') with two single-quotes ('') - literal = strings.Replace(literal, `'`, `''`, -1) - // determine if the string has any backslashes (\) in it. - // if it does, replace any backslashes (\) with two backslashes (\\) - // then, we need to wrap the entire string with a PostgreSQL - // C-style escape. 
Per how "PQEscapeStringInternal" handles this case, we - // also add a space before the "E" - if strings.Contains(literal, `\`) { - literal = strings.Replace(literal, `\`, `\\`, -1) - literal = ` E'` + literal + `'` - } else { - // otherwise, we can just wrap the literal with a pair of single quotes - literal = `'` + literal + `'` - } - return literal -} - -func md5s(s string) string { - h := md5.New() - h.Write([]byte(s)) - return fmt.Sprintf("%x", h.Sum(nil)) -} - -func (cn *conn) sendBinaryParameters(b *writeBuf, args []driver.Value) { - // Do one pass over the parameters to see if we're going to send any of - // them over in binary. If we are, create a paramFormats array at the - // same time. - var paramFormats []int - for i, x := range args { - _, ok := x.([]byte) - if ok { - if paramFormats == nil { - paramFormats = make([]int, len(args)) - } - paramFormats[i] = 1 - } - } - if paramFormats == nil { - b.int16(0) - } else { - b.int16(len(paramFormats)) - for _, x := range paramFormats { - b.int16(x) - } - } - - b.int16(len(args)) - for _, x := range args { - if x == nil { - b.int32(-1) - } else { - datum := binaryEncode(&cn.parameterStatus, x) - b.int32(len(datum)) - b.bytes(datum) - } - } -} - -func (cn *conn) sendBinaryModeQuery(query string, args []driver.Value) { - if len(args) >= 65536 { - errorf("got %d parameters but PostgreSQL only supports 65535 parameters", len(args)) - } - - b := cn.writeBuf('P') - b.byte(0) // unnamed statement - b.string(query) - b.int16(0) - - b.next('B') - b.int16(0) // unnamed portal and statement - cn.sendBinaryParameters(b, args) - b.bytes(colFmtDataAllText) - - b.next('D') - b.byte('P') - b.byte(0) // unnamed portal - - b.next('E') - b.byte(0) - b.int32(0) - - b.next('S') - cn.send(b) -} - -func (cn *conn) processParameterStatus(r *readBuf) { - var err error - - param := r.string() - switch param { - case "server_version": - var major1 int - var major2 int - _, err = fmt.Sscanf(r.string(), "%d.%d", &major1, &major2) - if err 
== nil { - cn.parameterStatus.serverVersion = major1*10000 + major2*100 - } - - case "TimeZone": - cn.parameterStatus.currentLocation, err = time.LoadLocation(r.string()) - if err != nil { - cn.parameterStatus.currentLocation = nil - } - - default: - // ignore - } -} - -func (cn *conn) processReadyForQuery(r *readBuf) { - cn.txnStatus = transactionStatus(r.byte()) -} - -func (cn *conn) readReadyForQuery() { - t, r := cn.recv1() - switch t { - case 'Z': - cn.processReadyForQuery(r) - return - default: - cn.err.set(driver.ErrBadConn) - errorf("unexpected message %q; expected ReadyForQuery", t) - } -} - -func (cn *conn) processBackendKeyData(r *readBuf) { - cn.processID = r.int32() - cn.secretKey = r.int32() -} - -func (cn *conn) readParseResponse() { - t, r := cn.recv1() - switch t { - case '1': - return - case 'E': - err := parseError(r) - cn.readReadyForQuery() - panic(err) - default: - cn.err.set(driver.ErrBadConn) - errorf("unexpected Parse response %q", t) - } -} - -func (cn *conn) readStatementDescribeResponse() ( - paramTyps []oid.Oid, - colNames []string, - colTyps []fieldDesc, -) { - for { - t, r := cn.recv1() - switch t { - case 't': - nparams := r.int16() - paramTyps = make([]oid.Oid, nparams) - for i := range paramTyps { - paramTyps[i] = r.oid() - } - case 'n': - return paramTyps, nil, nil - case 'T': - colNames, colTyps = parseStatementRowDescribe(r) - return paramTyps, colNames, colTyps - case 'E': - err := parseError(r) - cn.readReadyForQuery() - panic(err) - default: - cn.err.set(driver.ErrBadConn) - errorf("unexpected Describe statement response %q", t) - } - } -} - -func (cn *conn) readPortalDescribeResponse() rowsHeader { - t, r := cn.recv1() - switch t { - case 'T': - return parsePortalRowDescribe(r) - case 'n': - return rowsHeader{} - case 'E': - err := parseError(r) - cn.readReadyForQuery() - panic(err) - default: - cn.err.set(driver.ErrBadConn) - errorf("unexpected Describe response %q", t) - } - panic("not reached") -} - -func (cn *conn) 
readBindResponse() { - t, r := cn.recv1() - switch t { - case '2': - return - case 'E': - err := parseError(r) - cn.readReadyForQuery() - panic(err) - default: - cn.err.set(driver.ErrBadConn) - errorf("unexpected Bind response %q", t) - } -} - -func (cn *conn) postExecuteWorkaround() { - // Work around a bug in sql.DB.QueryRow: in Go 1.2 and earlier it ignores - // any errors from rows.Next, which masks errors that happened during the - // execution of the query. To avoid the problem in common cases, we wait - // here for one more message from the database. If it's not an error the - // query will likely succeed (or perhaps has already, if it's a - // CommandComplete), so we push the message into the conn struct; recv1 - // will return it as the next message for rows.Next or rows.Close. - // However, if it's an error, we wait until ReadyForQuery and then return - // the error to our caller. - for { - t, r := cn.recv1() - switch t { - case 'E': - err := parseError(r) - cn.readReadyForQuery() - panic(err) - case 'C', 'D', 'I': - // the query didn't fail, but we can't process this message - cn.saveMessage(t, r) - return - default: - cn.err.set(driver.ErrBadConn) - errorf("unexpected message during extended query execution: %q", t) - } - } -} - -// Only for Exec(), since we ignore the returned data -func (cn *conn) readExecuteResponse( - protocolState string, -) (res driver.Result, commandTag string, err error) { - for { - t, r := cn.recv1() - switch t { - case 'C': - if err != nil { - cn.err.set(driver.ErrBadConn) - errorf("unexpected CommandComplete after error %s", err) - } - res, commandTag = cn.parseComplete(r.string()) - case 'Z': - cn.processReadyForQuery(r) - if res == nil && err == nil { - err = errUnexpectedReady - } - return res, commandTag, err - case 'E': - err = parseError(r) - case 'T', 'D', 'I': - if err != nil { - cn.err.set(driver.ErrBadConn) - errorf("unexpected %q after error %s", t, err) - } - if t == 'I' { - res = emptyRows - } - // ignore any 
results - default: - cn.err.set(driver.ErrBadConn) - errorf("unknown %s response: %q", protocolState, t) - } - } -} - -func parseStatementRowDescribe(r *readBuf) (colNames []string, colTyps []fieldDesc) { - n := r.int16() - colNames = make([]string, n) - colTyps = make([]fieldDesc, n) - for i := range colNames { - colNames[i] = r.string() - r.next(6) - colTyps[i].OID = r.oid() - colTyps[i].Len = r.int16() - colTyps[i].Mod = r.int32() - // format code not known when describing a statement; always 0 - r.next(2) - } - return -} - -func parsePortalRowDescribe(r *readBuf) rowsHeader { - n := r.int16() - colNames := make([]string, n) - colFmts := make([]format, n) - colTyps := make([]fieldDesc, n) - for i := range colNames { - colNames[i] = r.string() - r.next(6) - colTyps[i].OID = r.oid() - colTyps[i].Len = r.int16() - colTyps[i].Mod = r.int32() - colFmts[i] = format(r.int16()) - } - return rowsHeader{ - colNames: colNames, - colFmts: colFmts, - colTyps: colTyps, - } -} - -// parseEnviron tries to mimic some of libpq's environment handling -// -// To ease testing, it does not directly reference os.Environ, but is -// designed to accept its output. -// -// Environment-set connection information is intended to have a higher -// precedence than a library default but lower than any explicitly -// passed information (such as in the URL or connection string). -func parseEnviron(env []string) (out map[string]string) { - out = make(map[string]string) - - for _, v := range env { - parts := strings.SplitN(v, "=", 2) - - accrue := func(keyname string) { - out[keyname] = parts[1] - } - unsupported := func() { - panic(fmt.Sprintf("setting %v not supported", parts[0])) - } - - // The order of these is the same as is seen in the - // PostgreSQL 9.1 manual. Unsupported but well-defined - // keys cause a panic; these should be unset prior to - // execution. 
Options which pq expects to be set to a - // certain value are allowed, but must be set to that - // value if present (they can, of course, be absent). - switch parts[0] { - case "PGHOST": - accrue("host") - case "PGHOSTADDR": - unsupported() - case "PGPORT": - accrue("port") - case "PGDATABASE": - accrue("dbname") - case "PGUSER": - accrue("user") - case "PGPASSWORD": - accrue("password") - case "PGSERVICE", "PGSERVICEFILE", "PGREALM": - unsupported() - case "PGOPTIONS": - accrue("options") - case "PGAPPNAME": - accrue("application_name") - case "PGSSLMODE": - accrue("sslmode") - case "PGSSLCERT": - accrue("sslcert") - case "PGSSLKEY": - accrue("sslkey") - case "PGSSLROOTCERT": - accrue("sslrootcert") - case "PGSSLSNI": - accrue("sslsni") - case "PGREQUIRESSL", "PGSSLCRL": - unsupported() - case "PGREQUIREPEER": - unsupported() - case "PGKRBSRVNAME", "PGGSSLIB": - unsupported() - case "PGCONNECT_TIMEOUT": - accrue("connect_timeout") - case "PGCLIENTENCODING": - accrue("client_encoding") - case "PGDATESTYLE": - accrue("datestyle") - case "PGTZ": - accrue("timezone") - case "PGGEQO": - accrue("geqo") - case "PGSYSCONFDIR", "PGLOCALEDIR": - unsupported() - } - } - - return out -} - -// isUTF8 returns whether name is a fuzzy variation of the string "UTF-8". -func isUTF8(name string) bool { - // Recognize all sorts of silly things as "UTF-8", like Postgres does - s := strings.Map(alnumLowerASCII, name) - return s == "utf8" || s == "unicode" -} - -func alnumLowerASCII(ch rune) rune { - if 'A' <= ch && ch <= 'Z' { - return ch + ('a' - 'A') - } - if 'a' <= ch && ch <= 'z' || '0' <= ch && ch <= '9' { - return ch - } - return -1 // discard -} - -// The database/sql/driver package says: -// All Conn implementations should implement the following interfaces: Pinger, SessionResetter, and Validator. 
-var _ driver.Pinger = &conn{} -var _ driver.SessionResetter = &conn{} - -func (cn *conn) ResetSession(ctx context.Context) error { - // Ensure bad connections are reported: From database/sql/driver: - // If a connection is never returned to the connection pool but immediately reused, then - // ResetSession is called prior to reuse but IsValid is not called. - return cn.err.get() -} - -func (cn *conn) IsValid() bool { - return cn.err.get() == nil -} diff --git a/vendor/github.com/lib/pq/conn_go115.go b/vendor/github.com/lib/pq/conn_go115.go deleted file mode 100644 index f4ef030f..00000000 --- a/vendor/github.com/lib/pq/conn_go115.go +++ /dev/null @@ -1,8 +0,0 @@ -//go:build go1.15 -// +build go1.15 - -package pq - -import "database/sql/driver" - -var _ driver.Validator = &conn{} diff --git a/vendor/github.com/lib/pq/conn_go18.go b/vendor/github.com/lib/pq/conn_go18.go deleted file mode 100644 index 63d4ca6a..00000000 --- a/vendor/github.com/lib/pq/conn_go18.go +++ /dev/null @@ -1,247 +0,0 @@ -package pq - -import ( - "context" - "database/sql" - "database/sql/driver" - "fmt" - "io" - "io/ioutil" - "time" -) - -const ( - watchCancelDialContextTimeout = time.Second * 10 -) - -// Implement the "QueryerContext" interface -func (cn *conn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) { - list := make([]driver.Value, len(args)) - for i, nv := range args { - list[i] = nv.Value - } - finish := cn.watchCancel(ctx) - r, err := cn.query(query, list) - if err != nil { - if finish != nil { - finish() - } - return nil, err - } - r.finish = finish - return r, nil -} - -// Implement the "ExecerContext" interface -func (cn *conn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) { - list := make([]driver.Value, len(args)) - for i, nv := range args { - list[i] = nv.Value - } - - if finish := cn.watchCancel(ctx); finish != nil { - defer finish() - } - - return cn.Exec(query, list) -} - -// 
Implement the "ConnPrepareContext" interface -func (cn *conn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) { - if finish := cn.watchCancel(ctx); finish != nil { - defer finish() - } - return cn.Prepare(query) -} - -// Implement the "ConnBeginTx" interface -func (cn *conn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) { - var mode string - - switch sql.IsolationLevel(opts.Isolation) { - case sql.LevelDefault: - // Don't touch mode: use the server's default - case sql.LevelReadUncommitted: - mode = " ISOLATION LEVEL READ UNCOMMITTED" - case sql.LevelReadCommitted: - mode = " ISOLATION LEVEL READ COMMITTED" - case sql.LevelRepeatableRead: - mode = " ISOLATION LEVEL REPEATABLE READ" - case sql.LevelSerializable: - mode = " ISOLATION LEVEL SERIALIZABLE" - default: - return nil, fmt.Errorf("pq: isolation level not supported: %d", opts.Isolation) - } - - if opts.ReadOnly { - mode += " READ ONLY" - } else { - mode += " READ WRITE" - } - - tx, err := cn.begin(mode) - if err != nil { - return nil, err - } - cn.txnFinish = cn.watchCancel(ctx) - return tx, nil -} - -func (cn *conn) Ping(ctx context.Context) error { - if finish := cn.watchCancel(ctx); finish != nil { - defer finish() - } - rows, err := cn.simpleQuery(";") - if err != nil { - return driver.ErrBadConn // https://golang.org/pkg/database/sql/driver/#Pinger - } - rows.Close() - return nil -} - -func (cn *conn) watchCancel(ctx context.Context) func() { - if done := ctx.Done(); done != nil { - finished := make(chan struct{}, 1) - go func() { - select { - case <-done: - select { - case finished <- struct{}{}: - default: - // We raced with the finish func, let the next query handle this with the - // context. - return - } - - // Set the connection state to bad so it does not get reused. - cn.err.set(ctx.Err()) - - // At this point the function level context is canceled, - // so it must not be used for the additional network - // request to cancel the query. 
- // Create a new context to pass into the dial. - ctxCancel, cancel := context.WithTimeout(context.Background(), watchCancelDialContextTimeout) - defer cancel() - - _ = cn.cancel(ctxCancel) - case <-finished: - } - }() - return func() { - select { - case <-finished: - cn.err.set(ctx.Err()) - cn.Close() - case finished <- struct{}{}: - } - } - } - return nil -} - -func (cn *conn) cancel(ctx context.Context) error { - // Create a new values map (copy). This makes sure the connection created - // in this method cannot write to the same underlying data, which could - // cause a concurrent map write panic. This is necessary because cancel - // is called from a goroutine in watchCancel. - o := make(values) - for k, v := range cn.opts { - o[k] = v - } - - c, err := dial(ctx, cn.dialer, o) - if err != nil { - return err - } - defer c.Close() - - { - can := conn{ - c: c, - } - err = can.ssl(o) - if err != nil { - return err - } - - w := can.writeBuf(0) - w.int32(80877102) // cancel request code - w.int32(cn.processID) - w.int32(cn.secretKey) - - if err := can.sendStartupPacket(w); err != nil { - return err - } - } - - // Read until EOF to ensure that the server received the cancel. 
- { - _, err := io.Copy(ioutil.Discard, c) - return err - } -} - -// Implement the "StmtQueryContext" interface -func (st *stmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) { - list := make([]driver.Value, len(args)) - for i, nv := range args { - list[i] = nv.Value - } - finish := st.watchCancel(ctx) - r, err := st.query(list) - if err != nil { - if finish != nil { - finish() - } - return nil, err - } - r.finish = finish - return r, nil -} - -// Implement the "StmtExecContext" interface -func (st *stmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) { - list := make([]driver.Value, len(args)) - for i, nv := range args { - list[i] = nv.Value - } - - if finish := st.watchCancel(ctx); finish != nil { - defer finish() - } - - return st.Exec(list) -} - -// watchCancel is implemented on stmt in order to not mark the parent conn as bad -func (st *stmt) watchCancel(ctx context.Context) func() { - if done := ctx.Done(); done != nil { - finished := make(chan struct{}) - go func() { - select { - case <-done: - // At this point the function level context is canceled, - // so it must not be used for the additional network - // request to cancel the query. - // Create a new context to pass into the dial. 
- ctxCancel, cancel := context.WithTimeout(context.Background(), watchCancelDialContextTimeout) - defer cancel() - - _ = st.cancel(ctxCancel) - finished <- struct{}{} - case <-finished: - } - }() - return func() { - select { - case <-finished: - case finished <- struct{}{}: - } - } - } - return nil -} - -func (st *stmt) cancel(ctx context.Context) error { - return st.cn.cancel(ctx) -} diff --git a/vendor/github.com/lib/pq/connector.go b/vendor/github.com/lib/pq/connector.go deleted file mode 100644 index 1145e122..00000000 --- a/vendor/github.com/lib/pq/connector.go +++ /dev/null @@ -1,120 +0,0 @@ -package pq - -import ( - "context" - "database/sql/driver" - "errors" - "fmt" - "os" - "strings" -) - -// Connector represents a fixed configuration for the pq driver with a given -// name. Connector satisfies the database/sql/driver Connector interface and -// can be used to create any number of DB Conn's via the database/sql OpenDB -// function. -// -// See https://golang.org/pkg/database/sql/driver/#Connector. -// See https://golang.org/pkg/database/sql/#OpenDB. -type Connector struct { - opts values - dialer Dialer -} - -// Connect returns a connection to the database using the fixed configuration -// of this Connector. Context is not used. -func (c *Connector) Connect(ctx context.Context) (driver.Conn, error) { - return c.open(ctx) -} - -// Dialer allows change the dialer used to open connections. -func (c *Connector) Dialer(dialer Dialer) { - c.dialer = dialer -} - -// Driver returns the underlying driver of this Connector. -func (c *Connector) Driver() driver.Driver { - return &Driver{} -} - -// NewConnector returns a connector for the pq driver in a fixed configuration -// with the given dsn. The returned connector can be used to create any number -// of equivalent Conn's. The returned connector is intended to be used with -// database/sql.OpenDB. -// -// See https://golang.org/pkg/database/sql/driver/#Connector. 
-// See https://golang.org/pkg/database/sql/#OpenDB. -func NewConnector(dsn string) (*Connector, error) { - var err error - o := make(values) - - // A number of defaults are applied here, in this order: - // - // * Very low precedence defaults applied in every situation - // * Environment variables - // * Explicitly passed connection information - o["host"] = "localhost" - o["port"] = "5432" - // N.B.: Extra float digits should be set to 3, but that breaks - // Postgres 8.4 and older, where the max is 2. - o["extra_float_digits"] = "2" - for k, v := range parseEnviron(os.Environ()) { - o[k] = v - } - - if strings.HasPrefix(dsn, "postgres://") || strings.HasPrefix(dsn, "postgresql://") { - dsn, err = ParseURL(dsn) - if err != nil { - return nil, err - } - } - - if err := parseOpts(dsn, o); err != nil { - return nil, err - } - - // Use the "fallback" application name if necessary - if fallback, ok := o["fallback_application_name"]; ok { - if _, ok := o["application_name"]; !ok { - o["application_name"] = fallback - } - } - - // We can't work with any client_encoding other than UTF-8 currently. - // However, we have historically allowed the user to set it to UTF-8 - // explicitly, and there's no reason to break such programs, so allow that. - // Note that the "options" setting could also set client_encoding, but - // parsing its value is not worth it. Instead, we always explicitly send - // client_encoding as a separate run-time parameter, which should override - // anything set in options. - if enc, ok := o["client_encoding"]; ok && !isUTF8(enc) { - return nil, errors.New("client_encoding must be absent or 'UTF8'") - } - o["client_encoding"] = "UTF8" - // DateStyle needs a similar treatment. 
- if datestyle, ok := o["datestyle"]; ok { - if datestyle != "ISO, MDY" { - return nil, fmt.Errorf("setting datestyle must be absent or %v; got %v", "ISO, MDY", datestyle) - } - } else { - o["datestyle"] = "ISO, MDY" - } - - // If a user is not provided by any other means, the last - // resort is to use the current operating system provided user - // name. - if _, ok := o["user"]; !ok { - u, err := userCurrent() - if err != nil { - return nil, err - } - o["user"] = u - } - - // SSL is not necessary or supported over UNIX domain sockets - if network, _ := network(o); network == "unix" { - o["sslmode"] = "disable" - } - - return &Connector{opts: o, dialer: defaultDialer{}}, nil -} diff --git a/vendor/github.com/lib/pq/copy.go b/vendor/github.com/lib/pq/copy.go deleted file mode 100644 index a8f16b2b..00000000 --- a/vendor/github.com/lib/pq/copy.go +++ /dev/null @@ -1,348 +0,0 @@ -package pq - -import ( - "bytes" - "context" - "database/sql/driver" - "encoding/binary" - "errors" - "fmt" - "sync" -) - -var ( - errCopyInClosed = errors.New("pq: copyin statement has already been closed") - errBinaryCopyNotSupported = errors.New("pq: only text format supported for COPY") - errCopyToNotSupported = errors.New("pq: COPY TO is not supported") - errCopyNotSupportedOutsideTxn = errors.New("pq: COPY is only allowed inside a transaction") - errCopyInProgress = errors.New("pq: COPY in progress") -) - -// CopyIn creates a COPY FROM statement which can be prepared with -// Tx.Prepare(). The target table should be visible in search_path. -func CopyIn(table string, columns ...string) string { - buffer := bytes.NewBufferString("COPY ") - BufferQuoteIdentifier(table, buffer) - buffer.WriteString(" (") - makeStmt(buffer, columns...) - return buffer.String() -} - -// MakeStmt makes the stmt string for CopyIn and CopyInSchema. 
-func makeStmt(buffer *bytes.Buffer, columns ...string) { - //s := bytes.NewBufferString() - for i, col := range columns { - if i != 0 { - buffer.WriteString(", ") - } - BufferQuoteIdentifier(col, buffer) - } - buffer.WriteString(") FROM STDIN") -} - -// CopyInSchema creates a COPY FROM statement which can be prepared with -// Tx.Prepare(). -func CopyInSchema(schema, table string, columns ...string) string { - buffer := bytes.NewBufferString("COPY ") - BufferQuoteIdentifier(schema, buffer) - buffer.WriteRune('.') - BufferQuoteIdentifier(table, buffer) - buffer.WriteString(" (") - makeStmt(buffer, columns...) - return buffer.String() -} - -type copyin struct { - cn *conn - buffer []byte - rowData chan []byte - done chan bool - - closed bool - - mu struct { - sync.Mutex - err error - driver.Result - } -} - -const ciBufferSize = 64 * 1024 - -// flush buffer before the buffer is filled up and needs reallocation -const ciBufferFlushSize = 63 * 1024 - -func (cn *conn) prepareCopyIn(q string) (_ driver.Stmt, err error) { - if !cn.isInTransaction() { - return nil, errCopyNotSupportedOutsideTxn - } - - ci := ©in{ - cn: cn, - buffer: make([]byte, 0, ciBufferSize), - rowData: make(chan []byte), - done: make(chan bool, 1), - } - // add CopyData identifier + 4 bytes for message length - ci.buffer = append(ci.buffer, 'd', 0, 0, 0, 0) - - b := cn.writeBuf('Q') - b.string(q) - cn.send(b) - -awaitCopyInResponse: - for { - t, r := cn.recv1() - switch t { - case 'G': - if r.byte() != 0 { - err = errBinaryCopyNotSupported - break awaitCopyInResponse - } - go ci.resploop() - return ci, nil - case 'H': - err = errCopyToNotSupported - break awaitCopyInResponse - case 'E': - err = parseError(r) - case 'Z': - if err == nil { - ci.setBad(driver.ErrBadConn) - errorf("unexpected ReadyForQuery in response to COPY") - } - cn.processReadyForQuery(r) - return nil, err - default: - ci.setBad(driver.ErrBadConn) - errorf("unknown response for copy query: %q", t) - } - } - - // something went wrong, 
abort COPY before we return - b = cn.writeBuf('f') - b.string(err.Error()) - cn.send(b) - - for { - t, r := cn.recv1() - switch t { - case 'c', 'C', 'E': - case 'Z': - // correctly aborted, we're done - cn.processReadyForQuery(r) - return nil, err - default: - ci.setBad(driver.ErrBadConn) - errorf("unknown response for CopyFail: %q", t) - } - } -} - -func (ci *copyin) flush(buf []byte) { - // set message length (without message identifier) - binary.BigEndian.PutUint32(buf[1:], uint32(len(buf)-1)) - - _, err := ci.cn.c.Write(buf) - if err != nil { - panic(err) - } -} - -func (ci *copyin) resploop() { - for { - var r readBuf - t, err := ci.cn.recvMessage(&r) - if err != nil { - ci.setBad(driver.ErrBadConn) - ci.setError(err) - ci.done <- true - return - } - switch t { - case 'C': - // complete - res, _ := ci.cn.parseComplete(r.string()) - ci.setResult(res) - case 'N': - if n := ci.cn.noticeHandler; n != nil { - n(parseError(&r)) - } - case 'Z': - ci.cn.processReadyForQuery(&r) - ci.done <- true - return - case 'E': - err := parseError(&r) - ci.setError(err) - default: - ci.setBad(driver.ErrBadConn) - ci.setError(fmt.Errorf("unknown response during CopyIn: %q", t)) - ci.done <- true - return - } - } -} - -func (ci *copyin) setBad(err error) { - ci.cn.err.set(err) -} - -func (ci *copyin) getBad() error { - return ci.cn.err.get() -} - -func (ci *copyin) err() error { - ci.mu.Lock() - err := ci.mu.err - ci.mu.Unlock() - return err -} - -// setError() sets ci.err if one has not been set already. Caller must not be -// holding ci.Mutex. 
-func (ci *copyin) setError(err error) { - ci.mu.Lock() - if ci.mu.err == nil { - ci.mu.err = err - } - ci.mu.Unlock() -} - -func (ci *copyin) setResult(result driver.Result) { - ci.mu.Lock() - ci.mu.Result = result - ci.mu.Unlock() -} - -func (ci *copyin) getResult() driver.Result { - ci.mu.Lock() - result := ci.mu.Result - ci.mu.Unlock() - if result == nil { - return driver.RowsAffected(0) - } - return result -} - -func (ci *copyin) NumInput() int { - return -1 -} - -func (ci *copyin) Query(v []driver.Value) (r driver.Rows, err error) { - return nil, ErrNotSupported -} - -// Exec inserts values into the COPY stream. The insert is asynchronous -// and Exec can return errors from previous Exec calls to the same -// COPY stmt. -// -// You need to call Exec(nil) to sync the COPY stream and to get any -// errors from pending data, since Stmt.Close() doesn't return errors -// to the user. -func (ci *copyin) Exec(v []driver.Value) (r driver.Result, err error) { - if ci.closed { - return nil, errCopyInClosed - } - - if err := ci.getBad(); err != nil { - return nil, err - } - defer ci.cn.errRecover(&err) - - if err := ci.err(); err != nil { - return nil, err - } - - if len(v) == 0 { - if err := ci.Close(); err != nil { - return driver.RowsAffected(0), err - } - - return ci.getResult(), nil - } - - numValues := len(v) - for i, value := range v { - ci.buffer = appendEncodedText(&ci.cn.parameterStatus, ci.buffer, value) - if i < numValues-1 { - ci.buffer = append(ci.buffer, '\t') - } - } - - ci.buffer = append(ci.buffer, '\n') - - if len(ci.buffer) > ciBufferFlushSize { - ci.flush(ci.buffer) - // reset buffer, keep bytes for message identifier and length - ci.buffer = ci.buffer[:5] - } - - return driver.RowsAffected(0), nil -} - -// CopyData inserts a raw string into the COPY stream. The insert is -// asynchronous and CopyData can return errors from previous CopyData calls to -// the same COPY stmt. 
-// -// You need to call Exec(nil) to sync the COPY stream and to get any -// errors from pending data, since Stmt.Close() doesn't return errors -// to the user. -func (ci *copyin) CopyData(ctx context.Context, line string) (r driver.Result, err error) { - if ci.closed { - return nil, errCopyInClosed - } - - if finish := ci.cn.watchCancel(ctx); finish != nil { - defer finish() - } - - if err := ci.getBad(); err != nil { - return nil, err - } - defer ci.cn.errRecover(&err) - - if err := ci.err(); err != nil { - return nil, err - } - - ci.buffer = append(ci.buffer, []byte(line)...) - ci.buffer = append(ci.buffer, '\n') - - if len(ci.buffer) > ciBufferFlushSize { - ci.flush(ci.buffer) - // reset buffer, keep bytes for message identifier and length - ci.buffer = ci.buffer[:5] - } - - return driver.RowsAffected(0), nil -} - -func (ci *copyin) Close() (err error) { - if ci.closed { // Don't do anything, we're already closed - return nil - } - ci.closed = true - - if err := ci.getBad(); err != nil { - return err - } - defer ci.cn.errRecover(&err) - - if len(ci.buffer) > 0 { - ci.flush(ci.buffer) - } - // Avoid touching the scratch buffer as resploop could be using it. - err = ci.cn.sendSimpleMessage('c') - if err != nil { - return err - } - - <-ci.done - ci.cn.inCopy = false - - if err := ci.err(); err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/lib/pq/doc.go b/vendor/github.com/lib/pq/doc.go deleted file mode 100644 index b5718480..00000000 --- a/vendor/github.com/lib/pq/doc.go +++ /dev/null @@ -1,268 +0,0 @@ -/* -Package pq is a pure Go Postgres driver for the database/sql package. - -In most cases clients will use the database/sql package instead of -using this package directly. 
For example: - - import ( - "database/sql" - - _ "github.com/lib/pq" - ) - - func main() { - connStr := "user=pqgotest dbname=pqgotest sslmode=verify-full" - db, err := sql.Open("postgres", connStr) - if err != nil { - log.Fatal(err) - } - - age := 21 - rows, err := db.Query("SELECT name FROM users WHERE age = $1", age) - … - } - -You can also connect to a database using a URL. For example: - - connStr := "postgres://pqgotest:password@localhost/pqgotest?sslmode=verify-full" - db, err := sql.Open("postgres", connStr) - - -Connection String Parameters - - -Similarly to libpq, when establishing a connection using pq you are expected to -supply a connection string containing zero or more parameters. -A subset of the connection parameters supported by libpq are also supported by pq. -Additionally, pq also lets you specify run-time parameters (such as search_path or work_mem) -directly in the connection string. This is different from libpq, which does not allow -run-time parameters in the connection string, instead requiring you to supply -them in the options parameter. - -For compatibility with libpq, the following special connection parameters are -supported: - - * dbname - The name of the database to connect to - * user - The user to sign in as - * password - The user's password - * host - The host to connect to. Values that start with / are for unix - domain sockets. (default is localhost) - * port - The port to bind to. (default is 5432) - * sslmode - Whether or not to use SSL (default is require, this is not - the default for libpq) - * fallback_application_name - An application_name to fall back to if one isn't provided. - * connect_timeout - Maximum wait for connection, in seconds. Zero or - not specified means wait indefinitely. - * sslcert - Cert file location. The file must contain PEM encoded data. - * sslkey - Key file location. The file must contain PEM encoded data. - * sslrootcert - The location of the root certificate file. 
The file - must contain PEM encoded data. - -Valid values for sslmode are: - - * disable - No SSL - * require - Always SSL (skip verification) - * verify-ca - Always SSL (verify that the certificate presented by the - server was signed by a trusted CA) - * verify-full - Always SSL (verify that the certification presented by - the server was signed by a trusted CA and the server host name - matches the one in the certificate) - -See http://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-CONNSTRING -for more information about connection string parameters. - -Use single quotes for values that contain whitespace: - - "user=pqgotest password='with spaces'" - -A backslash will escape the next character in values: - - "user=space\ man password='it\'s valid'" - -Note that the connection parameter client_encoding (which sets the -text encoding for the connection) may be set but must be "UTF8", -matching with the same rules as Postgres. It is an error to provide -any other value. - -In addition to the parameters listed above, any run-time parameter that can be -set at backend start time can be set in the connection string. For more -information, see -http://www.postgresql.org/docs/current/static/runtime-config.html. - -Most environment variables as specified at http://www.postgresql.org/docs/current/static/libpq-envars.html -supported by libpq are also supported by pq. If any of the environment -variables not supported by pq are set, pq will panic during connection -establishment. Environment variables have a lower precedence than explicitly -provided connection parameters. - -The pgpass mechanism as described in http://www.postgresql.org/docs/current/static/libpq-pgpass.html -is supported, but on Windows PGPASSFILE must be specified explicitly. - - -Queries - - -database/sql does not dictate any specific format for parameter -markers in query strings, and pq uses the Postgres-native ordinal markers, -as shown above. 
The same marker can be reused for the same parameter: - - rows, err := db.Query(`SELECT name FROM users WHERE favorite_fruit = $1 - OR age BETWEEN $2 AND $2 + 3`, "orange", 64) - -pq does not support the LastInsertId() method of the Result type in database/sql. -To return the identifier of an INSERT (or UPDATE or DELETE), use the Postgres -RETURNING clause with a standard Query or QueryRow call: - - var userid int - err := db.QueryRow(`INSERT INTO users(name, favorite_fruit, age) - VALUES('beatrice', 'starfruit', 93) RETURNING id`).Scan(&userid) - -For more details on RETURNING, see the Postgres documentation: - - http://www.postgresql.org/docs/current/static/sql-insert.html - http://www.postgresql.org/docs/current/static/sql-update.html - http://www.postgresql.org/docs/current/static/sql-delete.html - -For additional instructions on querying see the documentation for the database/sql package. - - -Data Types - - -Parameters pass through driver.DefaultParameterConverter before they are handled -by this package. When the binary_parameters connection option is enabled, -[]byte values are sent directly to the backend as data in binary format. - -This package returns the following types for values from the PostgreSQL backend: - - - integer types smallint, integer, and bigint are returned as int64 - - floating-point types real and double precision are returned as float64 - - character types char, varchar, and text are returned as string - - temporal types date, time, timetz, timestamp, and timestamptz are - returned as time.Time - - the boolean type is returned as bool - - the bytea type is returned as []byte - -All other types are returned directly from the backend as []byte values in text format. - - -Errors - - -pq may return errors of type *pq.Error which can be interrogated for error details: - - if err, ok := err.(*pq.Error); ok { - fmt.Println("pq error:", err.Code.Name()) - } - -See the pq.Error type for details. 
- - -Bulk imports - -You can perform bulk imports by preparing a statement returned by pq.CopyIn (or -pq.CopyInSchema) in an explicit transaction (sql.Tx). The returned statement -handle can then be repeatedly "executed" to copy data into the target table. -After all data has been processed you should call Exec() once with no arguments -to flush all buffered data. Any call to Exec() might return an error which -should be handled appropriately, but because of the internal buffering an error -returned by Exec() might not be related to the data passed in the call that -failed. - -CopyIn uses COPY FROM internally. It is not possible to COPY outside of an -explicit transaction in pq. - -Usage example: - - txn, err := db.Begin() - if err != nil { - log.Fatal(err) - } - - stmt, err := txn.Prepare(pq.CopyIn("users", "name", "age")) - if err != nil { - log.Fatal(err) - } - - for _, user := range users { - _, err = stmt.Exec(user.Name, int64(user.Age)) - if err != nil { - log.Fatal(err) - } - } - - _, err = stmt.Exec() - if err != nil { - log.Fatal(err) - } - - err = stmt.Close() - if err != nil { - log.Fatal(err) - } - - err = txn.Commit() - if err != nil { - log.Fatal(err) - } - - -Notifications - - -PostgreSQL supports a simple publish/subscribe model over database -connections. See http://www.postgresql.org/docs/current/static/sql-notify.html -for more information about the general mechanism. - -To start listening for notifications, you first have to open a new connection -to the database by calling NewListener. This connection can not be used for -anything other than LISTEN / NOTIFY. Calling Listen will open a "notification -channel"; once a notification channel is open, a notification generated on that -channel will effect a send on the Listener.Notify channel. A notification -channel will remain open until Unlisten is called, though connection loss might -result in some notifications being lost. 
To solve this problem, Listener sends -a nil pointer over the Notify channel any time the connection is re-established -following a connection loss. The application can get information about the -state of the underlying connection by setting an event callback in the call to -NewListener. - -A single Listener can safely be used from concurrent goroutines, which means -that there is often no need to create more than one Listener in your -application. However, a Listener is always connected to a single database, so -you will need to create a new Listener instance for every database you want to -receive notifications in. - -The channel name in both Listen and Unlisten is case sensitive, and can contain -any characters legal in an identifier (see -http://www.postgresql.org/docs/current/static/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS -for more information). Note that the channel name will be truncated to 63 -bytes by the PostgreSQL server. - -You can find a complete, working example of Listener usage at -https://godoc.org/github.com/lib/pq/example/listen. - - -Kerberos Support - - -If you need support for Kerberos authentication, add the following to your main -package: - - import "github.com/lib/pq/auth/kerberos" - - func init() { - pq.RegisterGSSProvider(func() (pq.Gss, error) { return kerberos.NewGSS() }) - } - -This package is in a separate module so that users who don't need Kerberos -don't have to download unnecessary dependencies. - -When imported, additional connection string parameters are supported: - - * krbsrvname - GSS (Kerberos) service name when constructing the - SPN (default is `postgres`). This will be combined with the host - to form the full SPN: `krbsrvname/host`. - * krbspn - GSS (Kerberos) SPN. This takes priority over - `krbsrvname` if present. 
-*/ -package pq diff --git a/vendor/github.com/lib/pq/encode.go b/vendor/github.com/lib/pq/encode.go deleted file mode 100644 index bffe6096..00000000 --- a/vendor/github.com/lib/pq/encode.go +++ /dev/null @@ -1,632 +0,0 @@ -package pq - -import ( - "bytes" - "database/sql/driver" - "encoding/binary" - "encoding/hex" - "errors" - "fmt" - "math" - "regexp" - "strconv" - "strings" - "sync" - "time" - - "github.com/lib/pq/oid" -) - -var time2400Regex = regexp.MustCompile(`^(24:00(?::00(?:\.0+)?)?)(?:[Z+-].*)?$`) - -func binaryEncode(parameterStatus *parameterStatus, x interface{}) []byte { - switch v := x.(type) { - case []byte: - return v - default: - return encode(parameterStatus, x, oid.T_unknown) - } -} - -func encode(parameterStatus *parameterStatus, x interface{}, pgtypOid oid.Oid) []byte { - switch v := x.(type) { - case int64: - return strconv.AppendInt(nil, v, 10) - case float64: - return strconv.AppendFloat(nil, v, 'f', -1, 64) - case []byte: - if pgtypOid == oid.T_bytea { - return encodeBytea(parameterStatus.serverVersion, v) - } - - return v - case string: - if pgtypOid == oid.T_bytea { - return encodeBytea(parameterStatus.serverVersion, []byte(v)) - } - - return []byte(v) - case bool: - return strconv.AppendBool(nil, v) - case time.Time: - return formatTs(v) - - default: - errorf("encode: unknown type for %T", v) - } - - panic("not reached") -} - -func decode(parameterStatus *parameterStatus, s []byte, typ oid.Oid, f format) interface{} { - switch f { - case formatBinary: - return binaryDecode(parameterStatus, s, typ) - case formatText: - return textDecode(parameterStatus, s, typ) - default: - panic("not reached") - } -} - -func binaryDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} { - switch typ { - case oid.T_bytea: - return s - case oid.T_int8: - return int64(binary.BigEndian.Uint64(s)) - case oid.T_int4: - return int64(int32(binary.BigEndian.Uint32(s))) - case oid.T_int2: - return int64(int16(binary.BigEndian.Uint16(s))) - 
case oid.T_uuid: - b, err := decodeUUIDBinary(s) - if err != nil { - panic(err) - } - return b - - default: - errorf("don't know how to decode binary parameter of type %d", uint32(typ)) - } - - panic("not reached") -} - -func textDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} { - switch typ { - case oid.T_char, oid.T_varchar, oid.T_text: - return string(s) - case oid.T_bytea: - b, err := parseBytea(s) - if err != nil { - errorf("%s", err) - } - return b - case oid.T_timestamptz: - return parseTs(parameterStatus.currentLocation, string(s)) - case oid.T_timestamp, oid.T_date: - return parseTs(nil, string(s)) - case oid.T_time: - return mustParse("15:04:05", typ, s) - case oid.T_timetz: - return mustParse("15:04:05-07", typ, s) - case oid.T_bool: - return s[0] == 't' - case oid.T_int8, oid.T_int4, oid.T_int2: - i, err := strconv.ParseInt(string(s), 10, 64) - if err != nil { - errorf("%s", err) - } - return i - case oid.T_float4, oid.T_float8: - // We always use 64 bit parsing, regardless of whether the input text is for - // a float4 or float8, because clients expect float64s for all float datatypes - // and returning a 32-bit parsed float64 produces lossy results. - f, err := strconv.ParseFloat(string(s), 64) - if err != nil { - errorf("%s", err) - } - return f - } - - return s -} - -// appendEncodedText encodes item in text format as required by COPY -// and appends to buf -func appendEncodedText(parameterStatus *parameterStatus, buf []byte, x interface{}) []byte { - switch v := x.(type) { - case int64: - return strconv.AppendInt(buf, v, 10) - case float64: - return strconv.AppendFloat(buf, v, 'f', -1, 64) - case []byte: - encodedBytea := encodeBytea(parameterStatus.serverVersion, v) - return appendEscapedText(buf, string(encodedBytea)) - case string: - return appendEscapedText(buf, v) - case bool: - return strconv.AppendBool(buf, v) - case time.Time: - return append(buf, formatTs(v)...) - case nil: - return append(buf, "\\N"...) 
- default: - errorf("encode: unknown type for %T", v) - } - - panic("not reached") -} - -func appendEscapedText(buf []byte, text string) []byte { - escapeNeeded := false - startPos := 0 - var c byte - - // check if we need to escape - for i := 0; i < len(text); i++ { - c = text[i] - if c == '\\' || c == '\n' || c == '\r' || c == '\t' { - escapeNeeded = true - startPos = i - break - } - } - if !escapeNeeded { - return append(buf, text...) - } - - // copy till first char to escape, iterate the rest - result := append(buf, text[:startPos]...) - for i := startPos; i < len(text); i++ { - c = text[i] - switch c { - case '\\': - result = append(result, '\\', '\\') - case '\n': - result = append(result, '\\', 'n') - case '\r': - result = append(result, '\\', 'r') - case '\t': - result = append(result, '\\', 't') - default: - result = append(result, c) - } - } - return result -} - -func mustParse(f string, typ oid.Oid, s []byte) time.Time { - str := string(s) - - // Check for a minute and second offset in the timezone. - if typ == oid.T_timestamptz || typ == oid.T_timetz { - for i := 3; i <= 6; i += 3 { - if str[len(str)-i] == ':' { - f += ":00" - continue - } - break - } - } - - // Special case for 24:00 time. - // Unfortunately, golang does not parse 24:00 as a proper time. - // In this case, we want to try "round to the next day", to differentiate. - // As such, we find if the 24:00 time matches at the beginning; if so, - // we default it back to 00:00 but add a day later. - var is2400Time bool - switch typ { - case oid.T_timetz, oid.T_time: - if matches := time2400Regex.FindStringSubmatch(str); matches != nil { - // Concatenate timezone information at the back. 
- str = "00:00:00" + str[len(matches[1]):] - is2400Time = true - } - } - t, err := time.Parse(f, str) - if err != nil { - errorf("decode: %s", err) - } - if is2400Time { - t = t.Add(24 * time.Hour) - } - return t -} - -var errInvalidTimestamp = errors.New("invalid timestamp") - -type timestampParser struct { - err error -} - -func (p *timestampParser) expect(str string, char byte, pos int) { - if p.err != nil { - return - } - if pos+1 > len(str) { - p.err = errInvalidTimestamp - return - } - if c := str[pos]; c != char && p.err == nil { - p.err = fmt.Errorf("expected '%v' at position %v; got '%v'", char, pos, c) - } -} - -func (p *timestampParser) mustAtoi(str string, begin int, end int) int { - if p.err != nil { - return 0 - } - if begin < 0 || end < 0 || begin > end || end > len(str) { - p.err = errInvalidTimestamp - return 0 - } - result, err := strconv.Atoi(str[begin:end]) - if err != nil { - if p.err == nil { - p.err = fmt.Errorf("expected number; got '%v'", str) - } - return 0 - } - return result -} - -// The location cache caches the time zones typically used by the client. -type locationCache struct { - cache map[int]*time.Location - lock sync.Mutex -} - -// All connections share the same list of timezones. Benchmarking shows that -// about 5% speed could be gained by putting the cache in the connection and -// losing the mutex, at the cost of a small amount of memory and a somewhat -// significant increase in code complexity. -var globalLocationCache = newLocationCache() - -func newLocationCache() *locationCache { - return &locationCache{cache: make(map[int]*time.Location)} -} - -// Returns the cached timezone for the specified offset, creating and caching -// it if necessary. 
-func (c *locationCache) getLocation(offset int) *time.Location { - c.lock.Lock() - defer c.lock.Unlock() - - location, ok := c.cache[offset] - if !ok { - location = time.FixedZone("", offset) - c.cache[offset] = location - } - - return location -} - -var infinityTsEnabled = false -var infinityTsNegative time.Time -var infinityTsPositive time.Time - -const ( - infinityTsEnabledAlready = "pq: infinity timestamp enabled already" - infinityTsNegativeMustBeSmaller = "pq: infinity timestamp: negative value must be smaller (before) than positive" -) - -// EnableInfinityTs controls the handling of Postgres' "-infinity" and -// "infinity" "timestamp"s. -// -// If EnableInfinityTs is not called, "-infinity" and "infinity" will return -// []byte("-infinity") and []byte("infinity") respectively, and potentially -// cause error "sql: Scan error on column index 0: unsupported driver -> Scan -// pair: []uint8 -> *time.Time", when scanning into a time.Time value. -// -// Once EnableInfinityTs has been called, all connections created using this -// driver will decode Postgres' "-infinity" and "infinity" for "timestamp", -// "timestamp with time zone" and "date" types to the predefined minimum and -// maximum times, respectively. When encoding time.Time values, any time which -// equals or precedes the predefined minimum time will be encoded to -// "-infinity". Any values at or past the maximum time will similarly be -// encoded to "infinity". -// -// If EnableInfinityTs is called with negative >= positive, it will panic. -// Calling EnableInfinityTs after a connection has been established results in -// undefined behavior. If EnableInfinityTs is called more than once, it will -// panic. 
-func EnableInfinityTs(negative time.Time, positive time.Time) { - if infinityTsEnabled { - panic(infinityTsEnabledAlready) - } - if !negative.Before(positive) { - panic(infinityTsNegativeMustBeSmaller) - } - infinityTsEnabled = true - infinityTsNegative = negative - infinityTsPositive = positive -} - -/* - * Testing might want to toggle infinityTsEnabled - */ -func disableInfinityTs() { - infinityTsEnabled = false -} - -// This is a time function specific to the Postgres default DateStyle -// setting ("ISO, MDY"), the only one we currently support. This -// accounts for the discrepancies between the parsing available with -// time.Parse and the Postgres date formatting quirks. -func parseTs(currentLocation *time.Location, str string) interface{} { - switch str { - case "-infinity": - if infinityTsEnabled { - return infinityTsNegative - } - return []byte(str) - case "infinity": - if infinityTsEnabled { - return infinityTsPositive - } - return []byte(str) - } - t, err := ParseTimestamp(currentLocation, str) - if err != nil { - panic(err) - } - return t -} - -// ParseTimestamp parses Postgres' text format. It returns a time.Time in -// currentLocation iff that time's offset agrees with the offset sent from the -// Postgres server. Otherwise, ParseTimestamp returns a time.Time with the -// fixed offset offset provided by the Postgres server. 
-func ParseTimestamp(currentLocation *time.Location, str string) (time.Time, error) { - p := timestampParser{} - - monSep := strings.IndexRune(str, '-') - // this is Gregorian year, not ISO Year - // In Gregorian system, the year 1 BC is followed by AD 1 - year := p.mustAtoi(str, 0, monSep) - daySep := monSep + 3 - month := p.mustAtoi(str, monSep+1, daySep) - p.expect(str, '-', daySep) - timeSep := daySep + 3 - day := p.mustAtoi(str, daySep+1, timeSep) - - minLen := monSep + len("01-01") + 1 - - isBC := strings.HasSuffix(str, " BC") - if isBC { - minLen += 3 - } - - var hour, minute, second int - if len(str) > minLen { - p.expect(str, ' ', timeSep) - minSep := timeSep + 3 - p.expect(str, ':', minSep) - hour = p.mustAtoi(str, timeSep+1, minSep) - secSep := minSep + 3 - p.expect(str, ':', secSep) - minute = p.mustAtoi(str, minSep+1, secSep) - secEnd := secSep + 3 - second = p.mustAtoi(str, secSep+1, secEnd) - } - remainderIdx := monSep + len("01-01 00:00:00") + 1 - // Three optional (but ordered) sections follow: the - // fractional seconds, the time zone offset, and the BC - // designation. We set them up here and adjust the other - // offsets if the preceding sections exist. - - nanoSec := 0 - tzOff := 0 - - if remainderIdx < len(str) && str[remainderIdx] == '.' 
{ - fracStart := remainderIdx + 1 - fracOff := strings.IndexAny(str[fracStart:], "-+Z ") - if fracOff < 0 { - fracOff = len(str) - fracStart - } - fracSec := p.mustAtoi(str, fracStart, fracStart+fracOff) - nanoSec = fracSec * (1000000000 / int(math.Pow(10, float64(fracOff)))) - - remainderIdx += fracOff + 1 - } - if tzStart := remainderIdx; tzStart < len(str) && (str[tzStart] == '-' || str[tzStart] == '+') { - // time zone separator is always '-' or '+' or 'Z' (UTC is +00) - var tzSign int - switch c := str[tzStart]; c { - case '-': - tzSign = -1 - case '+': - tzSign = +1 - default: - return time.Time{}, fmt.Errorf("expected '-' or '+' at position %v; got %v", tzStart, c) - } - tzHours := p.mustAtoi(str, tzStart+1, tzStart+3) - remainderIdx += 3 - var tzMin, tzSec int - if remainderIdx < len(str) && str[remainderIdx] == ':' { - tzMin = p.mustAtoi(str, remainderIdx+1, remainderIdx+3) - remainderIdx += 3 - } - if remainderIdx < len(str) && str[remainderIdx] == ':' { - tzSec = p.mustAtoi(str, remainderIdx+1, remainderIdx+3) - remainderIdx += 3 - } - tzOff = tzSign * ((tzHours * 60 * 60) + (tzMin * 60) + tzSec) - } else if tzStart < len(str) && str[tzStart] == 'Z' { - // time zone Z separator indicates UTC is +00 - remainderIdx += 1 - } - - var isoYear int - - if isBC { - isoYear = 1 - year - remainderIdx += 3 - } else { - isoYear = year - } - if remainderIdx < len(str) { - return time.Time{}, fmt.Errorf("expected end of input, got %v", str[remainderIdx:]) - } - t := time.Date(isoYear, time.Month(month), day, - hour, minute, second, nanoSec, - globalLocationCache.getLocation(tzOff)) - - if currentLocation != nil { - // Set the location of the returned Time based on the session's - // TimeZone value, but only if the local time zone database agrees with - // the remote database on the offset. - lt := t.In(currentLocation) - _, newOff := lt.Zone() - if newOff == tzOff { - t = lt - } - } - - return t, p.err -} - -// formatTs formats t into a format postgres understands. 
-func formatTs(t time.Time) []byte { - if infinityTsEnabled { - // t <= -infinity : ! (t > -infinity) - if !t.After(infinityTsNegative) { - return []byte("-infinity") - } - // t >= infinity : ! (!t < infinity) - if !t.Before(infinityTsPositive) { - return []byte("infinity") - } - } - return FormatTimestamp(t) -} - -// FormatTimestamp formats t into Postgres' text format for timestamps. -func FormatTimestamp(t time.Time) []byte { - // Need to send dates before 0001 A.D. with " BC" suffix, instead of the - // minus sign preferred by Go. - // Beware, "0000" in ISO is "1 BC", "-0001" is "2 BC" and so on - bc := false - if t.Year() <= 0 { - // flip year sign, and add 1, e.g: "0" will be "1", and "-10" will be "11" - t = t.AddDate((-t.Year())*2+1, 0, 0) - bc = true - } - b := []byte(t.Format("2006-01-02 15:04:05.999999999Z07:00")) - - _, offset := t.Zone() - offset %= 60 - if offset != 0 { - // RFC3339Nano already printed the minus sign - if offset < 0 { - offset = -offset - } - - b = append(b, ':') - if offset < 10 { - b = append(b, '0') - } - b = strconv.AppendInt(b, int64(offset), 10) - } - - if bc { - b = append(b, " BC"...) - } - return b -} - -// Parse a bytea value received from the server. Both "hex" and the legacy -// "escape" format are supported. 
-func parseBytea(s []byte) (result []byte, err error) { - if len(s) >= 2 && bytes.Equal(s[:2], []byte("\\x")) { - // bytea_output = hex - s = s[2:] // trim off leading "\\x" - result = make([]byte, hex.DecodedLen(len(s))) - _, err := hex.Decode(result, s) - if err != nil { - return nil, err - } - } else { - // bytea_output = escape - for len(s) > 0 { - if s[0] == '\\' { - // escaped '\\' - if len(s) >= 2 && s[1] == '\\' { - result = append(result, '\\') - s = s[2:] - continue - } - - // '\\' followed by an octal number - if len(s) < 4 { - return nil, fmt.Errorf("invalid bytea sequence %v", s) - } - r, err := strconv.ParseUint(string(s[1:4]), 8, 8) - if err != nil { - return nil, fmt.Errorf("could not parse bytea value: %s", err.Error()) - } - result = append(result, byte(r)) - s = s[4:] - } else { - // We hit an unescaped, raw byte. Try to read in as many as - // possible in one go. - i := bytes.IndexByte(s, '\\') - if i == -1 { - result = append(result, s...) - break - } - result = append(result, s[:i]...) - s = s[i:] - } - } - } - - return result, nil -} - -func encodeBytea(serverVersion int, v []byte) (result []byte) { - if serverVersion >= 90000 { - // Use the hex format if we know that the server supports it - result = make([]byte, 2+hex.EncodedLen(len(v))) - result[0] = '\\' - result[1] = 'x' - hex.Encode(result[2:], v) - } else { - // .. or resort to "escape" - for _, b := range v { - if b == '\\' { - result = append(result, '\\', '\\') - } else if b < 0x20 || b > 0x7e { - result = append(result, []byte(fmt.Sprintf("\\%03o", b))...) - } else { - result = append(result, b) - } - } - } - - return result -} - -// NullTime represents a time.Time that may be null. NullTime implements the -// sql.Scanner interface so it can be used as a scan destination, similar to -// sql.NullString. -type NullTime struct { - Time time.Time - Valid bool // Valid is true if Time is not NULL -} - -// Scan implements the Scanner interface. 
-func (nt *NullTime) Scan(value interface{}) error { - nt.Time, nt.Valid = value.(time.Time) - return nil -} - -// Value implements the driver Valuer interface. -func (nt NullTime) Value() (driver.Value, error) { - if !nt.Valid { - return nil, nil - } - return nt.Time, nil -} diff --git a/vendor/github.com/lib/pq/error.go b/vendor/github.com/lib/pq/error.go deleted file mode 100644 index f67c5a5f..00000000 --- a/vendor/github.com/lib/pq/error.go +++ /dev/null @@ -1,523 +0,0 @@ -package pq - -import ( - "database/sql/driver" - "fmt" - "io" - "net" - "runtime" -) - -// Error severities -const ( - Efatal = "FATAL" - Epanic = "PANIC" - Ewarning = "WARNING" - Enotice = "NOTICE" - Edebug = "DEBUG" - Einfo = "INFO" - Elog = "LOG" -) - -// Error represents an error communicating with the server. -// -// See http://www.postgresql.org/docs/current/static/protocol-error-fields.html for details of the fields -type Error struct { - Severity string - Code ErrorCode - Message string - Detail string - Hint string - Position string - InternalPosition string - InternalQuery string - Where string - Schema string - Table string - Column string - DataTypeName string - Constraint string - File string - Line string - Routine string -} - -// ErrorCode is a five-character error code. -type ErrorCode string - -// Name returns a more human friendly rendering of the error code, namely the -// "condition name". -// -// See http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html for -// details. -func (ec ErrorCode) Name() string { - return errorCodeNames[ec] -} - -// ErrorClass is only the class part of an error code. -type ErrorClass string - -// Name returns the condition name of an error class. It is equivalent to the -// condition name of the "standard" error code (i.e. the one having the last -// three characters "000"). -func (ec ErrorClass) Name() string { - return errorCodeNames[ErrorCode(ec+"000")] -} - -// Class returns the error class, e.g. "28". 
-// -// See http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html for -// details. -func (ec ErrorCode) Class() ErrorClass { - return ErrorClass(ec[0:2]) -} - -// errorCodeNames is a mapping between the five-character error codes and the -// human readable "condition names". It is derived from the list at -// http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html -var errorCodeNames = map[ErrorCode]string{ - // Class 00 - Successful Completion - "00000": "successful_completion", - // Class 01 - Warning - "01000": "warning", - "0100C": "dynamic_result_sets_returned", - "01008": "implicit_zero_bit_padding", - "01003": "null_value_eliminated_in_set_function", - "01007": "privilege_not_granted", - "01006": "privilege_not_revoked", - "01004": "string_data_right_truncation", - "01P01": "deprecated_feature", - // Class 02 - No Data (this is also a warning class per the SQL standard) - "02000": "no_data", - "02001": "no_additional_dynamic_result_sets_returned", - // Class 03 - SQL Statement Not Yet Complete - "03000": "sql_statement_not_yet_complete", - // Class 08 - Connection Exception - "08000": "connection_exception", - "08003": "connection_does_not_exist", - "08006": "connection_failure", - "08001": "sqlclient_unable_to_establish_sqlconnection", - "08004": "sqlserver_rejected_establishment_of_sqlconnection", - "08007": "transaction_resolution_unknown", - "08P01": "protocol_violation", - // Class 09 - Triggered Action Exception - "09000": "triggered_action_exception", - // Class 0A - Feature Not Supported - "0A000": "feature_not_supported", - // Class 0B - Invalid Transaction Initiation - "0B000": "invalid_transaction_initiation", - // Class 0F - Locator Exception - "0F000": "locator_exception", - "0F001": "invalid_locator_specification", - // Class 0L - Invalid Grantor - "0L000": "invalid_grantor", - "0LP01": "invalid_grant_operation", - // Class 0P - Invalid Role Specification - "0P000": "invalid_role_specification", - // Class 0Z - Diagnostics 
Exception - "0Z000": "diagnostics_exception", - "0Z002": "stacked_diagnostics_accessed_without_active_handler", - // Class 20 - Case Not Found - "20000": "case_not_found", - // Class 21 - Cardinality Violation - "21000": "cardinality_violation", - // Class 22 - Data Exception - "22000": "data_exception", - "2202E": "array_subscript_error", - "22021": "character_not_in_repertoire", - "22008": "datetime_field_overflow", - "22012": "division_by_zero", - "22005": "error_in_assignment", - "2200B": "escape_character_conflict", - "22022": "indicator_overflow", - "22015": "interval_field_overflow", - "2201E": "invalid_argument_for_logarithm", - "22014": "invalid_argument_for_ntile_function", - "22016": "invalid_argument_for_nth_value_function", - "2201F": "invalid_argument_for_power_function", - "2201G": "invalid_argument_for_width_bucket_function", - "22018": "invalid_character_value_for_cast", - "22007": "invalid_datetime_format", - "22019": "invalid_escape_character", - "2200D": "invalid_escape_octet", - "22025": "invalid_escape_sequence", - "22P06": "nonstandard_use_of_escape_character", - "22010": "invalid_indicator_parameter_value", - "22023": "invalid_parameter_value", - "2201B": "invalid_regular_expression", - "2201W": "invalid_row_count_in_limit_clause", - "2201X": "invalid_row_count_in_result_offset_clause", - "22009": "invalid_time_zone_displacement_value", - "2200C": "invalid_use_of_escape_character", - "2200G": "most_specific_type_mismatch", - "22004": "null_value_not_allowed", - "22002": "null_value_no_indicator_parameter", - "22003": "numeric_value_out_of_range", - "2200H": "sequence_generator_limit_exceeded", - "22026": "string_data_length_mismatch", - "22001": "string_data_right_truncation", - "22011": "substring_error", - "22027": "trim_error", - "22024": "unterminated_c_string", - "2200F": "zero_length_character_string", - "22P01": "floating_point_exception", - "22P02": "invalid_text_representation", - "22P03": "invalid_binary_representation", - "22P04": 
"bad_copy_file_format", - "22P05": "untranslatable_character", - "2200L": "not_an_xml_document", - "2200M": "invalid_xml_document", - "2200N": "invalid_xml_content", - "2200S": "invalid_xml_comment", - "2200T": "invalid_xml_processing_instruction", - // Class 23 - Integrity Constraint Violation - "23000": "integrity_constraint_violation", - "23001": "restrict_violation", - "23502": "not_null_violation", - "23503": "foreign_key_violation", - "23505": "unique_violation", - "23514": "check_violation", - "23P01": "exclusion_violation", - // Class 24 - Invalid Cursor State - "24000": "invalid_cursor_state", - // Class 25 - Invalid Transaction State - "25000": "invalid_transaction_state", - "25001": "active_sql_transaction", - "25002": "branch_transaction_already_active", - "25008": "held_cursor_requires_same_isolation_level", - "25003": "inappropriate_access_mode_for_branch_transaction", - "25004": "inappropriate_isolation_level_for_branch_transaction", - "25005": "no_active_sql_transaction_for_branch_transaction", - "25006": "read_only_sql_transaction", - "25007": "schema_and_data_statement_mixing_not_supported", - "25P01": "no_active_sql_transaction", - "25P02": "in_failed_sql_transaction", - // Class 26 - Invalid SQL Statement Name - "26000": "invalid_sql_statement_name", - // Class 27 - Triggered Data Change Violation - "27000": "triggered_data_change_violation", - // Class 28 - Invalid Authorization Specification - "28000": "invalid_authorization_specification", - "28P01": "invalid_password", - // Class 2B - Dependent Privilege Descriptors Still Exist - "2B000": "dependent_privilege_descriptors_still_exist", - "2BP01": "dependent_objects_still_exist", - // Class 2D - Invalid Transaction Termination - "2D000": "invalid_transaction_termination", - // Class 2F - SQL Routine Exception - "2F000": "sql_routine_exception", - "2F005": "function_executed_no_return_statement", - "2F002": "modifying_sql_data_not_permitted", - "2F003": "prohibited_sql_statement_attempted", - 
"2F004": "reading_sql_data_not_permitted", - // Class 34 - Invalid Cursor Name - "34000": "invalid_cursor_name", - // Class 38 - External Routine Exception - "38000": "external_routine_exception", - "38001": "containing_sql_not_permitted", - "38002": "modifying_sql_data_not_permitted", - "38003": "prohibited_sql_statement_attempted", - "38004": "reading_sql_data_not_permitted", - // Class 39 - External Routine Invocation Exception - "39000": "external_routine_invocation_exception", - "39001": "invalid_sqlstate_returned", - "39004": "null_value_not_allowed", - "39P01": "trigger_protocol_violated", - "39P02": "srf_protocol_violated", - // Class 3B - Savepoint Exception - "3B000": "savepoint_exception", - "3B001": "invalid_savepoint_specification", - // Class 3D - Invalid Catalog Name - "3D000": "invalid_catalog_name", - // Class 3F - Invalid Schema Name - "3F000": "invalid_schema_name", - // Class 40 - Transaction Rollback - "40000": "transaction_rollback", - "40002": "transaction_integrity_constraint_violation", - "40001": "serialization_failure", - "40003": "statement_completion_unknown", - "40P01": "deadlock_detected", - // Class 42 - Syntax Error or Access Rule Violation - "42000": "syntax_error_or_access_rule_violation", - "42601": "syntax_error", - "42501": "insufficient_privilege", - "42846": "cannot_coerce", - "42803": "grouping_error", - "42P20": "windowing_error", - "42P19": "invalid_recursion", - "42830": "invalid_foreign_key", - "42602": "invalid_name", - "42622": "name_too_long", - "42939": "reserved_name", - "42804": "datatype_mismatch", - "42P18": "indeterminate_datatype", - "42P21": "collation_mismatch", - "42P22": "indeterminate_collation", - "42809": "wrong_object_type", - "42703": "undefined_column", - "42883": "undefined_function", - "42P01": "undefined_table", - "42P02": "undefined_parameter", - "42704": "undefined_object", - "42701": "duplicate_column", - "42P03": "duplicate_cursor", - "42P04": "duplicate_database", - "42723": 
"duplicate_function", - "42P05": "duplicate_prepared_statement", - "42P06": "duplicate_schema", - "42P07": "duplicate_table", - "42712": "duplicate_alias", - "42710": "duplicate_object", - "42702": "ambiguous_column", - "42725": "ambiguous_function", - "42P08": "ambiguous_parameter", - "42P09": "ambiguous_alias", - "42P10": "invalid_column_reference", - "42611": "invalid_column_definition", - "42P11": "invalid_cursor_definition", - "42P12": "invalid_database_definition", - "42P13": "invalid_function_definition", - "42P14": "invalid_prepared_statement_definition", - "42P15": "invalid_schema_definition", - "42P16": "invalid_table_definition", - "42P17": "invalid_object_definition", - // Class 44 - WITH CHECK OPTION Violation - "44000": "with_check_option_violation", - // Class 53 - Insufficient Resources - "53000": "insufficient_resources", - "53100": "disk_full", - "53200": "out_of_memory", - "53300": "too_many_connections", - "53400": "configuration_limit_exceeded", - // Class 54 - Program Limit Exceeded - "54000": "program_limit_exceeded", - "54001": "statement_too_complex", - "54011": "too_many_columns", - "54023": "too_many_arguments", - // Class 55 - Object Not In Prerequisite State - "55000": "object_not_in_prerequisite_state", - "55006": "object_in_use", - "55P02": "cant_change_runtime_param", - "55P03": "lock_not_available", - // Class 57 - Operator Intervention - "57000": "operator_intervention", - "57014": "query_canceled", - "57P01": "admin_shutdown", - "57P02": "crash_shutdown", - "57P03": "cannot_connect_now", - "57P04": "database_dropped", - // Class 58 - System Error (errors external to PostgreSQL itself) - "58000": "system_error", - "58030": "io_error", - "58P01": "undefined_file", - "58P02": "duplicate_file", - // Class F0 - Configuration File Error - "F0000": "config_file_error", - "F0001": "lock_file_exists", - // Class HV - Foreign Data Wrapper Error (SQL/MED) - "HV000": "fdw_error", - "HV005": "fdw_column_name_not_found", - "HV002": 
"fdw_dynamic_parameter_value_needed", - "HV010": "fdw_function_sequence_error", - "HV021": "fdw_inconsistent_descriptor_information", - "HV024": "fdw_invalid_attribute_value", - "HV007": "fdw_invalid_column_name", - "HV008": "fdw_invalid_column_number", - "HV004": "fdw_invalid_data_type", - "HV006": "fdw_invalid_data_type_descriptors", - "HV091": "fdw_invalid_descriptor_field_identifier", - "HV00B": "fdw_invalid_handle", - "HV00C": "fdw_invalid_option_index", - "HV00D": "fdw_invalid_option_name", - "HV090": "fdw_invalid_string_length_or_buffer_length", - "HV00A": "fdw_invalid_string_format", - "HV009": "fdw_invalid_use_of_null_pointer", - "HV014": "fdw_too_many_handles", - "HV001": "fdw_out_of_memory", - "HV00P": "fdw_no_schemas", - "HV00J": "fdw_option_name_not_found", - "HV00K": "fdw_reply_handle", - "HV00Q": "fdw_schema_not_found", - "HV00R": "fdw_table_not_found", - "HV00L": "fdw_unable_to_create_execution", - "HV00M": "fdw_unable_to_create_reply", - "HV00N": "fdw_unable_to_establish_connection", - // Class P0 - PL/pgSQL Error - "P0000": "plpgsql_error", - "P0001": "raise_exception", - "P0002": "no_data_found", - "P0003": "too_many_rows", - // Class XX - Internal Error - "XX000": "internal_error", - "XX001": "data_corrupted", - "XX002": "index_corrupted", -} - -func parseError(r *readBuf) *Error { - err := new(Error) - for t := r.byte(); t != 0; t = r.byte() { - msg := r.string() - switch t { - case 'S': - err.Severity = msg - case 'C': - err.Code = ErrorCode(msg) - case 'M': - err.Message = msg - case 'D': - err.Detail = msg - case 'H': - err.Hint = msg - case 'P': - err.Position = msg - case 'p': - err.InternalPosition = msg - case 'q': - err.InternalQuery = msg - case 'W': - err.Where = msg - case 's': - err.Schema = msg - case 't': - err.Table = msg - case 'c': - err.Column = msg - case 'd': - err.DataTypeName = msg - case 'n': - err.Constraint = msg - case 'F': - err.File = msg - case 'L': - err.Line = msg - case 'R': - err.Routine = msg - } - } - return 
err -} - -// Fatal returns true if the Error Severity is fatal. -func (err *Error) Fatal() bool { - return err.Severity == Efatal -} - -// SQLState returns the SQLState of the error. -func (err *Error) SQLState() string { - return string(err.Code) -} - -// Get implements the legacy PGError interface. New code should use the fields -// of the Error struct directly. -func (err *Error) Get(k byte) (v string) { - switch k { - case 'S': - return err.Severity - case 'C': - return string(err.Code) - case 'M': - return err.Message - case 'D': - return err.Detail - case 'H': - return err.Hint - case 'P': - return err.Position - case 'p': - return err.InternalPosition - case 'q': - return err.InternalQuery - case 'W': - return err.Where - case 's': - return err.Schema - case 't': - return err.Table - case 'c': - return err.Column - case 'd': - return err.DataTypeName - case 'n': - return err.Constraint - case 'F': - return err.File - case 'L': - return err.Line - case 'R': - return err.Routine - } - return "" -} - -func (err *Error) Error() string { - return "pq: " + err.Message -} - -// PGError is an interface used by previous versions of pq. It is provided -// only to support legacy code. New code should use the Error type. -type PGError interface { - Error() string - Fatal() bool - Get(k byte) (v string) -} - -func errorf(s string, args ...interface{}) { - panic(fmt.Errorf("pq: %s", fmt.Sprintf(s, args...))) -} - -// TODO(ainar-g) Rename to errorf after removing panics. 
-func fmterrorf(s string, args ...interface{}) error { - return fmt.Errorf("pq: %s", fmt.Sprintf(s, args...)) -} - -func errRecoverNoErrBadConn(err *error) { - e := recover() - if e == nil { - // Do nothing - return - } - var ok bool - *err, ok = e.(error) - if !ok { - *err = fmt.Errorf("pq: unexpected error: %#v", e) - } -} - -func (cn *conn) errRecover(err *error) { - e := recover() - switch v := e.(type) { - case nil: - // Do nothing - case runtime.Error: - cn.err.set(driver.ErrBadConn) - panic(v) - case *Error: - if v.Fatal() { - *err = driver.ErrBadConn - } else { - *err = v - } - case *net.OpError: - cn.err.set(driver.ErrBadConn) - *err = v - case *safeRetryError: - cn.err.set(driver.ErrBadConn) - *err = driver.ErrBadConn - case error: - if v == io.EOF || v.Error() == "remote error: handshake failure" { - *err = driver.ErrBadConn - } else { - *err = v - } - - default: - cn.err.set(driver.ErrBadConn) - panic(fmt.Sprintf("unknown error: %#v", e)) - } - - // Any time we return ErrBadConn, we need to remember it since *Tx doesn't - // mark the connection bad in database/sql. - if *err == driver.ErrBadConn { - cn.err.set(driver.ErrBadConn) - } -} diff --git a/vendor/github.com/lib/pq/krb.go b/vendor/github.com/lib/pq/krb.go deleted file mode 100644 index 408ec01f..00000000 --- a/vendor/github.com/lib/pq/krb.go +++ /dev/null @@ -1,27 +0,0 @@ -package pq - -// NewGSSFunc creates a GSS authentication provider, for use with -// RegisterGSSProvider. -type NewGSSFunc func() (GSS, error) - -var newGss NewGSSFunc - -// RegisterGSSProvider registers a GSS authentication provider. 
For example, if -// you need to use Kerberos to authenticate with your server, add this to your -// main package: -// -// import "github.com/lib/pq/auth/kerberos" -// -// func init() { -// pq.RegisterGSSProvider(func() (pq.GSS, error) { return kerberos.NewGSS() }) -// } -func RegisterGSSProvider(newGssArg NewGSSFunc) { - newGss = newGssArg -} - -// GSS provides GSSAPI authentication (e.g., Kerberos). -type GSS interface { - GetInitToken(host string, service string) ([]byte, error) - GetInitTokenFromSpn(spn string) ([]byte, error) - Continue(inToken []byte) (done bool, outToken []byte, err error) -} diff --git a/vendor/github.com/lib/pq/notice.go b/vendor/github.com/lib/pq/notice.go deleted file mode 100644 index 70ad122a..00000000 --- a/vendor/github.com/lib/pq/notice.go +++ /dev/null @@ -1,72 +0,0 @@ -//go:build go1.10 -// +build go1.10 - -package pq - -import ( - "context" - "database/sql/driver" -) - -// NoticeHandler returns the notice handler on the given connection, if any. A -// runtime panic occurs if c is not a pq connection. This is rarely used -// directly, use ConnectorNoticeHandler and ConnectorWithNoticeHandler instead. -func NoticeHandler(c driver.Conn) func(*Error) { - return c.(*conn).noticeHandler -} - -// SetNoticeHandler sets the given notice handler on the given connection. A -// runtime panic occurs if c is not a pq connection. A nil handler may be used -// to unset it. This is rarely used directly, use ConnectorNoticeHandler and -// ConnectorWithNoticeHandler instead. -// -// Note: Notice handlers are executed synchronously by pq meaning commands -// won't continue to be processed until the handler returns. -func SetNoticeHandler(c driver.Conn, handler func(*Error)) { - c.(*conn).noticeHandler = handler -} - -// NoticeHandlerConnector wraps a regular connector and sets a notice handler -// on it. 
-type NoticeHandlerConnector struct { - driver.Connector - noticeHandler func(*Error) -} - -// Connect calls the underlying connector's connect method and then sets the -// notice handler. -func (n *NoticeHandlerConnector) Connect(ctx context.Context) (driver.Conn, error) { - c, err := n.Connector.Connect(ctx) - if err == nil { - SetNoticeHandler(c, n.noticeHandler) - } - return c, err -} - -// ConnectorNoticeHandler returns the currently set notice handler, if any. If -// the given connector is not a result of ConnectorWithNoticeHandler, nil is -// returned. -func ConnectorNoticeHandler(c driver.Connector) func(*Error) { - if c, ok := c.(*NoticeHandlerConnector); ok { - return c.noticeHandler - } - return nil -} - -// ConnectorWithNoticeHandler creates or sets the given handler for the given -// connector. If the given connector is a result of calling this function -// previously, it is simply set on the given connector and returned. Otherwise, -// this returns a new connector wrapping the given one and setting the notice -// handler. A nil notice handler may be used to unset it. -// -// The returned connector is intended to be used with database/sql.OpenDB. -// -// Note: Notice handlers are executed synchronously by pq meaning commands -// won't continue to be processed until the handler returns. -func ConnectorWithNoticeHandler(c driver.Connector, handler func(*Error)) *NoticeHandlerConnector { - if c, ok := c.(*NoticeHandlerConnector); ok { - c.noticeHandler = handler - return c - } - return &NoticeHandlerConnector{Connector: c, noticeHandler: handler} -} diff --git a/vendor/github.com/lib/pq/notify.go b/vendor/github.com/lib/pq/notify.go deleted file mode 100644 index 5c421fdb..00000000 --- a/vendor/github.com/lib/pq/notify.go +++ /dev/null @@ -1,858 +0,0 @@ -package pq - -// Package pq is a pure Go Postgres driver for the database/sql package. -// This module contains support for Postgres LISTEN/NOTIFY. 
- -import ( - "context" - "database/sql/driver" - "errors" - "fmt" - "sync" - "sync/atomic" - "time" -) - -// Notification represents a single notification from the database. -type Notification struct { - // Process ID (PID) of the notifying postgres backend. - BePid int - // Name of the channel the notification was sent on. - Channel string - // Payload, or the empty string if unspecified. - Extra string -} - -func recvNotification(r *readBuf) *Notification { - bePid := r.int32() - channel := r.string() - extra := r.string() - - return &Notification{bePid, channel, extra} -} - -// SetNotificationHandler sets the given notification handler on the given -// connection. A runtime panic occurs if c is not a pq connection. A nil handler -// may be used to unset it. -// -// Note: Notification handlers are executed synchronously by pq meaning commands -// won't continue to be processed until the handler returns. -func SetNotificationHandler(c driver.Conn, handler func(*Notification)) { - c.(*conn).notificationHandler = handler -} - -// NotificationHandlerConnector wraps a regular connector and sets a notification handler -// on it. -type NotificationHandlerConnector struct { - driver.Connector - notificationHandler func(*Notification) -} - -// Connect calls the underlying connector's connect method and then sets the -// notification handler. -func (n *NotificationHandlerConnector) Connect(ctx context.Context) (driver.Conn, error) { - c, err := n.Connector.Connect(ctx) - if err == nil { - SetNotificationHandler(c, n.notificationHandler) - } - return c, err -} - -// ConnectorNotificationHandler returns the currently set notification handler, if any. If -// the given connector is not a result of ConnectorWithNotificationHandler, nil is -// returned. 
-func ConnectorNotificationHandler(c driver.Connector) func(*Notification) { - if c, ok := c.(*NotificationHandlerConnector); ok { - return c.notificationHandler - } - return nil -} - -// ConnectorWithNotificationHandler creates or sets the given handler for the given -// connector. If the given connector is a result of calling this function -// previously, it is simply set on the given connector and returned. Otherwise, -// this returns a new connector wrapping the given one and setting the notification -// handler. A nil notification handler may be used to unset it. -// -// The returned connector is intended to be used with database/sql.OpenDB. -// -// Note: Notification handlers are executed synchronously by pq meaning commands -// won't continue to be processed until the handler returns. -func ConnectorWithNotificationHandler(c driver.Connector, handler func(*Notification)) *NotificationHandlerConnector { - if c, ok := c.(*NotificationHandlerConnector); ok { - c.notificationHandler = handler - return c - } - return &NotificationHandlerConnector{Connector: c, notificationHandler: handler} -} - -const ( - connStateIdle int32 = iota - connStateExpectResponse - connStateExpectReadyForQuery -) - -type message struct { - typ byte - err error -} - -var errListenerConnClosed = errors.New("pq: ListenerConn has been closed") - -// ListenerConn is a low-level interface for waiting for notifications. You -// should use Listener instead. -type ListenerConn struct { - // guards cn and err - connectionLock sync.Mutex - cn *conn - err error - - connState int32 - - // the sending goroutine will be holding this lock - senderLock sync.Mutex - - notificationChan chan<- *Notification - - replyChan chan message -} - -// NewListenerConn creates a new ListenerConn. Use NewListener instead. 
-func NewListenerConn(name string, notificationChan chan<- *Notification) (*ListenerConn, error) { - return newDialListenerConn(defaultDialer{}, name, notificationChan) -} - -func newDialListenerConn(d Dialer, name string, c chan<- *Notification) (*ListenerConn, error) { - cn, err := DialOpen(d, name) - if err != nil { - return nil, err - } - - l := &ListenerConn{ - cn: cn.(*conn), - notificationChan: c, - connState: connStateIdle, - replyChan: make(chan message, 2), - } - - go l.listenerConnMain() - - return l, nil -} - -// We can only allow one goroutine at a time to be running a query on the -// connection for various reasons, so the goroutine sending on the connection -// must be holding senderLock. -// -// Returns an error if an unrecoverable error has occurred and the ListenerConn -// should be abandoned. -func (l *ListenerConn) acquireSenderLock() error { - // we must acquire senderLock first to avoid deadlocks; see ExecSimpleQuery - l.senderLock.Lock() - - l.connectionLock.Lock() - err := l.err - l.connectionLock.Unlock() - if err != nil { - l.senderLock.Unlock() - return err - } - return nil -} - -func (l *ListenerConn) releaseSenderLock() { - l.senderLock.Unlock() -} - -// setState advances the protocol state to newState. Returns false if moving -// to that state from the current state is not allowed. -func (l *ListenerConn) setState(newState int32) bool { - var expectedState int32 - - switch newState { - case connStateIdle: - expectedState = connStateExpectReadyForQuery - case connStateExpectResponse: - expectedState = connStateIdle - case connStateExpectReadyForQuery: - expectedState = connStateExpectResponse - default: - panic(fmt.Sprintf("unexpected listenerConnState %d", newState)) - } - - return atomic.CompareAndSwapInt32(&l.connState, expectedState, newState) -} - -// Main logic is here: receive messages from the postgres backend, forward -// notifications and query replies and keep the internal state in sync with the -// protocol state. 
Returns when the connection has been lost, is about to go -// away or should be discarded because we couldn't agree on the state with the -// server backend. -func (l *ListenerConn) listenerConnLoop() (err error) { - defer errRecoverNoErrBadConn(&err) - - r := &readBuf{} - for { - t, err := l.cn.recvMessage(r) - if err != nil { - return err - } - - switch t { - case 'A': - // recvNotification copies all the data so we don't need to worry - // about the scratch buffer being overwritten. - l.notificationChan <- recvNotification(r) - - case 'T', 'D': - // only used by tests; ignore - - case 'E': - // We might receive an ErrorResponse even when not in a query; it - // is expected that the server will close the connection after - // that, but we should make sure that the error we display is the - // one from the stray ErrorResponse, not io.ErrUnexpectedEOF. - if !l.setState(connStateExpectReadyForQuery) { - return parseError(r) - } - l.replyChan <- message{t, parseError(r)} - - case 'C', 'I': - if !l.setState(connStateExpectReadyForQuery) { - // protocol out of sync - return fmt.Errorf("unexpected CommandComplete") - } - // ExecSimpleQuery doesn't need to know about this message - - case 'Z': - if !l.setState(connStateIdle) { - // protocol out of sync - return fmt.Errorf("unexpected ReadyForQuery") - } - l.replyChan <- message{t, nil} - - case 'S': - // ignore - case 'N': - if n := l.cn.noticeHandler; n != nil { - n(parseError(r)) - } - default: - return fmt.Errorf("unexpected message %q from server in listenerConnLoop", t) - } - } -} - -// This is the main routine for the goroutine receiving on the database -// connection. Most of the main logic is in listenerConnLoop. -func (l *ListenerConn) listenerConnMain() { - err := l.listenerConnLoop() - - // listenerConnLoop terminated; we're done, but we still have to clean up. - // Make sure nobody tries to start any new queries by making sure the err - // pointer is set. 
It is important that we do not overwrite its value; a - // connection could be closed by either this goroutine or one sending on - // the connection -- whoever closes the connection is assumed to have the - // more meaningful error message (as the other one will probably get - // net.errClosed), so that goroutine sets the error we expose while the - // other error is discarded. If the connection is lost while two - // goroutines are operating on the socket, it probably doesn't matter which - // error we expose so we don't try to do anything more complex. - l.connectionLock.Lock() - if l.err == nil { - l.err = err - } - l.cn.Close() - l.connectionLock.Unlock() - - // There might be a query in-flight; make sure nobody's waiting for a - // response to it, since there's not going to be one. - close(l.replyChan) - - // let the listener know we're done - close(l.notificationChan) - - // this ListenerConn is done -} - -// Listen sends a LISTEN query to the server. See ExecSimpleQuery. -func (l *ListenerConn) Listen(channel string) (bool, error) { - return l.ExecSimpleQuery("LISTEN " + QuoteIdentifier(channel)) -} - -// Unlisten sends an UNLISTEN query to the server. See ExecSimpleQuery. -func (l *ListenerConn) Unlisten(channel string) (bool, error) { - return l.ExecSimpleQuery("UNLISTEN " + QuoteIdentifier(channel)) -} - -// UnlistenAll sends an `UNLISTEN *` query to the server. See ExecSimpleQuery. -func (l *ListenerConn) UnlistenAll() (bool, error) { - return l.ExecSimpleQuery("UNLISTEN *") -} - -// Ping the remote server to make sure it's alive. Non-nil error means the -// connection has failed and should be abandoned. -func (l *ListenerConn) Ping() error { - sent, err := l.ExecSimpleQuery("") - if !sent { - return err - } - if err != nil { - // shouldn't happen - panic(err) - } - return nil -} - -// Attempt to send a query on the connection. Returns an error if sending the -// query failed, and the caller should initiate closure of this connection. 
-// The caller must be holding senderLock (see acquireSenderLock and -// releaseSenderLock). -func (l *ListenerConn) sendSimpleQuery(q string) (err error) { - defer errRecoverNoErrBadConn(&err) - - // must set connection state before sending the query - if !l.setState(connStateExpectResponse) { - panic("two queries running at the same time") - } - - // Can't use l.cn.writeBuf here because it uses the scratch buffer which - // might get overwritten by listenerConnLoop. - b := &writeBuf{ - buf: []byte("Q\x00\x00\x00\x00"), - pos: 1, - } - b.string(q) - l.cn.send(b) - - return nil -} - -// ExecSimpleQuery executes a "simple query" (i.e. one with no bindable -// parameters) on the connection. The possible return values are: -// 1) "executed" is true; the query was executed to completion on the -// database server. If the query failed, err will be set to the error -// returned by the database, otherwise err will be nil. -// 2) If "executed" is false, the query could not be executed on the remote -// server. err will be non-nil. -// -// After a call to ExecSimpleQuery has returned an executed=false value, the -// connection has either been closed or will be closed shortly thereafter, and -// all subsequently executed queries will return an error. -func (l *ListenerConn) ExecSimpleQuery(q string) (executed bool, err error) { - if err = l.acquireSenderLock(); err != nil { - return false, err - } - defer l.releaseSenderLock() - - err = l.sendSimpleQuery(q) - if err != nil { - // We can't know what state the protocol is in, so we need to abandon - // this connection. - l.connectionLock.Lock() - // Set the error pointer if it hasn't been set already; see - // listenerConnMain. - if l.err == nil { - l.err = err - } - l.connectionLock.Unlock() - l.cn.c.Close() - return false, err - } - - // now we just wait for a reply.. - for { - m, ok := <-l.replyChan - if !ok { - // We lost the connection to server, don't bother waiting for a - // a response. 
err should have been set already. - l.connectionLock.Lock() - err := l.err - l.connectionLock.Unlock() - return false, err - } - switch m.typ { - case 'Z': - // sanity check - if m.err != nil { - panic("m.err != nil") - } - // done; err might or might not be set - return true, err - - case 'E': - // sanity check - if m.err == nil { - panic("m.err == nil") - } - // server responded with an error; ReadyForQuery to follow - err = m.err - - default: - return false, fmt.Errorf("unknown response for simple query: %q", m.typ) - } - } -} - -// Close closes the connection. -func (l *ListenerConn) Close() error { - l.connectionLock.Lock() - if l.err != nil { - l.connectionLock.Unlock() - return errListenerConnClosed - } - l.err = errListenerConnClosed - l.connectionLock.Unlock() - // We can't send anything on the connection without holding senderLock. - // Simply close the net.Conn to wake up everyone operating on it. - return l.cn.c.Close() -} - -// Err returns the reason the connection was closed. It is not safe to call -// this function until l.Notify has been closed. -func (l *ListenerConn) Err() error { - return l.err -} - -var errListenerClosed = errors.New("pq: Listener has been closed") - -// ErrChannelAlreadyOpen is returned from Listen when a channel is already -// open. -var ErrChannelAlreadyOpen = errors.New("pq: channel is already open") - -// ErrChannelNotOpen is returned from Unlisten when a channel is not open. -var ErrChannelNotOpen = errors.New("pq: channel is not open") - -// ListenerEventType is an enumeration of listener event types. -type ListenerEventType int - -const ( - // ListenerEventConnected is emitted only when the database connection - // has been initially initialized. The err argument of the callback - // will always be nil. - ListenerEventConnected ListenerEventType = iota - - // ListenerEventDisconnected is emitted after a database connection has - // been lost, either because of an error or because Close has been - // called. 
The err argument will be set to the reason the database - // connection was lost. - ListenerEventDisconnected - - // ListenerEventReconnected is emitted after a database connection has - // been re-established after connection loss. The err argument of the - // callback will always be nil. After this event has been emitted, a - // nil pq.Notification is sent on the Listener.Notify channel. - ListenerEventReconnected - - // ListenerEventConnectionAttemptFailed is emitted after a connection - // to the database was attempted, but failed. The err argument will be - // set to an error describing why the connection attempt did not - // succeed. - ListenerEventConnectionAttemptFailed -) - -// EventCallbackType is the event callback type. See also ListenerEventType -// constants' documentation. -type EventCallbackType func(event ListenerEventType, err error) - -// Listener provides an interface for listening to notifications from a -// PostgreSQL database. For general usage information, see section -// "Notifications". -// -// Listener can safely be used from concurrently running goroutines. -type Listener struct { - // Channel for receiving notifications from the database. In some cases a - // nil value will be sent. See section "Notifications" above. - Notify chan *Notification - - name string - minReconnectInterval time.Duration - maxReconnectInterval time.Duration - dialer Dialer - eventCallback EventCallbackType - - lock sync.Mutex - isClosed bool - reconnectCond *sync.Cond - cn *ListenerConn - connNotificationChan <-chan *Notification - channels map[string]struct{} -} - -// NewListener creates a new database connection dedicated to LISTEN / NOTIFY. -// -// name should be set to a connection string to be used to establish the -// database connection (see section "Connection String Parameters" above). -// -// minReconnectInterval controls the duration to wait before trying to -// re-establish the database connection after connection loss. 
After each -// consecutive failure this interval is doubled, until maxReconnectInterval is -// reached. Successfully completing the connection establishment procedure -// resets the interval back to minReconnectInterval. -// -// The last parameter eventCallback can be set to a function which will be -// called by the Listener when the state of the underlying database connection -// changes. This callback will be called by the goroutine which dispatches the -// notifications over the Notify channel, so you should try to avoid doing -// potentially time-consuming operations from the callback. -func NewListener(name string, - minReconnectInterval time.Duration, - maxReconnectInterval time.Duration, - eventCallback EventCallbackType) *Listener { - return NewDialListener(defaultDialer{}, name, minReconnectInterval, maxReconnectInterval, eventCallback) -} - -// NewDialListener is like NewListener but it takes a Dialer. -func NewDialListener(d Dialer, - name string, - minReconnectInterval time.Duration, - maxReconnectInterval time.Duration, - eventCallback EventCallbackType) *Listener { - - l := &Listener{ - name: name, - minReconnectInterval: minReconnectInterval, - maxReconnectInterval: maxReconnectInterval, - dialer: d, - eventCallback: eventCallback, - - channels: make(map[string]struct{}), - - Notify: make(chan *Notification, 32), - } - l.reconnectCond = sync.NewCond(&l.lock) - - go l.listenerMain() - - return l -} - -// NotificationChannel returns the notification channel for this listener. -// This is the same channel as Notify, and will not be recreated during the -// life time of the Listener. -func (l *Listener) NotificationChannel() <-chan *Notification { - return l.Notify -} - -// Listen starts listening for notifications on a channel. Calls to this -// function will block until an acknowledgement has been received from the -// server. 
Note that Listener automatically re-establishes the connection -// after connection loss, so this function may block indefinitely if the -// connection can not be re-established. -// -// Listen will only fail in three conditions: -// 1) The channel is already open. The returned error will be -// ErrChannelAlreadyOpen. -// 2) The query was executed on the remote server, but PostgreSQL returned an -// error message in response to the query. The returned error will be a -// pq.Error containing the information the server supplied. -// 3) Close is called on the Listener before the request could be completed. -// -// The channel name is case-sensitive. -func (l *Listener) Listen(channel string) error { - l.lock.Lock() - defer l.lock.Unlock() - - if l.isClosed { - return errListenerClosed - } - - // The server allows you to issue a LISTEN on a channel which is already - // open, but it seems useful to be able to detect this case to spot for - // mistakes in application logic. If the application genuinely does't - // care, it can check the exported error and ignore it. - _, exists := l.channels[channel] - if exists { - return ErrChannelAlreadyOpen - } - - if l.cn != nil { - // If gotResponse is true but error is set, the query was executed on - // the remote server, but resulted in an error. This should be - // relatively rare, so it's fine if we just pass the error to our - // caller. However, if gotResponse is false, we could not complete the - // query on the remote server and our underlying connection is about - // to go away, so we only add relname to l.channels, and wait for - // resync() to take care of the rest. - gotResponse, err := l.cn.Listen(channel) - if gotResponse && err != nil { - return err - } - } - - l.channels[channel] = struct{}{} - for l.cn == nil { - l.reconnectCond.Wait() - // we let go of the mutex for a while - if l.isClosed { - return errListenerClosed - } - } - - return nil -} - -// Unlisten removes a channel from the Listener's channel list. 
Returns -// ErrChannelNotOpen if the Listener is not listening on the specified channel. -// Returns immediately with no error if there is no connection. Note that you -// might still get notifications for this channel even after Unlisten has -// returned. -// -// The channel name is case-sensitive. -func (l *Listener) Unlisten(channel string) error { - l.lock.Lock() - defer l.lock.Unlock() - - if l.isClosed { - return errListenerClosed - } - - // Similarly to LISTEN, this is not an error in Postgres, but it seems - // useful to distinguish from the normal conditions. - _, exists := l.channels[channel] - if !exists { - return ErrChannelNotOpen - } - - if l.cn != nil { - // Similarly to Listen (see comment in that function), the caller - // should only be bothered with an error if it came from the backend as - // a response to our query. - gotResponse, err := l.cn.Unlisten(channel) - if gotResponse && err != nil { - return err - } - } - - // Don't bother waiting for resync if there's no connection. - delete(l.channels, channel) - return nil -} - -// UnlistenAll removes all channels from the Listener's channel list. Returns -// immediately with no error if there is no connection. Note that you might -// still get notifications for any of the deleted channels even after -// UnlistenAll has returned. -func (l *Listener) UnlistenAll() error { - l.lock.Lock() - defer l.lock.Unlock() - - if l.isClosed { - return errListenerClosed - } - - if l.cn != nil { - // Similarly to Listen (see comment in that function), the caller - // should only be bothered with an error if it came from the backend as - // a response to our query. - gotResponse, err := l.cn.UnlistenAll() - if gotResponse && err != nil { - return err - } - } - - // Don't bother waiting for resync if there's no connection. - l.channels = make(map[string]struct{}) - return nil -} - -// Ping the remote server to make sure it's alive. Non-nil return value means -// that there is no active connection. 
-func (l *Listener) Ping() error { - l.lock.Lock() - defer l.lock.Unlock() - - if l.isClosed { - return errListenerClosed - } - if l.cn == nil { - return errors.New("no connection") - } - - return l.cn.Ping() -} - -// Clean up after losing the server connection. Returns l.cn.Err(), which -// should have the reason the connection was lost. -func (l *Listener) disconnectCleanup() error { - l.lock.Lock() - defer l.lock.Unlock() - - // sanity check; can't look at Err() until the channel has been closed - select { - case _, ok := <-l.connNotificationChan: - if ok { - panic("connNotificationChan not closed") - } - default: - panic("connNotificationChan not closed") - } - - err := l.cn.Err() - l.cn.Close() - l.cn = nil - return err -} - -// Synchronize the list of channels we want to be listening on with the server -// after the connection has been established. -func (l *Listener) resync(cn *ListenerConn, notificationChan <-chan *Notification) error { - doneChan := make(chan error) - go func(notificationChan <-chan *Notification) { - for channel := range l.channels { - // If we got a response, return that error to our caller as it's - // going to be more descriptive than cn.Err(). - gotResponse, err := cn.Listen(channel) - if gotResponse && err != nil { - doneChan <- err - return - } - - // If we couldn't reach the server, wait for notificationChan to - // close and then return the error message from the connection, as - // per ListenerConn's interface. - if err != nil { - for range notificationChan { - } - doneChan <- cn.Err() - return - } - } - doneChan <- nil - }(notificationChan) - - // Ignore notifications while synchronization is going on to avoid - // deadlocks. We have to send a nil notification over Notify anyway as - // we can't possibly know which notifications (if any) were lost while - // the connection was down, so there's no reason to try and process - // these messages at all. 
- for { - select { - case _, ok := <-notificationChan: - if !ok { - notificationChan = nil - } - - case err := <-doneChan: - return err - } - } -} - -// caller should NOT be holding l.lock -func (l *Listener) closed() bool { - l.lock.Lock() - defer l.lock.Unlock() - - return l.isClosed -} - -func (l *Listener) connect() error { - notificationChan := make(chan *Notification, 32) - cn, err := newDialListenerConn(l.dialer, l.name, notificationChan) - if err != nil { - return err - } - - l.lock.Lock() - defer l.lock.Unlock() - - err = l.resync(cn, notificationChan) - if err != nil { - cn.Close() - return err - } - - l.cn = cn - l.connNotificationChan = notificationChan - l.reconnectCond.Broadcast() - - return nil -} - -// Close disconnects the Listener from the database and shuts it down. -// Subsequent calls to its methods will return an error. Close returns an -// error if the connection has already been closed. -func (l *Listener) Close() error { - l.lock.Lock() - defer l.lock.Unlock() - - if l.isClosed { - return errListenerClosed - } - - if l.cn != nil { - l.cn.Close() - } - l.isClosed = true - - // Unblock calls to Listen() - l.reconnectCond.Broadcast() - - return nil -} - -func (l *Listener) emitEvent(event ListenerEventType, err error) { - if l.eventCallback != nil { - l.eventCallback(event, err) - } -} - -// Main logic here: maintain a connection to the server when possible, wait -// for notifications and emit events. 
-func (l *Listener) listenerConnLoop() { - var nextReconnect time.Time - - reconnectInterval := l.minReconnectInterval - for { - for { - err := l.connect() - if err == nil { - break - } - - if l.closed() { - return - } - l.emitEvent(ListenerEventConnectionAttemptFailed, err) - - time.Sleep(reconnectInterval) - reconnectInterval *= 2 - if reconnectInterval > l.maxReconnectInterval { - reconnectInterval = l.maxReconnectInterval - } - } - - if nextReconnect.IsZero() { - l.emitEvent(ListenerEventConnected, nil) - } else { - l.emitEvent(ListenerEventReconnected, nil) - l.Notify <- nil - } - - reconnectInterval = l.minReconnectInterval - nextReconnect = time.Now().Add(reconnectInterval) - - for { - notification, ok := <-l.connNotificationChan - if !ok { - // lost connection, loop again - break - } - l.Notify <- notification - } - - err := l.disconnectCleanup() - if l.closed() { - return - } - l.emitEvent(ListenerEventDisconnected, err) - - time.Sleep(time.Until(nextReconnect)) - } -} - -func (l *Listener) listenerMain() { - l.listenerConnLoop() - close(l.Notify) -} diff --git a/vendor/github.com/lib/pq/oid/doc.go b/vendor/github.com/lib/pq/oid/doc.go deleted file mode 100644 index caaede24..00000000 --- a/vendor/github.com/lib/pq/oid/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Package oid contains OID constants -// as defined by the Postgres server. -package oid - -// Oid is a Postgres Object ID. -type Oid uint32 diff --git a/vendor/github.com/lib/pq/oid/types.go b/vendor/github.com/lib/pq/oid/types.go deleted file mode 100644 index ecc84c2c..00000000 --- a/vendor/github.com/lib/pq/oid/types.go +++ /dev/null @@ -1,343 +0,0 @@ -// Code generated by gen.go. DO NOT EDIT. 
- -package oid - -const ( - T_bool Oid = 16 - T_bytea Oid = 17 - T_char Oid = 18 - T_name Oid = 19 - T_int8 Oid = 20 - T_int2 Oid = 21 - T_int2vector Oid = 22 - T_int4 Oid = 23 - T_regproc Oid = 24 - T_text Oid = 25 - T_oid Oid = 26 - T_tid Oid = 27 - T_xid Oid = 28 - T_cid Oid = 29 - T_oidvector Oid = 30 - T_pg_ddl_command Oid = 32 - T_pg_type Oid = 71 - T_pg_attribute Oid = 75 - T_pg_proc Oid = 81 - T_pg_class Oid = 83 - T_json Oid = 114 - T_xml Oid = 142 - T__xml Oid = 143 - T_pg_node_tree Oid = 194 - T__json Oid = 199 - T_smgr Oid = 210 - T_index_am_handler Oid = 325 - T_point Oid = 600 - T_lseg Oid = 601 - T_path Oid = 602 - T_box Oid = 603 - T_polygon Oid = 604 - T_line Oid = 628 - T__line Oid = 629 - T_cidr Oid = 650 - T__cidr Oid = 651 - T_float4 Oid = 700 - T_float8 Oid = 701 - T_abstime Oid = 702 - T_reltime Oid = 703 - T_tinterval Oid = 704 - T_unknown Oid = 705 - T_circle Oid = 718 - T__circle Oid = 719 - T_money Oid = 790 - T__money Oid = 791 - T_macaddr Oid = 829 - T_inet Oid = 869 - T__bool Oid = 1000 - T__bytea Oid = 1001 - T__char Oid = 1002 - T__name Oid = 1003 - T__int2 Oid = 1005 - T__int2vector Oid = 1006 - T__int4 Oid = 1007 - T__regproc Oid = 1008 - T__text Oid = 1009 - T__tid Oid = 1010 - T__xid Oid = 1011 - T__cid Oid = 1012 - T__oidvector Oid = 1013 - T__bpchar Oid = 1014 - T__varchar Oid = 1015 - T__int8 Oid = 1016 - T__point Oid = 1017 - T__lseg Oid = 1018 - T__path Oid = 1019 - T__box Oid = 1020 - T__float4 Oid = 1021 - T__float8 Oid = 1022 - T__abstime Oid = 1023 - T__reltime Oid = 1024 - T__tinterval Oid = 1025 - T__polygon Oid = 1027 - T__oid Oid = 1028 - T_aclitem Oid = 1033 - T__aclitem Oid = 1034 - T__macaddr Oid = 1040 - T__inet Oid = 1041 - T_bpchar Oid = 1042 - T_varchar Oid = 1043 - T_date Oid = 1082 - T_time Oid = 1083 - T_timestamp Oid = 1114 - T__timestamp Oid = 1115 - T__date Oid = 1182 - T__time Oid = 1183 - T_timestamptz Oid = 1184 - T__timestamptz Oid = 1185 - T_interval Oid = 1186 - T__interval Oid = 1187 - T__numeric 
Oid = 1231 - T_pg_database Oid = 1248 - T__cstring Oid = 1263 - T_timetz Oid = 1266 - T__timetz Oid = 1270 - T_bit Oid = 1560 - T__bit Oid = 1561 - T_varbit Oid = 1562 - T__varbit Oid = 1563 - T_numeric Oid = 1700 - T_refcursor Oid = 1790 - T__refcursor Oid = 2201 - T_regprocedure Oid = 2202 - T_regoper Oid = 2203 - T_regoperator Oid = 2204 - T_regclass Oid = 2205 - T_regtype Oid = 2206 - T__regprocedure Oid = 2207 - T__regoper Oid = 2208 - T__regoperator Oid = 2209 - T__regclass Oid = 2210 - T__regtype Oid = 2211 - T_record Oid = 2249 - T_cstring Oid = 2275 - T_any Oid = 2276 - T_anyarray Oid = 2277 - T_void Oid = 2278 - T_trigger Oid = 2279 - T_language_handler Oid = 2280 - T_internal Oid = 2281 - T_opaque Oid = 2282 - T_anyelement Oid = 2283 - T__record Oid = 2287 - T_anynonarray Oid = 2776 - T_pg_authid Oid = 2842 - T_pg_auth_members Oid = 2843 - T__txid_snapshot Oid = 2949 - T_uuid Oid = 2950 - T__uuid Oid = 2951 - T_txid_snapshot Oid = 2970 - T_fdw_handler Oid = 3115 - T_pg_lsn Oid = 3220 - T__pg_lsn Oid = 3221 - T_tsm_handler Oid = 3310 - T_anyenum Oid = 3500 - T_tsvector Oid = 3614 - T_tsquery Oid = 3615 - T_gtsvector Oid = 3642 - T__tsvector Oid = 3643 - T__gtsvector Oid = 3644 - T__tsquery Oid = 3645 - T_regconfig Oid = 3734 - T__regconfig Oid = 3735 - T_regdictionary Oid = 3769 - T__regdictionary Oid = 3770 - T_jsonb Oid = 3802 - T__jsonb Oid = 3807 - T_anyrange Oid = 3831 - T_event_trigger Oid = 3838 - T_int4range Oid = 3904 - T__int4range Oid = 3905 - T_numrange Oid = 3906 - T__numrange Oid = 3907 - T_tsrange Oid = 3908 - T__tsrange Oid = 3909 - T_tstzrange Oid = 3910 - T__tstzrange Oid = 3911 - T_daterange Oid = 3912 - T__daterange Oid = 3913 - T_int8range Oid = 3926 - T__int8range Oid = 3927 - T_pg_shseclabel Oid = 4066 - T_regnamespace Oid = 4089 - T__regnamespace Oid = 4090 - T_regrole Oid = 4096 - T__regrole Oid = 4097 -) - -var TypeName = map[Oid]string{ - T_bool: "BOOL", - T_bytea: "BYTEA", - T_char: "CHAR", - T_name: "NAME", - T_int8: "INT8", - 
T_int2: "INT2", - T_int2vector: "INT2VECTOR", - T_int4: "INT4", - T_regproc: "REGPROC", - T_text: "TEXT", - T_oid: "OID", - T_tid: "TID", - T_xid: "XID", - T_cid: "CID", - T_oidvector: "OIDVECTOR", - T_pg_ddl_command: "PG_DDL_COMMAND", - T_pg_type: "PG_TYPE", - T_pg_attribute: "PG_ATTRIBUTE", - T_pg_proc: "PG_PROC", - T_pg_class: "PG_CLASS", - T_json: "JSON", - T_xml: "XML", - T__xml: "_XML", - T_pg_node_tree: "PG_NODE_TREE", - T__json: "_JSON", - T_smgr: "SMGR", - T_index_am_handler: "INDEX_AM_HANDLER", - T_point: "POINT", - T_lseg: "LSEG", - T_path: "PATH", - T_box: "BOX", - T_polygon: "POLYGON", - T_line: "LINE", - T__line: "_LINE", - T_cidr: "CIDR", - T__cidr: "_CIDR", - T_float4: "FLOAT4", - T_float8: "FLOAT8", - T_abstime: "ABSTIME", - T_reltime: "RELTIME", - T_tinterval: "TINTERVAL", - T_unknown: "UNKNOWN", - T_circle: "CIRCLE", - T__circle: "_CIRCLE", - T_money: "MONEY", - T__money: "_MONEY", - T_macaddr: "MACADDR", - T_inet: "INET", - T__bool: "_BOOL", - T__bytea: "_BYTEA", - T__char: "_CHAR", - T__name: "_NAME", - T__int2: "_INT2", - T__int2vector: "_INT2VECTOR", - T__int4: "_INT4", - T__regproc: "_REGPROC", - T__text: "_TEXT", - T__tid: "_TID", - T__xid: "_XID", - T__cid: "_CID", - T__oidvector: "_OIDVECTOR", - T__bpchar: "_BPCHAR", - T__varchar: "_VARCHAR", - T__int8: "_INT8", - T__point: "_POINT", - T__lseg: "_LSEG", - T__path: "_PATH", - T__box: "_BOX", - T__float4: "_FLOAT4", - T__float8: "_FLOAT8", - T__abstime: "_ABSTIME", - T__reltime: "_RELTIME", - T__tinterval: "_TINTERVAL", - T__polygon: "_POLYGON", - T__oid: "_OID", - T_aclitem: "ACLITEM", - T__aclitem: "_ACLITEM", - T__macaddr: "_MACADDR", - T__inet: "_INET", - T_bpchar: "BPCHAR", - T_varchar: "VARCHAR", - T_date: "DATE", - T_time: "TIME", - T_timestamp: "TIMESTAMP", - T__timestamp: "_TIMESTAMP", - T__date: "_DATE", - T__time: "_TIME", - T_timestamptz: "TIMESTAMPTZ", - T__timestamptz: "_TIMESTAMPTZ", - T_interval: "INTERVAL", - T__interval: "_INTERVAL", - T__numeric: "_NUMERIC", - 
T_pg_database: "PG_DATABASE", - T__cstring: "_CSTRING", - T_timetz: "TIMETZ", - T__timetz: "_TIMETZ", - T_bit: "BIT", - T__bit: "_BIT", - T_varbit: "VARBIT", - T__varbit: "_VARBIT", - T_numeric: "NUMERIC", - T_refcursor: "REFCURSOR", - T__refcursor: "_REFCURSOR", - T_regprocedure: "REGPROCEDURE", - T_regoper: "REGOPER", - T_regoperator: "REGOPERATOR", - T_regclass: "REGCLASS", - T_regtype: "REGTYPE", - T__regprocedure: "_REGPROCEDURE", - T__regoper: "_REGOPER", - T__regoperator: "_REGOPERATOR", - T__regclass: "_REGCLASS", - T__regtype: "_REGTYPE", - T_record: "RECORD", - T_cstring: "CSTRING", - T_any: "ANY", - T_anyarray: "ANYARRAY", - T_void: "VOID", - T_trigger: "TRIGGER", - T_language_handler: "LANGUAGE_HANDLER", - T_internal: "INTERNAL", - T_opaque: "OPAQUE", - T_anyelement: "ANYELEMENT", - T__record: "_RECORD", - T_anynonarray: "ANYNONARRAY", - T_pg_authid: "PG_AUTHID", - T_pg_auth_members: "PG_AUTH_MEMBERS", - T__txid_snapshot: "_TXID_SNAPSHOT", - T_uuid: "UUID", - T__uuid: "_UUID", - T_txid_snapshot: "TXID_SNAPSHOT", - T_fdw_handler: "FDW_HANDLER", - T_pg_lsn: "PG_LSN", - T__pg_lsn: "_PG_LSN", - T_tsm_handler: "TSM_HANDLER", - T_anyenum: "ANYENUM", - T_tsvector: "TSVECTOR", - T_tsquery: "TSQUERY", - T_gtsvector: "GTSVECTOR", - T__tsvector: "_TSVECTOR", - T__gtsvector: "_GTSVECTOR", - T__tsquery: "_TSQUERY", - T_regconfig: "REGCONFIG", - T__regconfig: "_REGCONFIG", - T_regdictionary: "REGDICTIONARY", - T__regdictionary: "_REGDICTIONARY", - T_jsonb: "JSONB", - T__jsonb: "_JSONB", - T_anyrange: "ANYRANGE", - T_event_trigger: "EVENT_TRIGGER", - T_int4range: "INT4RANGE", - T__int4range: "_INT4RANGE", - T_numrange: "NUMRANGE", - T__numrange: "_NUMRANGE", - T_tsrange: "TSRANGE", - T__tsrange: "_TSRANGE", - T_tstzrange: "TSTZRANGE", - T__tstzrange: "_TSTZRANGE", - T_daterange: "DATERANGE", - T__daterange: "_DATERANGE", - T_int8range: "INT8RANGE", - T__int8range: "_INT8RANGE", - T_pg_shseclabel: "PG_SHSECLABEL", - T_regnamespace: "REGNAMESPACE", - T__regnamespace: 
"_REGNAMESPACE", - T_regrole: "REGROLE", - T__regrole: "_REGROLE", -} diff --git a/vendor/github.com/lib/pq/rows.go b/vendor/github.com/lib/pq/rows.go deleted file mode 100644 index c6aa5b9a..00000000 --- a/vendor/github.com/lib/pq/rows.go +++ /dev/null @@ -1,93 +0,0 @@ -package pq - -import ( - "math" - "reflect" - "time" - - "github.com/lib/pq/oid" -) - -const headerSize = 4 - -type fieldDesc struct { - // The object ID of the data type. - OID oid.Oid - // The data type size (see pg_type.typlen). - // Note that negative values denote variable-width types. - Len int - // The type modifier (see pg_attribute.atttypmod). - // The meaning of the modifier is type-specific. - Mod int -} - -func (fd fieldDesc) Type() reflect.Type { - switch fd.OID { - case oid.T_int8: - return reflect.TypeOf(int64(0)) - case oid.T_int4: - return reflect.TypeOf(int32(0)) - case oid.T_int2: - return reflect.TypeOf(int16(0)) - case oid.T_varchar, oid.T_text: - return reflect.TypeOf("") - case oid.T_bool: - return reflect.TypeOf(false) - case oid.T_date, oid.T_time, oid.T_timetz, oid.T_timestamp, oid.T_timestamptz: - return reflect.TypeOf(time.Time{}) - case oid.T_bytea: - return reflect.TypeOf([]byte(nil)) - default: - return reflect.TypeOf(new(interface{})).Elem() - } -} - -func (fd fieldDesc) Name() string { - return oid.TypeName[fd.OID] -} - -func (fd fieldDesc) Length() (length int64, ok bool) { - switch fd.OID { - case oid.T_text, oid.T_bytea: - return math.MaxInt64, true - case oid.T_varchar, oid.T_bpchar: - return int64(fd.Mod - headerSize), true - default: - return 0, false - } -} - -func (fd fieldDesc) PrecisionScale() (precision, scale int64, ok bool) { - switch fd.OID { - case oid.T_numeric, oid.T__numeric: - mod := fd.Mod - headerSize - precision = int64((mod >> 16) & 0xffff) - scale = int64(mod & 0xffff) - return precision, scale, true - default: - return 0, 0, false - } -} - -// ColumnTypeScanType returns the value type that can be used to scan types into. 
-func (rs *rows) ColumnTypeScanType(index int) reflect.Type { - return rs.colTyps[index].Type() -} - -// ColumnTypeDatabaseTypeName return the database system type name. -func (rs *rows) ColumnTypeDatabaseTypeName(index int) string { - return rs.colTyps[index].Name() -} - -// ColumnTypeLength returns the length of the column type if the column is a -// variable length type. If the column is not a variable length type ok -// should return false. -func (rs *rows) ColumnTypeLength(index int) (length int64, ok bool) { - return rs.colTyps[index].Length() -} - -// ColumnTypePrecisionScale should return the precision and scale for decimal -// types. If not applicable, ok should be false. -func (rs *rows) ColumnTypePrecisionScale(index int) (precision, scale int64, ok bool) { - return rs.colTyps[index].PrecisionScale() -} diff --git a/vendor/github.com/lib/pq/scram/scram.go b/vendor/github.com/lib/pq/scram/scram.go deleted file mode 100644 index 477216b6..00000000 --- a/vendor/github.com/lib/pq/scram/scram.go +++ /dev/null @@ -1,264 +0,0 @@ -// Copyright (c) 2014 - Gustavo Niemeyer -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Package scram implements a SCRAM-{SHA-1,etc} client per RFC5802. -// -// http://tools.ietf.org/html/rfc5802 -// -package scram - -import ( - "bytes" - "crypto/hmac" - "crypto/rand" - "encoding/base64" - "fmt" - "hash" - "strconv" - "strings" -) - -// Client implements a SCRAM-* client (SCRAM-SHA-1, SCRAM-SHA-256, etc). -// -// A Client may be used within a SASL conversation with logic resembling: -// -// var in []byte -// var client = scram.NewClient(sha1.New, user, pass) -// for client.Step(in) { -// out := client.Out() -// // send out to server -// in := serverOut -// } -// if client.Err() != nil { -// // auth failed -// } -// -type Client struct { - newHash func() hash.Hash - - user string - pass string - step int - out bytes.Buffer - err error - - clientNonce []byte - serverNonce []byte - saltedPass []byte - authMsg bytes.Buffer -} - -// NewClient returns a new SCRAM-* client with the provided hash algorithm. -// -// For SCRAM-SHA-256, for example, use: -// -// client := scram.NewClient(sha256.New, user, pass) -// -func NewClient(newHash func() hash.Hash, user, pass string) *Client { - c := &Client{ - newHash: newHash, - user: user, - pass: pass, - } - c.out.Grow(256) - c.authMsg.Grow(256) - return c -} - -// Out returns the data to be sent to the server in the current step. 
-func (c *Client) Out() []byte { - if c.out.Len() == 0 { - return nil - } - return c.out.Bytes() -} - -// Err returns the error that occurred, or nil if there were no errors. -func (c *Client) Err() error { - return c.err -} - -// SetNonce sets the client nonce to the provided value. -// If not set, the nonce is generated automatically out of crypto/rand on the first step. -func (c *Client) SetNonce(nonce []byte) { - c.clientNonce = nonce -} - -var escaper = strings.NewReplacer("=", "=3D", ",", "=2C") - -// Step processes the incoming data from the server and makes the -// next round of data for the server available via Client.Out. -// Step returns false if there are no errors and more data is -// still expected. -func (c *Client) Step(in []byte) bool { - c.out.Reset() - if c.step > 2 || c.err != nil { - return false - } - c.step++ - switch c.step { - case 1: - c.err = c.step1(in) - case 2: - c.err = c.step2(in) - case 3: - c.err = c.step3(in) - } - return c.step > 2 || c.err != nil -} - -func (c *Client) step1(in []byte) error { - if len(c.clientNonce) == 0 { - const nonceLen = 16 - buf := make([]byte, nonceLen+b64.EncodedLen(nonceLen)) - if _, err := rand.Read(buf[:nonceLen]); err != nil { - return fmt.Errorf("cannot read random SCRAM-SHA-256 nonce from operating system: %v", err) - } - c.clientNonce = buf[nonceLen:] - b64.Encode(c.clientNonce, buf[:nonceLen]) - } - c.authMsg.WriteString("n=") - escaper.WriteString(&c.authMsg, c.user) - c.authMsg.WriteString(",r=") - c.authMsg.Write(c.clientNonce) - - c.out.WriteString("n,,") - c.out.Write(c.authMsg.Bytes()) - return nil -} - -var b64 = base64.StdEncoding - -func (c *Client) step2(in []byte) error { - c.authMsg.WriteByte(',') - c.authMsg.Write(in) - - fields := bytes.Split(in, []byte(",")) - if len(fields) != 3 { - return fmt.Errorf("expected 3 fields in first SCRAM-SHA-256 server message, got %d: %q", len(fields), in) - } - if !bytes.HasPrefix(fields[0], []byte("r=")) || len(fields[0]) < 2 { - return 
fmt.Errorf("server sent an invalid SCRAM-SHA-256 nonce: %q", fields[0]) - } - if !bytes.HasPrefix(fields[1], []byte("s=")) || len(fields[1]) < 6 { - return fmt.Errorf("server sent an invalid SCRAM-SHA-256 salt: %q", fields[1]) - } - if !bytes.HasPrefix(fields[2], []byte("i=")) || len(fields[2]) < 6 { - return fmt.Errorf("server sent an invalid SCRAM-SHA-256 iteration count: %q", fields[2]) - } - - c.serverNonce = fields[0][2:] - if !bytes.HasPrefix(c.serverNonce, c.clientNonce) { - return fmt.Errorf("server SCRAM-SHA-256 nonce is not prefixed by client nonce: got %q, want %q+\"...\"", c.serverNonce, c.clientNonce) - } - - salt := make([]byte, b64.DecodedLen(len(fields[1][2:]))) - n, err := b64.Decode(salt, fields[1][2:]) - if err != nil { - return fmt.Errorf("cannot decode SCRAM-SHA-256 salt sent by server: %q", fields[1]) - } - salt = salt[:n] - iterCount, err := strconv.Atoi(string(fields[2][2:])) - if err != nil { - return fmt.Errorf("server sent an invalid SCRAM-SHA-256 iteration count: %q", fields[2]) - } - c.saltPassword(salt, iterCount) - - c.authMsg.WriteString(",c=biws,r=") - c.authMsg.Write(c.serverNonce) - - c.out.WriteString("c=biws,r=") - c.out.Write(c.serverNonce) - c.out.WriteString(",p=") - c.out.Write(c.clientProof()) - return nil -} - -func (c *Client) step3(in []byte) error { - var isv, ise bool - var fields = bytes.Split(in, []byte(",")) - if len(fields) == 1 { - isv = bytes.HasPrefix(fields[0], []byte("v=")) - ise = bytes.HasPrefix(fields[0], []byte("e=")) - } - if ise { - return fmt.Errorf("SCRAM-SHA-256 authentication error: %s", fields[0][2:]) - } else if !isv { - return fmt.Errorf("unsupported SCRAM-SHA-256 final message from server: %q", in) - } - if !bytes.Equal(c.serverSignature(), fields[0][2:]) { - return fmt.Errorf("cannot authenticate SCRAM-SHA-256 server signature: %q", fields[0][2:]) - } - return nil -} - -func (c *Client) saltPassword(salt []byte, iterCount int) { - mac := hmac.New(c.newHash, []byte(c.pass)) - mac.Write(salt) - 
mac.Write([]byte{0, 0, 0, 1}) - ui := mac.Sum(nil) - hi := make([]byte, len(ui)) - copy(hi, ui) - for i := 1; i < iterCount; i++ { - mac.Reset() - mac.Write(ui) - mac.Sum(ui[:0]) - for j, b := range ui { - hi[j] ^= b - } - } - c.saltedPass = hi -} - -func (c *Client) clientProof() []byte { - mac := hmac.New(c.newHash, c.saltedPass) - mac.Write([]byte("Client Key")) - clientKey := mac.Sum(nil) - hash := c.newHash() - hash.Write(clientKey) - storedKey := hash.Sum(nil) - mac = hmac.New(c.newHash, storedKey) - mac.Write(c.authMsg.Bytes()) - clientProof := mac.Sum(nil) - for i, b := range clientKey { - clientProof[i] ^= b - } - clientProof64 := make([]byte, b64.EncodedLen(len(clientProof))) - b64.Encode(clientProof64, clientProof) - return clientProof64 -} - -func (c *Client) serverSignature() []byte { - mac := hmac.New(c.newHash, c.saltedPass) - mac.Write([]byte("Server Key")) - serverKey := mac.Sum(nil) - - mac = hmac.New(c.newHash, serverKey) - mac.Write(c.authMsg.Bytes()) - serverSignature := mac.Sum(nil) - - encoded := make([]byte, b64.EncodedLen(len(serverSignature))) - b64.Encode(encoded, serverSignature) - return encoded -} diff --git a/vendor/github.com/lib/pq/ssl.go b/vendor/github.com/lib/pq/ssl.go deleted file mode 100644 index 36b61ba4..00000000 --- a/vendor/github.com/lib/pq/ssl.go +++ /dev/null @@ -1,204 +0,0 @@ -package pq - -import ( - "crypto/tls" - "crypto/x509" - "io/ioutil" - "net" - "os" - "os/user" - "path/filepath" - "strings" -) - -// ssl generates a function to upgrade a net.Conn based on the "sslmode" and -// related settings. The function is nil when no upgrade should take place. -func ssl(o values) (func(net.Conn) (net.Conn, error), error) { - verifyCaOnly := false - tlsConf := tls.Config{} - switch mode := o["sslmode"]; mode { - // "require" is the default. - case "", "require": - // We must skip TLS's own verification since it requires full - // verification since Go 1.3. 
- tlsConf.InsecureSkipVerify = true - - // From http://www.postgresql.org/docs/current/static/libpq-ssl.html: - // - // Note: For backwards compatibility with earlier versions of - // PostgreSQL, if a root CA file exists, the behavior of - // sslmode=require will be the same as that of verify-ca, meaning the - // server certificate is validated against the CA. Relying on this - // behavior is discouraged, and applications that need certificate - // validation should always use verify-ca or verify-full. - if sslrootcert, ok := o["sslrootcert"]; ok { - if _, err := os.Stat(sslrootcert); err == nil { - verifyCaOnly = true - } else { - delete(o, "sslrootcert") - } - } - case "verify-ca": - // We must skip TLS's own verification since it requires full - // verification since Go 1.3. - tlsConf.InsecureSkipVerify = true - verifyCaOnly = true - case "verify-full": - tlsConf.ServerName = o["host"] - case "disable": - return nil, nil - default: - return nil, fmterrorf(`unsupported sslmode %q; only "require" (default), "verify-full", "verify-ca", and "disable" supported`, mode) - } - - // Set Server Name Indication (SNI), if enabled by connection parameters. - // By default SNI is on, any value which is not starting with "1" disables - // SNI -- that is the same check vanilla libpq uses. - if sslsni := o["sslsni"]; sslsni == "" || strings.HasPrefix(sslsni, "1") { - // RFC 6066 asks to not set SNI if the host is a literal IP address (IPv4 - // or IPv6). This check is coded already crypto.tls.hostnameInSNI, so - // just always set ServerName here and let crypto/tls do the filtering. - tlsConf.ServerName = o["host"] - } - - err := sslClientCertificates(&tlsConf, o) - if err != nil { - return nil, err - } - err = sslCertificateAuthority(&tlsConf, o) - if err != nil { - return nil, err - } - - // Accept renegotiation requests initiated by the backend. 
- // - // Renegotiation was deprecated then removed from PostgreSQL 9.5, but - // the default configuration of older versions has it enabled. Redshift - // also initiates renegotiations and cannot be reconfigured. - tlsConf.Renegotiation = tls.RenegotiateFreelyAsClient - - return func(conn net.Conn) (net.Conn, error) { - client := tls.Client(conn, &tlsConf) - if verifyCaOnly { - err := sslVerifyCertificateAuthority(client, &tlsConf) - if err != nil { - return nil, err - } - } - return client, nil - }, nil -} - -// sslClientCertificates adds the certificate specified in the "sslcert" and -// "sslkey" settings, or if they aren't set, from the .postgresql directory -// in the user's home directory. The configured files must exist and have -// the correct permissions. -func sslClientCertificates(tlsConf *tls.Config, o values) error { - sslinline := o["sslinline"] - if sslinline == "true" { - cert, err := tls.X509KeyPair([]byte(o["sslcert"]), []byte(o["sslkey"])) - if err != nil { - return err - } - tlsConf.Certificates = []tls.Certificate{cert} - return nil - } - - // user.Current() might fail when cross-compiling. We have to ignore the - // error and continue without home directory defaults, since we wouldn't - // know from where to load them. - user, _ := user.Current() - - // In libpq, the client certificate is only loaded if the setting is not blank. 
- // - // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1036-L1037 - sslcert := o["sslcert"] - if len(sslcert) == 0 && user != nil { - sslcert = filepath.Join(user.HomeDir, ".postgresql", "postgresql.crt") - } - // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1045 - if len(sslcert) == 0 { - return nil - } - // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1050:L1054 - if _, err := os.Stat(sslcert); os.IsNotExist(err) { - return nil - } else if err != nil { - return err - } - - // In libpq, the ssl key is only loaded if the setting is not blank. - // - // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1123-L1222 - sslkey := o["sslkey"] - if len(sslkey) == 0 && user != nil { - sslkey = filepath.Join(user.HomeDir, ".postgresql", "postgresql.key") - } - - if len(sslkey) > 0 { - if err := sslKeyPermissions(sslkey); err != nil { - return err - } - } - - cert, err := tls.LoadX509KeyPair(sslcert, sslkey) - if err != nil { - return err - } - - tlsConf.Certificates = []tls.Certificate{cert} - return nil -} - -// sslCertificateAuthority adds the RootCA specified in the "sslrootcert" setting. -func sslCertificateAuthority(tlsConf *tls.Config, o values) error { - // In libpq, the root certificate is only loaded if the setting is not blank. 
- // - // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L950-L951 - if sslrootcert := o["sslrootcert"]; len(sslrootcert) > 0 { - tlsConf.RootCAs = x509.NewCertPool() - - sslinline := o["sslinline"] - - var cert []byte - if sslinline == "true" { - cert = []byte(sslrootcert) - } else { - var err error - cert, err = ioutil.ReadFile(sslrootcert) - if err != nil { - return err - } - } - - if !tlsConf.RootCAs.AppendCertsFromPEM(cert) { - return fmterrorf("couldn't parse pem in sslrootcert") - } - } - - return nil -} - -// sslVerifyCertificateAuthority carries out a TLS handshake to the server and -// verifies the presented certificate against the CA, i.e. the one specified in -// sslrootcert or the system CA if sslrootcert was not specified. -func sslVerifyCertificateAuthority(client *tls.Conn, tlsConf *tls.Config) error { - err := client.Handshake() - if err != nil { - return err - } - certs := client.ConnectionState().PeerCertificates - opts := x509.VerifyOptions{ - DNSName: client.ConnectionState().ServerName, - Intermediates: x509.NewCertPool(), - Roots: tlsConf.RootCAs, - } - for i, cert := range certs { - if i == 0 { - continue - } - opts.Intermediates.AddCert(cert) - } - _, err = certs[0].Verify(opts) - return err -} diff --git a/vendor/github.com/lib/pq/ssl_permissions.go b/vendor/github.com/lib/pq/ssl_permissions.go deleted file mode 100644 index d587f102..00000000 --- a/vendor/github.com/lib/pq/ssl_permissions.go +++ /dev/null @@ -1,93 +0,0 @@ -//go:build !windows -// +build !windows - -package pq - -import ( - "errors" - "os" - "syscall" -) - -const ( - rootUserID = uint32(0) - - // The maximum permissions that a private key file owned by a regular user - // is allowed to have. This translates to u=rw. - maxUserOwnedKeyPermissions os.FileMode = 0600 - - // The maximum permissions that a private key file owned by root is allowed - // to have. This translates to u=rw,g=r. 
- maxRootOwnedKeyPermissions os.FileMode = 0640 -) - -var ( - errSSLKeyHasUnacceptableUserPermissions = errors.New("permissions for files not owned by root should be u=rw (0600) or less") - errSSLKeyHasUnacceptableRootPermissions = errors.New("permissions for root owned files should be u=rw,g=r (0640) or less") -) - -// sslKeyPermissions checks the permissions on user-supplied ssl key files. -// The key file should have very little access. -// -// libpq does not check key file permissions on Windows. -func sslKeyPermissions(sslkey string) error { - info, err := os.Stat(sslkey) - if err != nil { - return err - } - - err = hasCorrectPermissions(info) - - // return ErrSSLKeyHasWorldPermissions for backwards compatability with - // existing code. - if err == errSSLKeyHasUnacceptableUserPermissions || err == errSSLKeyHasUnacceptableRootPermissions { - err = ErrSSLKeyHasWorldPermissions - } - return err -} - -// hasCorrectPermissions checks the file info (and the unix-specific stat_t -// output) to verify that the permissions on the file are correct. -// -// If the file is owned by the same user the process is running as, -// the file should only have 0600 (u=rw). If the file is owned by root, -// and the group matches the group that the process is running in, the -// permissions cannot be more than 0640 (u=rw,g=r). The file should -// never have world permissions. -// -// Returns an error when the permission check fails. -func hasCorrectPermissions(info os.FileInfo) error { - // if file's permission matches 0600, allow access. - userPermissionMask := (os.FileMode(0777) ^ maxUserOwnedKeyPermissions) - - // regardless of if we're running as root or not, 0600 is acceptable, - // so we return if we match the regular user permission mask. - if info.Mode().Perm()&userPermissionMask == 0 { - return nil - } - - // We need to pull the Unix file information to get the file's owner. 
- // If we can't access it, there's some sort of operating system level error - // and we should fail rather than attempting to use faulty information. - sysInfo := info.Sys() - if sysInfo == nil { - return ErrSSLKeyUnknownOwnership - } - - unixStat, ok := sysInfo.(*syscall.Stat_t) - if !ok { - return ErrSSLKeyUnknownOwnership - } - - // if the file is owned by root, we allow 0640 (u=rw,g=r) to match what - // Postgres does. - if unixStat.Uid == rootUserID { - rootPermissionMask := (os.FileMode(0777) ^ maxRootOwnedKeyPermissions) - if info.Mode().Perm()&rootPermissionMask != 0 { - return errSSLKeyHasUnacceptableRootPermissions - } - return nil - } - - return errSSLKeyHasUnacceptableUserPermissions -} diff --git a/vendor/github.com/lib/pq/ssl_windows.go b/vendor/github.com/lib/pq/ssl_windows.go deleted file mode 100644 index 73663c8f..00000000 --- a/vendor/github.com/lib/pq/ssl_windows.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build windows -// +build windows - -package pq - -// sslKeyPermissions checks the permissions on user-supplied ssl key files. -// The key file should have very little access. -// -// libpq does not check key file permissions on Windows. -func sslKeyPermissions(string) error { return nil } diff --git a/vendor/github.com/lib/pq/url.go b/vendor/github.com/lib/pq/url.go deleted file mode 100644 index aec6e95b..00000000 --- a/vendor/github.com/lib/pq/url.go +++ /dev/null @@ -1,76 +0,0 @@ -package pq - -import ( - "fmt" - "net" - nurl "net/url" - "sort" - "strings" -) - -// ParseURL no longer needs to be used by clients of this library since supplying a URL as a -// connection string to sql.Open() is now supported: -// -// sql.Open("postgres", "postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full") -// -// It remains exported here for backwards-compatibility. -// -// ParseURL converts a url to a connection string for driver.Open. 
-// Example: -// -// "postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full" -// -// converts to: -// -// "user=bob password=secret host=1.2.3.4 port=5432 dbname=mydb sslmode=verify-full" -// -// A minimal example: -// -// "postgres://" -// -// This will be blank, causing driver.Open to use all of the defaults -func ParseURL(url string) (string, error) { - u, err := nurl.Parse(url) - if err != nil { - return "", err - } - - if u.Scheme != "postgres" && u.Scheme != "postgresql" { - return "", fmt.Errorf("invalid connection protocol: %s", u.Scheme) - } - - var kvs []string - escaper := strings.NewReplacer(`'`, `\'`, `\`, `\\`) - accrue := func(k, v string) { - if v != "" { - kvs = append(kvs, k+"='"+escaper.Replace(v)+"'") - } - } - - if u.User != nil { - v := u.User.Username() - accrue("user", v) - - v, _ = u.User.Password() - accrue("password", v) - } - - if host, port, err := net.SplitHostPort(u.Host); err != nil { - accrue("host", u.Host) - } else { - accrue("host", host) - accrue("port", port) - } - - if u.Path != "" { - accrue("dbname", u.Path[1:]) - } - - q := u.Query() - for k := range q { - accrue(k, q.Get(k)) - } - - sort.Strings(kvs) // Makes testing easier (not a performance concern) - return strings.Join(kvs, " "), nil -} diff --git a/vendor/github.com/lib/pq/user_other.go b/vendor/github.com/lib/pq/user_other.go deleted file mode 100644 index 3dae8f55..00000000 --- a/vendor/github.com/lib/pq/user_other.go +++ /dev/null @@ -1,10 +0,0 @@ -// Package pq is a pure Go Postgres driver for the database/sql package. 
- -//go:build js || android || hurd || zos -// +build js android hurd zos - -package pq - -func userCurrent() (string, error) { - return "", ErrCouldNotDetectUsername -} diff --git a/vendor/github.com/lib/pq/user_posix.go b/vendor/github.com/lib/pq/user_posix.go deleted file mode 100644 index 5f2d439b..00000000 --- a/vendor/github.com/lib/pq/user_posix.go +++ /dev/null @@ -1,25 +0,0 @@ -// Package pq is a pure Go Postgres driver for the database/sql package. - -//go:build aix || darwin || dragonfly || freebsd || (linux && !android) || nacl || netbsd || openbsd || plan9 || solaris || rumprun || illumos -// +build aix darwin dragonfly freebsd linux,!android nacl netbsd openbsd plan9 solaris rumprun illumos - -package pq - -import ( - "os" - "os/user" -) - -func userCurrent() (string, error) { - u, err := user.Current() - if err == nil { - return u.Username, nil - } - - name := os.Getenv("USER") - if name != "" { - return name, nil - } - - return "", ErrCouldNotDetectUsername -} diff --git a/vendor/github.com/lib/pq/user_windows.go b/vendor/github.com/lib/pq/user_windows.go deleted file mode 100644 index 2b691267..00000000 --- a/vendor/github.com/lib/pq/user_windows.go +++ /dev/null @@ -1,27 +0,0 @@ -// Package pq is a pure Go Postgres driver for the database/sql package. -package pq - -import ( - "path/filepath" - "syscall" -) - -// Perform Windows user name lookup identically to libpq. -// -// The PostgreSQL code makes use of the legacy Win32 function -// GetUserName, and that function has not been imported into stock Go. -// GetUserNameEx is available though, the difference being that a -// wider range of names are available. To get the output to be the -// same as GetUserName, only the base (or last) component of the -// result is returned. 
-func userCurrent() (string, error) { - pw_name := make([]uint16, 128) - pwname_size := uint32(len(pw_name)) - 1 - err := syscall.GetUserNameEx(syscall.NameSamCompatible, &pw_name[0], &pwname_size) - if err != nil { - return "", ErrCouldNotDetectUsername - } - s := syscall.UTF16ToString(pw_name) - u := filepath.Base(s) - return u, nil -} diff --git a/vendor/github.com/lib/pq/uuid.go b/vendor/github.com/lib/pq/uuid.go deleted file mode 100644 index 9a1b9e07..00000000 --- a/vendor/github.com/lib/pq/uuid.go +++ /dev/null @@ -1,23 +0,0 @@ -package pq - -import ( - "encoding/hex" - "fmt" -) - -// decodeUUIDBinary interprets the binary format of a uuid, returning it in text format. -func decodeUUIDBinary(src []byte) ([]byte, error) { - if len(src) != 16 { - return nil, fmt.Errorf("pq: unable to decode uuid; bad length: %d", len(src)) - } - - dst := make([]byte, 36) - dst[8], dst[13], dst[18], dst[23] = '-', '-', '-', '-' - hex.Encode(dst[0:], src[0:4]) - hex.Encode(dst[9:], src[4:6]) - hex.Encode(dst[14:], src[6:8]) - hex.Encode(dst[19:], src[8:10]) - hex.Encode(dst[24:], src[10:16]) - - return dst, nil -} diff --git a/vendor/github.com/mattn/go-colorable/LICENSE b/vendor/github.com/mattn/go-colorable/LICENSE deleted file mode 100644 index 91b5cef3..00000000 --- a/vendor/github.com/mattn/go-colorable/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 Yasuhiro Matsumoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/mattn/go-colorable/README.md b/vendor/github.com/mattn/go-colorable/README.md deleted file mode 100644 index ca048371..00000000 --- a/vendor/github.com/mattn/go-colorable/README.md +++ /dev/null @@ -1,48 +0,0 @@ -# go-colorable - -[![Build Status](https://github.com/mattn/go-colorable/workflows/test/badge.svg)](https://github.com/mattn/go-colorable/actions?query=workflow%3Atest) -[![Codecov](https://codecov.io/gh/mattn/go-colorable/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-colorable) -[![GoDoc](https://godoc.org/github.com/mattn/go-colorable?status.svg)](http://godoc.org/github.com/mattn/go-colorable) -[![Go Report Card](https://goreportcard.com/badge/mattn/go-colorable)](https://goreportcard.com/report/mattn/go-colorable) - -Colorable writer for windows. - -For example, most of logger packages doesn't show colors on windows. (I know we can do it with ansicon. But I don't want.) -This package is possible to handle escape sequence for ansi color on windows. - -## Too Bad! - -![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/bad.png) - - -## So Good! - -![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/good.png) - -## Usage - -```go -logrus.SetFormatter(&logrus.TextFormatter{ForceColors: true}) -logrus.SetOutput(colorable.NewColorableStdout()) - -logrus.Info("succeeded") -logrus.Warn("not correct") -logrus.Error("something error") -logrus.Fatal("panic") -``` - -You can compile above code on non-windows OSs. 
- -## Installation - -``` -$ go get github.com/mattn/go-colorable -``` - -# License - -MIT - -# Author - -Yasuhiro Matsumoto (a.k.a mattn) diff --git a/vendor/github.com/mattn/go-colorable/colorable_appengine.go b/vendor/github.com/mattn/go-colorable/colorable_appengine.go deleted file mode 100644 index 416d1bbb..00000000 --- a/vendor/github.com/mattn/go-colorable/colorable_appengine.go +++ /dev/null @@ -1,38 +0,0 @@ -//go:build appengine -// +build appengine - -package colorable - -import ( - "io" - "os" - - _ "github.com/mattn/go-isatty" -) - -// NewColorable returns new instance of Writer which handles escape sequence. -func NewColorable(file *os.File) io.Writer { - if file == nil { - panic("nil passed instead of *os.File to NewColorable()") - } - - return file -} - -// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout. -func NewColorableStdout() io.Writer { - return os.Stdout -} - -// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr. -func NewColorableStderr() io.Writer { - return os.Stderr -} - -// EnableColorsStdout enable colors if possible. -func EnableColorsStdout(enabled *bool) func() { - if enabled != nil { - *enabled = true - } - return func() {} -} diff --git a/vendor/github.com/mattn/go-colorable/colorable_others.go b/vendor/github.com/mattn/go-colorable/colorable_others.go deleted file mode 100644 index 766d9460..00000000 --- a/vendor/github.com/mattn/go-colorable/colorable_others.go +++ /dev/null @@ -1,38 +0,0 @@ -//go:build !windows && !appengine -// +build !windows,!appengine - -package colorable - -import ( - "io" - "os" - - _ "github.com/mattn/go-isatty" -) - -// NewColorable returns new instance of Writer which handles escape sequence. 
-func NewColorable(file *os.File) io.Writer { - if file == nil { - panic("nil passed instead of *os.File to NewColorable()") - } - - return file -} - -// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout. -func NewColorableStdout() io.Writer { - return os.Stdout -} - -// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr. -func NewColorableStderr() io.Writer { - return os.Stderr -} - -// EnableColorsStdout enable colors if possible. -func EnableColorsStdout(enabled *bool) func() { - if enabled != nil { - *enabled = true - } - return func() {} -} diff --git a/vendor/github.com/mattn/go-colorable/colorable_windows.go b/vendor/github.com/mattn/go-colorable/colorable_windows.go deleted file mode 100644 index 1846ad5a..00000000 --- a/vendor/github.com/mattn/go-colorable/colorable_windows.go +++ /dev/null @@ -1,1047 +0,0 @@ -//go:build windows && !appengine -// +build windows,!appengine - -package colorable - -import ( - "bytes" - "io" - "math" - "os" - "strconv" - "strings" - "sync" - "syscall" - "unsafe" - - "github.com/mattn/go-isatty" -) - -const ( - foregroundBlue = 0x1 - foregroundGreen = 0x2 - foregroundRed = 0x4 - foregroundIntensity = 0x8 - foregroundMask = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity) - backgroundBlue = 0x10 - backgroundGreen = 0x20 - backgroundRed = 0x40 - backgroundIntensity = 0x80 - backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity) - commonLvbUnderscore = 0x8000 - - cENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x4 -) - -const ( - genericRead = 0x80000000 - genericWrite = 0x40000000 -) - -const ( - consoleTextmodeBuffer = 0x1 -) - -type wchar uint16 -type short int16 -type dword uint32 -type word uint16 - -type coord struct { - x short - y short -} - -type smallRect struct { - left short - top short - right short - bottom short -} - -type consoleScreenBufferInfo struct { - size coord - cursorPosition 
coord - attributes word - window smallRect - maximumWindowSize coord -} - -type consoleCursorInfo struct { - size dword - visible int32 -} - -var ( - kernel32 = syscall.NewLazyDLL("kernel32.dll") - procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") - procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute") - procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition") - procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW") - procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute") - procGetConsoleCursorInfo = kernel32.NewProc("GetConsoleCursorInfo") - procSetConsoleCursorInfo = kernel32.NewProc("SetConsoleCursorInfo") - procSetConsoleTitle = kernel32.NewProc("SetConsoleTitleW") - procGetConsoleMode = kernel32.NewProc("GetConsoleMode") - procSetConsoleMode = kernel32.NewProc("SetConsoleMode") - procCreateConsoleScreenBuffer = kernel32.NewProc("CreateConsoleScreenBuffer") -) - -// Writer provides colorable Writer to the console -type Writer struct { - out io.Writer - handle syscall.Handle - althandle syscall.Handle - oldattr word - oldpos coord - rest bytes.Buffer - mutex sync.Mutex -} - -// NewColorable returns new instance of Writer which handles escape sequence from File. 
-func NewColorable(file *os.File) io.Writer { - if file == nil { - panic("nil passed instead of *os.File to NewColorable()") - } - - if isatty.IsTerminal(file.Fd()) { - var mode uint32 - if r, _, _ := procGetConsoleMode.Call(file.Fd(), uintptr(unsafe.Pointer(&mode))); r != 0 && mode&cENABLE_VIRTUAL_TERMINAL_PROCESSING != 0 { - return file - } - var csbi consoleScreenBufferInfo - handle := syscall.Handle(file.Fd()) - procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) - return &Writer{out: file, handle: handle, oldattr: csbi.attributes, oldpos: coord{0, 0}} - } - return file -} - -// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout. -func NewColorableStdout() io.Writer { - return NewColorable(os.Stdout) -} - -// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr. -func NewColorableStderr() io.Writer { - return NewColorable(os.Stderr) -} - -var color256 = map[int]int{ - 0: 0x000000, - 1: 0x800000, - 2: 0x008000, - 3: 0x808000, - 4: 0x000080, - 5: 0x800080, - 6: 0x008080, - 7: 0xc0c0c0, - 8: 0x808080, - 9: 0xff0000, - 10: 0x00ff00, - 11: 0xffff00, - 12: 0x0000ff, - 13: 0xff00ff, - 14: 0x00ffff, - 15: 0xffffff, - 16: 0x000000, - 17: 0x00005f, - 18: 0x000087, - 19: 0x0000af, - 20: 0x0000d7, - 21: 0x0000ff, - 22: 0x005f00, - 23: 0x005f5f, - 24: 0x005f87, - 25: 0x005faf, - 26: 0x005fd7, - 27: 0x005fff, - 28: 0x008700, - 29: 0x00875f, - 30: 0x008787, - 31: 0x0087af, - 32: 0x0087d7, - 33: 0x0087ff, - 34: 0x00af00, - 35: 0x00af5f, - 36: 0x00af87, - 37: 0x00afaf, - 38: 0x00afd7, - 39: 0x00afff, - 40: 0x00d700, - 41: 0x00d75f, - 42: 0x00d787, - 43: 0x00d7af, - 44: 0x00d7d7, - 45: 0x00d7ff, - 46: 0x00ff00, - 47: 0x00ff5f, - 48: 0x00ff87, - 49: 0x00ffaf, - 50: 0x00ffd7, - 51: 0x00ffff, - 52: 0x5f0000, - 53: 0x5f005f, - 54: 0x5f0087, - 55: 0x5f00af, - 56: 0x5f00d7, - 57: 0x5f00ff, - 58: 0x5f5f00, - 59: 0x5f5f5f, - 60: 0x5f5f87, - 61: 0x5f5faf, - 62: 0x5f5fd7, - 63: 
0x5f5fff, - 64: 0x5f8700, - 65: 0x5f875f, - 66: 0x5f8787, - 67: 0x5f87af, - 68: 0x5f87d7, - 69: 0x5f87ff, - 70: 0x5faf00, - 71: 0x5faf5f, - 72: 0x5faf87, - 73: 0x5fafaf, - 74: 0x5fafd7, - 75: 0x5fafff, - 76: 0x5fd700, - 77: 0x5fd75f, - 78: 0x5fd787, - 79: 0x5fd7af, - 80: 0x5fd7d7, - 81: 0x5fd7ff, - 82: 0x5fff00, - 83: 0x5fff5f, - 84: 0x5fff87, - 85: 0x5fffaf, - 86: 0x5fffd7, - 87: 0x5fffff, - 88: 0x870000, - 89: 0x87005f, - 90: 0x870087, - 91: 0x8700af, - 92: 0x8700d7, - 93: 0x8700ff, - 94: 0x875f00, - 95: 0x875f5f, - 96: 0x875f87, - 97: 0x875faf, - 98: 0x875fd7, - 99: 0x875fff, - 100: 0x878700, - 101: 0x87875f, - 102: 0x878787, - 103: 0x8787af, - 104: 0x8787d7, - 105: 0x8787ff, - 106: 0x87af00, - 107: 0x87af5f, - 108: 0x87af87, - 109: 0x87afaf, - 110: 0x87afd7, - 111: 0x87afff, - 112: 0x87d700, - 113: 0x87d75f, - 114: 0x87d787, - 115: 0x87d7af, - 116: 0x87d7d7, - 117: 0x87d7ff, - 118: 0x87ff00, - 119: 0x87ff5f, - 120: 0x87ff87, - 121: 0x87ffaf, - 122: 0x87ffd7, - 123: 0x87ffff, - 124: 0xaf0000, - 125: 0xaf005f, - 126: 0xaf0087, - 127: 0xaf00af, - 128: 0xaf00d7, - 129: 0xaf00ff, - 130: 0xaf5f00, - 131: 0xaf5f5f, - 132: 0xaf5f87, - 133: 0xaf5faf, - 134: 0xaf5fd7, - 135: 0xaf5fff, - 136: 0xaf8700, - 137: 0xaf875f, - 138: 0xaf8787, - 139: 0xaf87af, - 140: 0xaf87d7, - 141: 0xaf87ff, - 142: 0xafaf00, - 143: 0xafaf5f, - 144: 0xafaf87, - 145: 0xafafaf, - 146: 0xafafd7, - 147: 0xafafff, - 148: 0xafd700, - 149: 0xafd75f, - 150: 0xafd787, - 151: 0xafd7af, - 152: 0xafd7d7, - 153: 0xafd7ff, - 154: 0xafff00, - 155: 0xafff5f, - 156: 0xafff87, - 157: 0xafffaf, - 158: 0xafffd7, - 159: 0xafffff, - 160: 0xd70000, - 161: 0xd7005f, - 162: 0xd70087, - 163: 0xd700af, - 164: 0xd700d7, - 165: 0xd700ff, - 166: 0xd75f00, - 167: 0xd75f5f, - 168: 0xd75f87, - 169: 0xd75faf, - 170: 0xd75fd7, - 171: 0xd75fff, - 172: 0xd78700, - 173: 0xd7875f, - 174: 0xd78787, - 175: 0xd787af, - 176: 0xd787d7, - 177: 0xd787ff, - 178: 0xd7af00, - 179: 0xd7af5f, - 180: 0xd7af87, - 181: 0xd7afaf, - 182: 0xd7afd7, - 
183: 0xd7afff, - 184: 0xd7d700, - 185: 0xd7d75f, - 186: 0xd7d787, - 187: 0xd7d7af, - 188: 0xd7d7d7, - 189: 0xd7d7ff, - 190: 0xd7ff00, - 191: 0xd7ff5f, - 192: 0xd7ff87, - 193: 0xd7ffaf, - 194: 0xd7ffd7, - 195: 0xd7ffff, - 196: 0xff0000, - 197: 0xff005f, - 198: 0xff0087, - 199: 0xff00af, - 200: 0xff00d7, - 201: 0xff00ff, - 202: 0xff5f00, - 203: 0xff5f5f, - 204: 0xff5f87, - 205: 0xff5faf, - 206: 0xff5fd7, - 207: 0xff5fff, - 208: 0xff8700, - 209: 0xff875f, - 210: 0xff8787, - 211: 0xff87af, - 212: 0xff87d7, - 213: 0xff87ff, - 214: 0xffaf00, - 215: 0xffaf5f, - 216: 0xffaf87, - 217: 0xffafaf, - 218: 0xffafd7, - 219: 0xffafff, - 220: 0xffd700, - 221: 0xffd75f, - 222: 0xffd787, - 223: 0xffd7af, - 224: 0xffd7d7, - 225: 0xffd7ff, - 226: 0xffff00, - 227: 0xffff5f, - 228: 0xffff87, - 229: 0xffffaf, - 230: 0xffffd7, - 231: 0xffffff, - 232: 0x080808, - 233: 0x121212, - 234: 0x1c1c1c, - 235: 0x262626, - 236: 0x303030, - 237: 0x3a3a3a, - 238: 0x444444, - 239: 0x4e4e4e, - 240: 0x585858, - 241: 0x626262, - 242: 0x6c6c6c, - 243: 0x767676, - 244: 0x808080, - 245: 0x8a8a8a, - 246: 0x949494, - 247: 0x9e9e9e, - 248: 0xa8a8a8, - 249: 0xb2b2b2, - 250: 0xbcbcbc, - 251: 0xc6c6c6, - 252: 0xd0d0d0, - 253: 0xdadada, - 254: 0xe4e4e4, - 255: 0xeeeeee, -} - -// `\033]0;TITLESTR\007` -func doTitleSequence(er *bytes.Reader) error { - var c byte - var err error - - c, err = er.ReadByte() - if err != nil { - return err - } - if c != '0' && c != '2' { - return nil - } - c, err = er.ReadByte() - if err != nil { - return err - } - if c != ';' { - return nil - } - title := make([]byte, 0, 80) - for { - c, err = er.ReadByte() - if err != nil { - return err - } - if c == 0x07 || c == '\n' { - break - } - title = append(title, c) - } - if len(title) > 0 { - title8, err := syscall.UTF16PtrFromString(string(title)) - if err == nil { - procSetConsoleTitle.Call(uintptr(unsafe.Pointer(title8))) - } - } - return nil -} - -// returns Atoi(s) unless s == "" in which case it returns def -func atoiWithDefault(s string, 
def int) (int, error) { - if s == "" { - return def, nil - } - return strconv.Atoi(s) -} - -// Write writes data on console -func (w *Writer) Write(data []byte) (n int, err error) { - w.mutex.Lock() - defer w.mutex.Unlock() - var csbi consoleScreenBufferInfo - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) - - handle := w.handle - - var er *bytes.Reader - if w.rest.Len() > 0 { - var rest bytes.Buffer - w.rest.WriteTo(&rest) - w.rest.Reset() - rest.Write(data) - er = bytes.NewReader(rest.Bytes()) - } else { - er = bytes.NewReader(data) - } - var plaintext bytes.Buffer -loop: - for { - c1, err := er.ReadByte() - if err != nil { - plaintext.WriteTo(w.out) - break loop - } - if c1 != 0x1b { - plaintext.WriteByte(c1) - continue - } - _, err = plaintext.WriteTo(w.out) - if err != nil { - break loop - } - c2, err := er.ReadByte() - if err != nil { - break loop - } - - switch c2 { - case '>': - continue - case ']': - w.rest.WriteByte(c1) - w.rest.WriteByte(c2) - er.WriteTo(&w.rest) - if bytes.IndexByte(w.rest.Bytes(), 0x07) == -1 { - break loop - } - er = bytes.NewReader(w.rest.Bytes()[2:]) - err := doTitleSequence(er) - if err != nil { - break loop - } - w.rest.Reset() - continue - // https://github.com/mattn/go-colorable/issues/27 - case '7': - procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) - w.oldpos = csbi.cursorPosition - continue - case '8': - procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&w.oldpos))) - continue - case 0x5b: - // execute part after switch - default: - continue - } - - w.rest.WriteByte(c1) - w.rest.WriteByte(c2) - er.WriteTo(&w.rest) - - var buf bytes.Buffer - var m byte - for i, c := range w.rest.Bytes()[2:] { - if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { - m = c - er = bytes.NewReader(w.rest.Bytes()[2+i+1:]) - w.rest.Reset() - break - } - buf.Write([]byte(string(c))) - } - if m == 0 { - break loop - } - - switch m { - 
case 'A': - n, err = atoiWithDefault(buf.String(), 1) - if err != nil { - continue - } - procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) - csbi.cursorPosition.y -= short(n) - procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) - case 'B': - n, err = atoiWithDefault(buf.String(), 1) - if err != nil { - continue - } - procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) - csbi.cursorPosition.y += short(n) - procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) - case 'C': - n, err = atoiWithDefault(buf.String(), 1) - if err != nil { - continue - } - procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) - csbi.cursorPosition.x += short(n) - procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) - case 'D': - n, err = atoiWithDefault(buf.String(), 1) - if err != nil { - continue - } - procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) - csbi.cursorPosition.x -= short(n) - if csbi.cursorPosition.x < 0 { - csbi.cursorPosition.x = 0 - } - procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) - case 'E': - n, err = strconv.Atoi(buf.String()) - if err != nil { - continue - } - procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) - csbi.cursorPosition.x = 0 - csbi.cursorPosition.y += short(n) - procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) - case 'F': - n, err = strconv.Atoi(buf.String()) - if err != nil { - continue - } - procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) - csbi.cursorPosition.x = 0 - csbi.cursorPosition.y -= short(n) - procSetConsoleCursorPosition.Call(uintptr(handle), 
*(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) - case 'G': - n, err = strconv.Atoi(buf.String()) - if err != nil { - continue - } - if n < 1 { - n = 1 - } - procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) - csbi.cursorPosition.x = short(n - 1) - procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) - case 'H', 'f': - procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) - if buf.Len() > 0 { - token := strings.Split(buf.String(), ";") - switch len(token) { - case 1: - n1, err := strconv.Atoi(token[0]) - if err != nil { - continue - } - csbi.cursorPosition.y = short(n1 - 1) - case 2: - n1, err := strconv.Atoi(token[0]) - if err != nil { - continue - } - n2, err := strconv.Atoi(token[1]) - if err != nil { - continue - } - csbi.cursorPosition.x = short(n2 - 1) - csbi.cursorPosition.y = short(n1 - 1) - } - } else { - csbi.cursorPosition.y = 0 - } - procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) - case 'J': - n := 0 - if buf.Len() > 0 { - n, err = strconv.Atoi(buf.String()) - if err != nil { - continue - } - } - var count, written dword - var cursor coord - procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) - switch n { - case 0: - cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} - count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.size.y-csbi.cursorPosition.y)*dword(csbi.size.x) - case 1: - cursor = coord{x: csbi.window.left, y: csbi.window.top} - count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.window.top-csbi.cursorPosition.y)*dword(csbi.size.x) - case 2: - cursor = coord{x: csbi.window.left, y: csbi.window.top} - count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.size.y-csbi.cursorPosition.y)*dword(csbi.size.x) - } - procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), 
uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) - procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) - case 'K': - n := 0 - if buf.Len() > 0 { - n, err = strconv.Atoi(buf.String()) - if err != nil { - continue - } - } - procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) - var cursor coord - var count, written dword - switch n { - case 0: - cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} - count = dword(csbi.size.x - csbi.cursorPosition.x) - case 1: - cursor = coord{x: csbi.window.left, y: csbi.cursorPosition.y} - count = dword(csbi.size.x - csbi.cursorPosition.x) - case 2: - cursor = coord{x: csbi.window.left, y: csbi.cursorPosition.y} - count = dword(csbi.size.x) - } - procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) - procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) - case 'X': - n := 0 - if buf.Len() > 0 { - n, err = strconv.Atoi(buf.String()) - if err != nil { - continue - } - } - procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) - var cursor coord - var written dword - cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} - procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(n), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) - procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(n), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) - case 'm': - procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) - attr := csbi.attributes - cs := buf.String() - if 
cs == "" { - procSetConsoleTextAttribute.Call(uintptr(handle), uintptr(w.oldattr)) - continue - } - token := strings.Split(cs, ";") - for i := 0; i < len(token); i++ { - ns := token[i] - if n, err = strconv.Atoi(ns); err == nil { - switch { - case n == 0 || n == 100: - attr = w.oldattr - case n == 4: - attr |= commonLvbUnderscore - case (1 <= n && n <= 3) || n == 5: - attr |= foregroundIntensity - case n == 7 || n == 27: - attr = - (attr &^ (foregroundMask | backgroundMask)) | - ((attr & foregroundMask) << 4) | - ((attr & backgroundMask) >> 4) - case n == 22: - attr &^= foregroundIntensity - case n == 24: - attr &^= commonLvbUnderscore - case 30 <= n && n <= 37: - attr &= backgroundMask - if (n-30)&1 != 0 { - attr |= foregroundRed - } - if (n-30)&2 != 0 { - attr |= foregroundGreen - } - if (n-30)&4 != 0 { - attr |= foregroundBlue - } - case n == 38: // set foreground color. - if i < len(token)-2 && (token[i+1] == "5" || token[i+1] == "05") { - if n256, err := strconv.Atoi(token[i+2]); err == nil { - if n256foreAttr == nil { - n256setup() - } - attr &= backgroundMask - attr |= n256foreAttr[n256%len(n256foreAttr)] - i += 2 - } - } else if len(token) == 5 && token[i+1] == "2" { - var r, g, b int - r, _ = strconv.Atoi(token[i+2]) - g, _ = strconv.Atoi(token[i+3]) - b, _ = strconv.Atoi(token[i+4]) - i += 4 - if r > 127 { - attr |= foregroundRed - } - if g > 127 { - attr |= foregroundGreen - } - if b > 127 { - attr |= foregroundBlue - } - } else { - attr = attr & (w.oldattr & backgroundMask) - } - case n == 39: // reset foreground color. - attr &= backgroundMask - attr |= w.oldattr & foregroundMask - case 40 <= n && n <= 47: - attr &= foregroundMask - if (n-40)&1 != 0 { - attr |= backgroundRed - } - if (n-40)&2 != 0 { - attr |= backgroundGreen - } - if (n-40)&4 != 0 { - attr |= backgroundBlue - } - case n == 48: // set background color. 
- if i < len(token)-2 && token[i+1] == "5" { - if n256, err := strconv.Atoi(token[i+2]); err == nil { - if n256backAttr == nil { - n256setup() - } - attr &= foregroundMask - attr |= n256backAttr[n256%len(n256backAttr)] - i += 2 - } - } else if len(token) == 5 && token[i+1] == "2" { - var r, g, b int - r, _ = strconv.Atoi(token[i+2]) - g, _ = strconv.Atoi(token[i+3]) - b, _ = strconv.Atoi(token[i+4]) - i += 4 - if r > 127 { - attr |= backgroundRed - } - if g > 127 { - attr |= backgroundGreen - } - if b > 127 { - attr |= backgroundBlue - } - } else { - attr = attr & (w.oldattr & foregroundMask) - } - case n == 49: // reset foreground color. - attr &= foregroundMask - attr |= w.oldattr & backgroundMask - case 90 <= n && n <= 97: - attr = (attr & backgroundMask) - attr |= foregroundIntensity - if (n-90)&1 != 0 { - attr |= foregroundRed - } - if (n-90)&2 != 0 { - attr |= foregroundGreen - } - if (n-90)&4 != 0 { - attr |= foregroundBlue - } - case 100 <= n && n <= 107: - attr = (attr & foregroundMask) - attr |= backgroundIntensity - if (n-100)&1 != 0 { - attr |= backgroundRed - } - if (n-100)&2 != 0 { - attr |= backgroundGreen - } - if (n-100)&4 != 0 { - attr |= backgroundBlue - } - } - procSetConsoleTextAttribute.Call(uintptr(handle), uintptr(attr)) - } - } - case 'h': - var ci consoleCursorInfo - cs := buf.String() - if cs == "5>" { - procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) - ci.visible = 0 - procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) - } else if cs == "?25" { - procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) - ci.visible = 1 - procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) - } else if cs == "?1049" { - if w.althandle == 0 { - h, _, _ := procCreateConsoleScreenBuffer.Call(uintptr(genericRead|genericWrite), 0, 0, uintptr(consoleTextmodeBuffer), 0, 0) - w.althandle = syscall.Handle(h) - if w.althandle != 0 { - handle = w.althandle - } - } - } 
- case 'l': - var ci consoleCursorInfo - cs := buf.String() - if cs == "5>" { - procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) - ci.visible = 1 - procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) - } else if cs == "?25" { - procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) - ci.visible = 0 - procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) - } else if cs == "?1049" { - if w.althandle != 0 { - syscall.CloseHandle(w.althandle) - w.althandle = 0 - handle = w.handle - } - } - case 's': - procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) - w.oldpos = csbi.cursorPosition - case 'u': - procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&w.oldpos))) - } - } - - return len(data), nil -} - -type consoleColor struct { - rgb int - red bool - green bool - blue bool - intensity bool -} - -func (c consoleColor) foregroundAttr() (attr word) { - if c.red { - attr |= foregroundRed - } - if c.green { - attr |= foregroundGreen - } - if c.blue { - attr |= foregroundBlue - } - if c.intensity { - attr |= foregroundIntensity - } - return -} - -func (c consoleColor) backgroundAttr() (attr word) { - if c.red { - attr |= backgroundRed - } - if c.green { - attr |= backgroundGreen - } - if c.blue { - attr |= backgroundBlue - } - if c.intensity { - attr |= backgroundIntensity - } - return -} - -var color16 = []consoleColor{ - {0x000000, false, false, false, false}, - {0x000080, false, false, true, false}, - {0x008000, false, true, false, false}, - {0x008080, false, true, true, false}, - {0x800000, true, false, false, false}, - {0x800080, true, false, true, false}, - {0x808000, true, true, false, false}, - {0xc0c0c0, true, true, true, false}, - {0x808080, false, false, false, true}, - {0x0000ff, false, false, true, true}, - {0x00ff00, false, true, false, true}, - {0x00ffff, false, true, true, true}, - {0xff0000, true, false, 
false, true}, - {0xff00ff, true, false, true, true}, - {0xffff00, true, true, false, true}, - {0xffffff, true, true, true, true}, -} - -type hsv struct { - h, s, v float32 -} - -func (a hsv) dist(b hsv) float32 { - dh := a.h - b.h - switch { - case dh > 0.5: - dh = 1 - dh - case dh < -0.5: - dh = -1 - dh - } - ds := a.s - b.s - dv := a.v - b.v - return float32(math.Sqrt(float64(dh*dh + ds*ds + dv*dv))) -} - -func toHSV(rgb int) hsv { - r, g, b := float32((rgb&0xFF0000)>>16)/256.0, - float32((rgb&0x00FF00)>>8)/256.0, - float32(rgb&0x0000FF)/256.0 - min, max := minmax3f(r, g, b) - h := max - min - if h > 0 { - if max == r { - h = (g - b) / h - if h < 0 { - h += 6 - } - } else if max == g { - h = 2 + (b-r)/h - } else { - h = 4 + (r-g)/h - } - } - h /= 6.0 - s := max - min - if max != 0 { - s /= max - } - v := max - return hsv{h: h, s: s, v: v} -} - -type hsvTable []hsv - -func toHSVTable(rgbTable []consoleColor) hsvTable { - t := make(hsvTable, len(rgbTable)) - for i, c := range rgbTable { - t[i] = toHSV(c.rgb) - } - return t -} - -func (t hsvTable) find(rgb int) consoleColor { - hsv := toHSV(rgb) - n := 7 - l := float32(5.0) - for i, p := range t { - d := hsv.dist(p) - if d < l { - l, n = d, i - } - } - return color16[n] -} - -func minmax3f(a, b, c float32) (min, max float32) { - if a < b { - if b < c { - return a, c - } else if a < c { - return a, b - } else { - return c, b - } - } else { - if a < c { - return b, c - } else if b < c { - return b, a - } else { - return c, a - } - } -} - -var n256foreAttr []word -var n256backAttr []word - -func n256setup() { - n256foreAttr = make([]word, 256) - n256backAttr = make([]word, 256) - t := toHSVTable(color16) - for i, rgb := range color256 { - c := t.find(rgb) - n256foreAttr[i] = c.foregroundAttr() - n256backAttr[i] = c.backgroundAttr() - } -} - -// EnableColorsStdout enable colors if possible. 
-func EnableColorsStdout(enabled *bool) func() { - var mode uint32 - h := os.Stdout.Fd() - if r, _, _ := procGetConsoleMode.Call(h, uintptr(unsafe.Pointer(&mode))); r != 0 { - if r, _, _ = procSetConsoleMode.Call(h, uintptr(mode|cENABLE_VIRTUAL_TERMINAL_PROCESSING)); r != 0 { - if enabled != nil { - *enabled = true - } - return func() { - procSetConsoleMode.Call(h, uintptr(mode)) - } - } - } - if enabled != nil { - *enabled = true - } - return func() {} -} diff --git a/vendor/github.com/mattn/go-colorable/noncolorable.go b/vendor/github.com/mattn/go-colorable/noncolorable.go deleted file mode 100644 index 05d6f74b..00000000 --- a/vendor/github.com/mattn/go-colorable/noncolorable.go +++ /dev/null @@ -1,57 +0,0 @@ -package colorable - -import ( - "bytes" - "io" -) - -// NonColorable holds writer but removes escape sequence. -type NonColorable struct { - out io.Writer -} - -// NewNonColorable returns new instance of Writer which removes escape sequence from Writer. -func NewNonColorable(w io.Writer) io.Writer { - return &NonColorable{out: w} -} - -// Write writes data on console -func (w *NonColorable) Write(data []byte) (n int, err error) { - er := bytes.NewReader(data) - var plaintext bytes.Buffer -loop: - for { - c1, err := er.ReadByte() - if err != nil { - plaintext.WriteTo(w.out) - break loop - } - if c1 != 0x1b { - plaintext.WriteByte(c1) - continue - } - _, err = plaintext.WriteTo(w.out) - if err != nil { - break loop - } - c2, err := er.ReadByte() - if err != nil { - break loop - } - if c2 != 0x5b { - continue - } - - for { - c, err := er.ReadByte() - if err != nil { - break loop - } - if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { - break - } - } - } - - return len(data), nil -} diff --git a/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml b/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml index 3aa1840e..1d8b69e6 100644 --- a/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml +++ 
b/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml @@ -18,6 +18,7 @@ builds: - linux_amd64 - linux_arm64 - linux_arm + - linux_riscv64 - windows_amd64 - windows_arm64 - windows_arm @@ -37,6 +38,7 @@ builds: - linux_amd64 - linux_arm64 - linux_arm + - linux_riscv64 - windows_amd64 - windows_arm64 - windows_arm @@ -55,6 +57,7 @@ builds: targets: - linux_amd64 - linux_arm64 + - linux_riscv64 - linux_arm - windows_amd64 - windows_arm64 diff --git a/vendor/github.com/pelletier/go-toml/v2/LICENSE b/vendor/github.com/pelletier/go-toml/v2/LICENSE index 6839d51c..991e2ae9 100644 --- a/vendor/github.com/pelletier/go-toml/v2/LICENSE +++ b/vendor/github.com/pelletier/go-toml/v2/LICENSE @@ -1,6 +1,7 @@ The MIT License (MIT) -Copyright (c) 2013 - 2022 Thomas Pelletier, Eric Anderton +go-toml v2 +Copyright (c) 2021 - 2023 Thomas Pelletier Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/github.com/pelletier/go-toml/v2/README.md b/vendor/github.com/pelletier/go-toml/v2/README.md index d53f4397..63b92f3b 100644 --- a/vendor/github.com/pelletier/go-toml/v2/README.md +++ b/vendor/github.com/pelletier/go-toml/v2/README.md @@ -45,16 +45,15 @@ to check for typos. [See example in the documentation][strict]. ### Contextualized errors -When most decoding errors occur, go-toml returns [`DecodeError`][decode-err]), +When most decoding errors occur, go-toml returns [`DecodeError`][decode-err], which contains a human readable contextualized version of the error. For example: ``` -2| key1 = "value1" -3| key2 = "missing2" - | ~~~~ missing field -4| key3 = "missing3" -5| key4 = "value4" +1| [server] +2| path = 100 + | ~~~ cannot decode TOML integer into struct field toml_test.Server.Path of type string +3| port = 50 ``` [decode-err]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#DecodeError @@ -73,6 +72,26 @@ representation. 
[tlt]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#LocalTime [tldt]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#LocalDateTime +### Commented config + +Since TOML is often used for configuration files, go-toml can emit documents +annotated with [comments and commented-out values][comments-example]. For +example, it can generate the following file: + +```toml +# Host IP to connect to. +host = '127.0.0.1' +# Port of the remote server. +port = 4242 + +# Encryption parameters (optional) +# [TLS] +# cipher = 'AEAD-AES128-GCM-SHA256' +# version = 'TLS 1.3' +``` + +[comments-example]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#example-Marshal-Commented + ## Getting started Given the following struct, let's see how to read it and write it as TOML: @@ -497,27 +516,20 @@ is not necessary anymore. V1 used to provide multiple struct tags: `comment`, `commented`, `multiline`, `toml`, and `omitempty`. To behave more like the standard library, v2 has merged -`toml`, `multiline`, and `omitempty`. For example: +`toml`, `multiline`, `commented`, and `omitempty`. For example: ```go type doc struct { // v1 - F string `toml:"field" multiline:"true" omitempty:"true"` + F string `toml:"field" multiline:"true" omitempty:"true" commented:"true"` // v2 - F string `toml:"field,multiline,omitempty"` + F string `toml:"field,multiline,omitempty,commented"` } ``` Has a result, the `Encoder.SetTag*` methods have been removed, as there is just one tag now. - -#### `commented` tag has been removed - -There is no replacement for the `commented` tag. This feature would be better -suited in a proper document model for go-toml v2, which has been [cut from -scope][nodoc] at the moment. - #### `Encoder.ArraysWithOneElementPerLine` has been renamed The new name is `Encoder.SetArraysMultiline`. The behavior should be the same. 
diff --git a/vendor/github.com/pelletier/go-toml/v2/decode.go b/vendor/github.com/pelletier/go-toml/v2/decode.go index 3a860d0f..f0ec3b17 100644 --- a/vendor/github.com/pelletier/go-toml/v2/decode.go +++ b/vendor/github.com/pelletier/go-toml/v2/decode.go @@ -318,7 +318,7 @@ func parseFloat(b []byte) (float64, error) { if cleaned[0] == '+' || cleaned[0] == '-' { start = 1 } - if cleaned[start] == '0' && isDigit(cleaned[start+1]) { + if cleaned[start] == '0' && len(cleaned) > start+1 && isDigit(cleaned[start+1]) { return 0, unstable.NewParserError(b, "float integer part cannot have leading zeroes") } diff --git a/vendor/github.com/pelletier/go-toml/v2/marshaler.go b/vendor/github.com/pelletier/go-toml/v2/marshaler.go index 83875260..6fe78533 100644 --- a/vendor/github.com/pelletier/go-toml/v2/marshaler.go +++ b/vendor/github.com/pelletier/go-toml/v2/marshaler.go @@ -148,6 +148,9 @@ func (enc *Encoder) SetIndentTables(indent bool) *Encoder { // // The "omitempty" option prevents empty values or groups from being emitted. // +// The "commented" option prefixes the value and all its children with a comment +// symbol. +// // In addition to the "toml" tag struct tag, a "comment" tag can be used to emit // a TOML comment before the value being annotated. Comments are ignored inside // inline tables. For array tables, the comment is only present before the first @@ -180,6 +183,7 @@ func (enc *Encoder) Encode(v interface{}) error { type valueOptions struct { multiline bool omitempty bool + commented bool comment string } @@ -205,6 +209,9 @@ type encoderCtx struct { // Indentation level indent int + // Prefix the current value with a comment. 
+ commented bool + // Options coming from struct tags options valueOptions } @@ -357,6 +364,7 @@ func (enc *Encoder) encodeKv(b []byte, ctx encoderCtx, options valueOptions, v r if !ctx.inline { b = enc.encodeComment(ctx.indent, options.comment, b) + b = enc.commented(ctx.commented, b) b = enc.indent(ctx.indent, b) } @@ -378,6 +386,13 @@ func (enc *Encoder) encodeKv(b []byte, ctx encoderCtx, options valueOptions, v r return b, nil } +func (enc *Encoder) commented(commented bool, b []byte) []byte { + if commented { + return append(b, "# "...) + } + return b +} + func isEmptyValue(v reflect.Value) bool { switch v.Kind() { case reflect.Struct: @@ -526,6 +541,8 @@ func (enc *Encoder) encodeTableHeader(ctx encoderCtx, b []byte) ([]byte, error) b = enc.encodeComment(ctx.indent, ctx.options.comment, b) + b = enc.commented(ctx.commented, b) + b = enc.indent(ctx.indent, b) b = append(b, '[') @@ -704,6 +721,7 @@ func walkStruct(ctx encoderCtx, t *table, v reflect.Value) { options := valueOptions{ multiline: opts.multiline, omitempty: opts.omitempty, + commented: opts.commented, comment: fieldType.Tag.Get("comment"), } @@ -763,6 +781,7 @@ type tagOptions struct { multiline bool inline bool omitempty bool + commented bool } func parseTag(tag string) (string, tagOptions) { @@ -790,6 +809,8 @@ func parseTag(tag string) (string, tagOptions) { opts.inline = true case "omitempty": opts.omitempty = true + case "commented": + opts.commented = true } } @@ -825,8 +846,10 @@ func (enc *Encoder) encodeTable(b []byte, ctx encoderCtx, t table) ([]byte, erro hasNonEmptyKV = true ctx.setKey(kv.Key) + ctx2 := ctx + ctx2.commented = kv.Options.commented || ctx2.commented - b, err = enc.encodeKv(b, ctx, kv.Options, kv.Value) + b, err = enc.encodeKv(b, ctx2, kv.Options, kv.Value) if err != nil { return nil, err } @@ -851,8 +874,10 @@ func (enc *Encoder) encodeTable(b []byte, ctx encoderCtx, t table) ([]byte, erro ctx.setKey(table.Key) ctx.options = table.Options + ctx2 := ctx + ctx2.commented = 
ctx2.commented || ctx.options.commented - b, err = enc.encode(b, ctx, table.Value) + b, err = enc.encode(b, ctx2, table.Value) if err != nil { return nil, err } @@ -970,6 +995,9 @@ func (enc *Encoder) encodeSliceAsArrayTable(b []byte, ctx encoderCtx, v reflect. ctx.shiftKey() scratch := make([]byte, 0, 64) + + scratch = enc.commented(ctx.commented, scratch) + scratch = append(scratch, "[["...) for i, k := range ctx.parentKey { @@ -985,6 +1013,10 @@ func (enc *Encoder) encodeSliceAsArrayTable(b []byte, ctx encoderCtx, v reflect. b = enc.encodeComment(ctx.indent, ctx.options.comment, b) + if enc.indentTables { + ctx.indent++ + } + for i := 0; i < v.Len(); i++ { if i != 0 { b = append(b, "\n"...) diff --git a/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go b/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go index 5cede081..868c74c1 100644 --- a/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go +++ b/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go @@ -149,12 +149,16 @@ type errorContext struct { } func (d *decoder) typeMismatchError(toml string, target reflect.Type) error { + return fmt.Errorf("toml: %s", d.typeMismatchString(toml, target)) +} + +func (d *decoder) typeMismatchString(toml string, target reflect.Type) string { if d.errorContext != nil && d.errorContext.Struct != nil { ctx := d.errorContext f := ctx.Struct.FieldByIndex(ctx.Field) - return fmt.Errorf("toml: cannot decode TOML %s into struct field %s.%s of type %s", toml, ctx.Struct, f.Name, f.Type) + return fmt.Sprintf("cannot decode TOML %s into struct field %s.%s of type %s", toml, ctx.Struct, f.Name, f.Type) } - return fmt.Errorf("toml: cannot decode TOML %s into a Go value of type %s", toml, target) + return fmt.Sprintf("cannot decode TOML %s into a Go value of type %s", toml, target) } func (d *decoder) expr() *unstable.Node { @@ -963,7 +967,7 @@ func (d *decoder) unmarshalInteger(value *unstable.Node, v reflect.Value) error case reflect.Interface: r = reflect.ValueOf(i) default: - 
return d.typeMismatchError("integer", v.Type()) + return unstable.NewParserError(d.p.Raw(value.Raw), d.typeMismatchString("integer", v.Type())) } if !r.Type().AssignableTo(v.Type()) { @@ -982,7 +986,7 @@ func (d *decoder) unmarshalString(value *unstable.Node, v reflect.Value) error { case reflect.Interface: v.Set(reflect.ValueOf(string(value.Data))) default: - return unstable.NewParserError(d.p.Raw(value.Raw), "cannot store TOML string into a Go %s", v.Kind()) + return unstable.NewParserError(d.p.Raw(value.Raw), d.typeMismatchString("string", v.Type())) } return nil diff --git a/vendor/github.com/pelletier/go-toml/v2/unstable/parser.go b/vendor/github.com/pelletier/go-toml/v2/unstable/parser.go index a8eb0529..50358a44 100644 --- a/vendor/github.com/pelletier/go-toml/v2/unstable/parser.go +++ b/vendor/github.com/pelletier/go-toml/v2/unstable/parser.go @@ -1013,6 +1013,7 @@ func (p *Parser) parseIntOrFloatOrDateTime(b []byte) (reference, []byte, error) return p.builder.Push(Node{ Kind: Float, Data: b[:3], + Raw: p.Range(b[:3]), }), b[3:], nil case 'n': if !scanFollowsNan(b) { @@ -1022,6 +1023,7 @@ func (p *Parser) parseIntOrFloatOrDateTime(b []byte) (reference, []byte, error) return p.builder.Push(Node{ Kind: Float, Data: b[:3], + Raw: p.Range(b[:3]), }), b[3:], nil case '+', '-': return p.scanIntOrFloat(b) @@ -1146,6 +1148,7 @@ func (p *Parser) scanIntOrFloat(b []byte) (reference, []byte, error) { return p.builder.Push(Node{ Kind: Integer, Data: b[:i], + Raw: p.Range(b[:i]), }), b[i:], nil } @@ -1169,6 +1172,7 @@ func (p *Parser) scanIntOrFloat(b []byte) (reference, []byte, error) { return p.builder.Push(Node{ Kind: Float, Data: b[:i+3], + Raw: p.Range(b[:i+3]), }), b[i+3:], nil } @@ -1180,6 +1184,7 @@ func (p *Parser) scanIntOrFloat(b []byte) (reference, []byte, error) { return p.builder.Push(Node{ Kind: Float, Data: b[:i+3], + Raw: p.Range(b[:i+3]), }), b[i+3:], nil } @@ -1202,6 +1207,7 @@ func (p *Parser) scanIntOrFloat(b []byte) (reference, []byte, error) { 
return p.builder.Push(Node{ Kind: kind, Data: b[:i], + Raw: p.Range(b[:i]), }), b[i:], nil } diff --git a/vendor/github.com/qiniu/go-sdk/v7/CHANGELOG.md b/vendor/github.com/qiniu/go-sdk/v7/CHANGELOG.md index 8afd0e5a..4e644772 100644 --- a/vendor/github.com/qiniu/go-sdk/v7/CHANGELOG.md +++ b/vendor/github.com/qiniu/go-sdk/v7/CHANGELOG.md @@ -1,4 +1,10 @@ # Changelog +## 7.17.1 +* 优化 + * 调整在获取 Bucket 所在区域服务域名时的主备域名顺序 +* 调整 + * 移除内置的亚太-首尔区域 + ## 7.17.0 * 优化 * 对象存储,UC 服务相关请求支持主备重试 diff --git a/vendor/github.com/qiniu/go-sdk/v7/README.md b/vendor/github.com/qiniu/go-sdk/v7/README.md index 9fe77c31..9fc2ed20 100644 --- a/vendor/github.com/qiniu/go-sdk/v7/README.md +++ b/vendor/github.com/qiniu/go-sdk/v7/README.md @@ -17,7 +17,7 @@ github.com/qiniu/go-sdk 在您的项目中的 `go.mod` 文件内添加这行代码 ``` -require github.com/qiniu/go-sdk/v7 v7.17.0 +require github.com/qiniu/go-sdk/v7 v7.17.1 ``` 并且在项目中使用 `"github.com/qiniu/go-sdk/v7"` 引用 Qiniu Go SDK。 diff --git a/vendor/github.com/qiniu/go-sdk/v7/conf/conf.go b/vendor/github.com/qiniu/go-sdk/v7/conf/conf.go index b8421d16..1da4c7d2 100644 --- a/vendor/github.com/qiniu/go-sdk/v7/conf/conf.go +++ b/vendor/github.com/qiniu/go-sdk/v7/conf/conf.go @@ -5,7 +5,7 @@ import ( "strings" ) -const Version = "7.17.0" +const Version = "7.17.1" const ( CONTENT_TYPE_JSON = "application/json" diff --git a/vendor/github.com/qiniu/go-sdk/v7/internal/clientv2/client.go b/vendor/github.com/qiniu/go-sdk/v7/internal/clientv2/client.go index e8e693a0..5b716f53 100644 --- a/vendor/github.com/qiniu/go-sdk/v7/internal/clientv2/client.go +++ b/vendor/github.com/qiniu/go-sdk/v7/internal/clientv2/client.go @@ -19,7 +19,13 @@ type client struct { func NewClient(cli Client, interceptors ...Interceptor) Client { if cli == nil { - cli = http.DefaultClient + if clientV1.DefaultClient.Client != nil { + cli = NewClientWithClientV1(&clientV1.DefaultClient) + } else if http.DefaultClient != nil { + cli = http.DefaultClient + } else { + cli = &http.Client{} + } } var is 
interceptorList = interceptors @@ -96,3 +102,29 @@ func DoAndDecodeJsonResponse(c Client, options RequestParams, ret interface{}) ( return resp, nil } + +type clientV1Wrapper struct { + c *clientV1.Client +} + +func (c *clientV1Wrapper) Do(req *http.Request) (*http.Response, error) { + return c.c.Do(req.Context(), req) +} + +func NewClientWithClientV1(c *clientV1.Client) Client { + if c == nil { + c = &clientV1.DefaultClient + } + + if c.Client == nil { + if clientV1.DefaultClient.Client != nil { + c.Client = clientV1.DefaultClient.Client + } else { + c.Client = &http.Client{} + } + } + + return &clientV1Wrapper{ + c: c, + } +} diff --git a/vendor/github.com/qiniu/go-sdk/v7/internal/clientv2/interceptor_retry_simple.go b/vendor/github.com/qiniu/go-sdk/v7/internal/clientv2/interceptor_retry_simple.go index f3e7362e..b02f619f 100644 --- a/vendor/github.com/qiniu/go-sdk/v7/internal/clientv2/interceptor_retry_simple.go +++ b/vendor/github.com/qiniu/go-sdk/v7/internal/clientv2/interceptor_retry_simple.go @@ -1,15 +1,17 @@ package clientv2 import ( - clientv1 "github.com/qiniu/go-sdk/v7/client" "io" "math/rand" "net" "net/http" "net/url" "os" + "strings" "syscall" "time" + + clientv1 "github.com/qiniu/go-sdk/v7/client" ) type RetryConfig struct { @@ -157,12 +159,15 @@ func IsErrorRetryable(err error) bool { case *clientv1.ErrorInfo: return isStatusCodeRetryable(t.Code) default: + if err == io.EOF { + return true + } return false } } func isNetworkErrorWithOpError(err *net.OpError) bool { - if err == nil { + if err == nil || err.Err == nil { return false } @@ -176,6 +181,13 @@ func isNetworkErrorWithOpError(err *net.OpError) bool { errno == syscall.ECONNREFUSED || errno == syscall.ETIMEDOUT } + case *net.OpError: + return isNetworkErrorWithOpError(t) + default: + desc := err.Err.Error() + if strings.Contains(desc, "use of closed network connection") { + return true + } } return false diff --git a/vendor/github.com/qiniu/go-sdk/v7/storage/region.go 
b/vendor/github.com/qiniu/go-sdk/v7/storage/region.go index 0bc25ee9..8e3b7beb 100644 --- a/vendor/github.com/qiniu/go-sdk/v7/storage/region.go +++ b/vendor/github.com/qiniu/go-sdk/v7/storage/region.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "github.com/qiniu/go-sdk/v7/auth" + "github.com/qiniu/go-sdk/v7/client" "github.com/qiniu/go-sdk/v7/internal/clientv2" "github.com/qiniu/go-sdk/v7/internal/hostprovider" "strings" @@ -177,20 +178,6 @@ var ( ApiHost: "api-as0.qiniuapi.com", IovipHost: "iovip-as0.qbox.me", } - - // regionApNortheast1 表示亚太-首尔机房 - regionApNortheast1 = Region{ - SrcUpHosts: []string{ - "up-ap-northeast-1.qiniup.com", - }, - CdnUpHosts: []string{ - "upload-ap-northeast-1.qiniup.com", - }, - RsHost: "rs-ap-northeast-1.qiniuapi.com", - RsfHost: "rsf-ap-northeast-1.qiniuapi.com", - ApiHost: "api-ap-northeast-1.qiniuapi.com", - IovipHost: "iovip-ap-northeast-1.qiniuio.com", - } ) const ( @@ -201,7 +188,6 @@ const ( RIDHuanan = RegionID("z2") RIDNorthAmerica = RegionID("na0") RIDSingapore = RegionID("as0") - RIDApNortheast1 = RegionID("ap-northeast-1") ) // regionMap 是RegionID到具体的Region的映射 @@ -212,13 +198,12 @@ var regionMap = map[RegionID]Region{ RIDHuabei: regionHuabei, RIDSingapore: regionSingapore, RIDNorthAmerica: regionNorthAmerica, - RIDApNortheast1: regionApNortheast1, } const ( defaultApiHost = "api.qiniu.com" - defaultUcHost0 = "uc.qbox.me" - defaultUcHost1 = "kodo-config.qiniuapi.com" + defaultUcHost0 = "kodo-config.qiniuapi.com" + defaultUcHost1 = "uc.qbox.me" ) // UcHost 为查询空间相关域名的 API 服务地址 @@ -350,6 +335,8 @@ type ucClientConfig struct { // 主备域名冻结时间(默认:600s),当一个域名请求失败(单个域名会被重试 TryTimes 次),会被冻结一段时间,使用备用域名进行重试,在冻结时间内,域名不能被使用,当一个操作中所有域名竣备冻结操作不在进行重试,返回最后一次操作的错误。 HostFreezeDuration time.Duration + + Client *client.Client } func getUCClient(config ucClientConfig, mac *auth.Credentials) clientv2.Client { @@ -391,5 +378,5 @@ func getUCClient(config ucClientConfig, mac *auth.Credentials) clientv2.Client { })) } - return clientv2.NewClient(nil, 
is...) + return clientv2.NewClient(clientv2.NewClientWithClientV1(config.Client), is...) } diff --git a/vendor/github.com/qiniu/go-sdk/v7/storage/uc.go b/vendor/github.com/qiniu/go-sdk/v7/storage/uc.go index 1da4111a..ea44649a 100644 --- a/vendor/github.com/qiniu/go-sdk/v7/storage/uc.go +++ b/vendor/github.com/qiniu/go-sdk/v7/storage/uc.go @@ -743,5 +743,6 @@ func (m *BucketManager) getUCClient() clientv2.Client { IsUcQueryApi: false, RetryMax: m.options.RetryMax, HostFreezeDuration: m.options.HostFreezeDuration, + Client: m.Client, }, m.Mac) } diff --git a/vendor/github.com/qiniu/go-sdk/v7/storage/zone.go b/vendor/github.com/qiniu/go-sdk/v7/storage/zone.go index a1971e0a..51d5a6c5 100644 --- a/vendor/github.com/qiniu/go-sdk/v7/storage/zone.go +++ b/vendor/github.com/qiniu/go-sdk/v7/storage/zone.go @@ -34,9 +34,6 @@ var ( // 华东浙江 2 区 ZoneHuadongZheJiang2, _ = GetRegionByID(RIDHuadongZheJiang2) - // 亚太首尔 1 区 - ZoneShouEr1, _ = GetRegionByID(RIDApNortheast1) - // 兼容保留 Zone_z0 = ZoneHuadong // 兼容保留 diff --git a/vendor/github.com/syndtr/goleveldb/LICENSE b/vendor/github.com/syndtr/goleveldb/LICENSE deleted file mode 100644 index 4a772d1a..00000000 --- a/vendor/github.com/syndtr/goleveldb/LICENSE +++ /dev/null @@ -1,24 +0,0 @@ -Copyright 2012 Suryandaru Triandana -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright -notice, this list of conditions and the following disclaimer in the -documentation and/or other materials provided with the distribution. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/batch.go b/vendor/github.com/syndtr/goleveldb/leveldb/batch.go deleted file mode 100644 index 22592000..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/batch.go +++ /dev/null @@ -1,349 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "encoding/binary" - "fmt" - "io" - - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/memdb" - "github.com/syndtr/goleveldb/leveldb/storage" -) - -// ErrBatchCorrupted records reason of batch corruption. This error will be -// wrapped with errors.ErrCorrupted. -type ErrBatchCorrupted struct { - Reason string -} - -func (e *ErrBatchCorrupted) Error() string { - return fmt.Sprintf("leveldb: batch corrupted: %s", e.Reason) -} - -func newErrBatchCorrupted(reason string) error { - return errors.NewErrCorrupted(storage.FileDesc{}, &ErrBatchCorrupted{reason}) -} - -const ( - batchHeaderLen = 8 + 4 - batchGrowRec = 3000 - batchBufioSize = 16 -) - -// BatchReplay wraps basic batch operations. 
-type BatchReplay interface { - Put(key, value []byte) - Delete(key []byte) -} - -type batchIndex struct { - keyType keyType - keyPos, keyLen int - valuePos, valueLen int -} - -func (index batchIndex) k(data []byte) []byte { - return data[index.keyPos : index.keyPos+index.keyLen] -} - -func (index batchIndex) v(data []byte) []byte { - if index.valueLen != 0 { - return data[index.valuePos : index.valuePos+index.valueLen] - } - return nil -} - -func (index batchIndex) kv(data []byte) (key, value []byte) { - return index.k(data), index.v(data) -} - -// Batch is a write batch. -type Batch struct { - data []byte - index []batchIndex - - // internalLen is sums of key/value pair length plus 8-bytes internal key. - internalLen int -} - -func (b *Batch) grow(n int) { - o := len(b.data) - if cap(b.data)-o < n { - div := 1 - if len(b.index) > batchGrowRec { - div = len(b.index) / batchGrowRec - } - ndata := make([]byte, o, o+n+o/div) - copy(ndata, b.data) - b.data = ndata - } -} - -func (b *Batch) appendRec(kt keyType, key, value []byte) { - n := 1 + binary.MaxVarintLen32 + len(key) - if kt == keyTypeVal { - n += binary.MaxVarintLen32 + len(value) - } - b.grow(n) - index := batchIndex{keyType: kt} - o := len(b.data) - data := b.data[:o+n] - data[o] = byte(kt) - o++ - o += binary.PutUvarint(data[o:], uint64(len(key))) - index.keyPos = o - index.keyLen = len(key) - o += copy(data[o:], key) - if kt == keyTypeVal { - o += binary.PutUvarint(data[o:], uint64(len(value))) - index.valuePos = o - index.valueLen = len(value) - o += copy(data[o:], value) - } - b.data = data[:o] - b.index = append(b.index, index) - b.internalLen += index.keyLen + index.valueLen + 8 -} - -// Put appends 'put operation' of the given key/value pair to the batch. -// It is safe to modify the contents of the argument after Put returns but not -// before. -func (b *Batch) Put(key, value []byte) { - b.appendRec(keyTypeVal, key, value) -} - -// Delete appends 'delete operation' of the given key to the batch. 
-// It is safe to modify the contents of the argument after Delete returns but -// not before. -func (b *Batch) Delete(key []byte) { - b.appendRec(keyTypeDel, key, nil) -} - -// Dump dumps batch contents. The returned slice can be loaded into the -// batch using Load method. -// The returned slice is not its own copy, so the contents should not be -// modified. -func (b *Batch) Dump() []byte { - return b.data -} - -// Load loads given slice into the batch. Previous contents of the batch -// will be discarded. -// The given slice will not be copied and will be used as batch buffer, so -// it is not safe to modify the contents of the slice. -func (b *Batch) Load(data []byte) error { - return b.decode(data, -1) -} - -// Replay replays batch contents. -func (b *Batch) Replay(r BatchReplay) error { - for _, index := range b.index { - switch index.keyType { - case keyTypeVal: - r.Put(index.k(b.data), index.v(b.data)) - case keyTypeDel: - r.Delete(index.k(b.data)) - } - } - return nil -} - -// Len returns number of records in the batch. -func (b *Batch) Len() int { - return len(b.index) -} - -// Reset resets the batch. -func (b *Batch) Reset() { - b.data = b.data[:0] - b.index = b.index[:0] - b.internalLen = 0 -} - -func (b *Batch) replayInternal(fn func(i int, kt keyType, k, v []byte) error) error { - for i, index := range b.index { - if err := fn(i, index.keyType, index.k(b.data), index.v(b.data)); err != nil { - return err - } - } - return nil -} - -func (b *Batch) append(p *Batch) { - ob := len(b.data) - oi := len(b.index) - b.data = append(b.data, p.data...) - b.index = append(b.index, p.index...) - b.internalLen += p.internalLen - - // Updating index offset. 
- if ob != 0 { - for ; oi < len(b.index); oi++ { - index := &b.index[oi] - index.keyPos += ob - if index.valueLen != 0 { - index.valuePos += ob - } - } - } -} - -func (b *Batch) decode(data []byte, expectedLen int) error { - b.data = data - b.index = b.index[:0] - b.internalLen = 0 - err := decodeBatch(data, func(i int, index batchIndex) error { - b.index = append(b.index, index) - b.internalLen += index.keyLen + index.valueLen + 8 - return nil - }) - if err != nil { - return err - } - if expectedLen >= 0 && len(b.index) != expectedLen { - return newErrBatchCorrupted(fmt.Sprintf("invalid records length: %d vs %d", expectedLen, len(b.index))) - } - return nil -} - -func (b *Batch) putMem(seq uint64, mdb *memdb.DB) error { - var ik []byte - for i, index := range b.index { - ik = makeInternalKey(ik, index.k(b.data), seq+uint64(i), index.keyType) - if err := mdb.Put(ik, index.v(b.data)); err != nil { - return err - } - } - return nil -} - -func (b *Batch) revertMem(seq uint64, mdb *memdb.DB) error { - var ik []byte - for i, index := range b.index { - ik = makeInternalKey(ik, index.k(b.data), seq+uint64(i), index.keyType) - if err := mdb.Delete(ik); err != nil { - return err - } - } - return nil -} - -func newBatch() interface{} { - return &Batch{} -} - -func decodeBatch(data []byte, fn func(i int, index batchIndex) error) error { - var index batchIndex - for i, o := 0, 0; o < len(data); i++ { - // Key type. - index.keyType = keyType(data[o]) - if index.keyType > keyTypeVal { - return newErrBatchCorrupted(fmt.Sprintf("bad record: invalid type %#x", uint(index.keyType))) - } - o++ - - // Key. - x, n := binary.Uvarint(data[o:]) - o += n - if n <= 0 || o+int(x) > len(data) { - return newErrBatchCorrupted("bad record: invalid key length") - } - index.keyPos = o - index.keyLen = int(x) - o += index.keyLen - - // Value. 
- if index.keyType == keyTypeVal { - x, n = binary.Uvarint(data[o:]) - o += n - if n <= 0 || o+int(x) > len(data) { - return newErrBatchCorrupted("bad record: invalid value length") - } - index.valuePos = o - index.valueLen = int(x) - o += index.valueLen - } else { - index.valuePos = 0 - index.valueLen = 0 - } - - if err := fn(i, index); err != nil { - return err - } - } - return nil -} - -func decodeBatchToMem(data []byte, expectSeq uint64, mdb *memdb.DB) (seq uint64, batchLen int, err error) { - seq, batchLen, err = decodeBatchHeader(data) - if err != nil { - return 0, 0, err - } - if seq < expectSeq { - return 0, 0, newErrBatchCorrupted("invalid sequence number") - } - data = data[batchHeaderLen:] - var ik []byte - var decodedLen int - err = decodeBatch(data, func(i int, index batchIndex) error { - if i >= batchLen { - return newErrBatchCorrupted("invalid records length") - } - ik = makeInternalKey(ik, index.k(data), seq+uint64(i), index.keyType) - if err := mdb.Put(ik, index.v(data)); err != nil { - return err - } - decodedLen++ - return nil - }) - if err == nil && decodedLen != batchLen { - err = newErrBatchCorrupted(fmt.Sprintf("invalid records length: %d vs %d", batchLen, decodedLen)) - } - return -} - -func encodeBatchHeader(dst []byte, seq uint64, batchLen int) []byte { - dst = ensureBuffer(dst, batchHeaderLen) - binary.LittleEndian.PutUint64(dst, seq) - binary.LittleEndian.PutUint32(dst[8:], uint32(batchLen)) - return dst -} - -func decodeBatchHeader(data []byte) (seq uint64, batchLen int, err error) { - if len(data) < batchHeaderLen { - return 0, 0, newErrBatchCorrupted("too short") - } - - seq = binary.LittleEndian.Uint64(data) - batchLen = int(binary.LittleEndian.Uint32(data[8:])) - if batchLen < 0 { - return 0, 0, newErrBatchCorrupted("invalid records length") - } - return -} - -func batchesLen(batches []*Batch) int { - batchLen := 0 - for _, batch := range batches { - batchLen += batch.Len() - } - return batchLen -} - -func writeBatchesWithHeader(wr 
io.Writer, batches []*Batch, seq uint64) error { - if _, err := wr.Write(encodeBatchHeader(nil, seq, batchesLen(batches))); err != nil { - return err - } - for _, batch := range batches { - if _, err := wr.Write(batch.data); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/cache/cache.go b/vendor/github.com/syndtr/goleveldb/leveldb/cache/cache.go deleted file mode 100644 index c36ad323..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/cache/cache.go +++ /dev/null @@ -1,704 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package cache provides interface and implementation of a cache algorithms. -package cache - -import ( - "sync" - "sync/atomic" - "unsafe" - - "github.com/syndtr/goleveldb/leveldb/util" -) - -// Cacher provides interface to implements a caching functionality. -// An implementation must be safe for concurrent use. -type Cacher interface { - // Capacity returns cache capacity. - Capacity() int - - // SetCapacity sets cache capacity. - SetCapacity(capacity int) - - // Promote promotes the 'cache node'. - Promote(n *Node) - - // Ban evicts the 'cache node' and prevent subsequent 'promote'. - Ban(n *Node) - - // Evict evicts the 'cache node'. - Evict(n *Node) - - // EvictNS evicts 'cache node' with the given namespace. - EvictNS(ns uint64) - - // EvictAll evicts all 'cache node'. - EvictAll() - - // Close closes the 'cache tree' - Close() error -} - -// Value is a 'cacheable object'. It may implements util.Releaser, if -// so the the Release method will be called once object is released. -type Value interface{} - -// NamespaceGetter provides convenient wrapper for namespace. -type NamespaceGetter struct { - Cache *Cache - NS uint64 -} - -// Get simply calls Cache.Get() method. 
-func (g *NamespaceGetter) Get(key uint64, setFunc func() (size int, value Value)) *Handle { - return g.Cache.Get(g.NS, key, setFunc) -} - -// The hash tables implementation is based on: -// "Dynamic-Sized Nonblocking Hash Tables", by Yujie Liu, -// Kunlong Zhang, and Michael Spear. -// ACM Symposium on Principles of Distributed Computing, Jul 2014. - -const ( - mInitialSize = 1 << 4 - mOverflowThreshold = 1 << 5 - mOverflowGrowThreshold = 1 << 7 -) - -type mBucket struct { - mu sync.Mutex - node []*Node - frozen bool -} - -func (b *mBucket) freeze() []*Node { - b.mu.Lock() - defer b.mu.Unlock() - if !b.frozen { - b.frozen = true - } - return b.node -} - -func (b *mBucket) get(r *Cache, h *mNode, hash uint32, ns, key uint64, noset bool) (done, added bool, n *Node) { - b.mu.Lock() - - if b.frozen { - b.mu.Unlock() - return - } - - // Scan the node. - for _, n := range b.node { - if n.hash == hash && n.ns == ns && n.key == key { - atomic.AddInt32(&n.ref, 1) - b.mu.Unlock() - return true, false, n - } - } - - // Get only. - if noset { - b.mu.Unlock() - return true, false, nil - } - - // Create node. - n = &Node{ - r: r, - hash: hash, - ns: ns, - key: key, - ref: 1, - } - // Add node to bucket. - b.node = append(b.node, n) - bLen := len(b.node) - b.mu.Unlock() - - // Update counter. - grow := atomic.AddInt32(&r.nodes, 1) >= h.growThreshold - if bLen > mOverflowThreshold { - grow = grow || atomic.AddInt32(&h.overflow, 1) >= mOverflowGrowThreshold - } - - // Grow. 
- if grow && atomic.CompareAndSwapInt32(&h.resizeInProgess, 0, 1) { - nhLen := len(h.buckets) << 1 - nh := &mNode{ - buckets: make([]unsafe.Pointer, nhLen), - mask: uint32(nhLen) - 1, - pred: unsafe.Pointer(h), - growThreshold: int32(nhLen * mOverflowThreshold), - shrinkThreshold: int32(nhLen >> 1), - } - ok := atomic.CompareAndSwapPointer(&r.mHead, unsafe.Pointer(h), unsafe.Pointer(nh)) - if !ok { - panic("BUG: failed swapping head") - } - go nh.initBuckets() - } - - return true, true, n -} - -func (b *mBucket) delete(r *Cache, h *mNode, hash uint32, ns, key uint64) (done, deleted bool) { - b.mu.Lock() - - if b.frozen { - b.mu.Unlock() - return - } - - // Scan the node. - var ( - n *Node - bLen int - ) - for i := range b.node { - n = b.node[i] - if n.ns == ns && n.key == key { - if atomic.LoadInt32(&n.ref) == 0 { - deleted = true - - // Call releaser. - if n.value != nil { - if r, ok := n.value.(util.Releaser); ok { - r.Release() - } - n.value = nil - } - - // Remove node from bucket. - b.node = append(b.node[:i], b.node[i+1:]...) - bLen = len(b.node) - } - break - } - } - b.mu.Unlock() - - if deleted { - // Call OnDel. - for _, f := range n.onDel { - f() - } - - // Update counter. - atomic.AddInt32(&r.size, int32(n.size)*-1) - shrink := atomic.AddInt32(&r.nodes, -1) < h.shrinkThreshold - if bLen >= mOverflowThreshold { - atomic.AddInt32(&h.overflow, -1) - } - - // Shrink. 
- if shrink && len(h.buckets) > mInitialSize && atomic.CompareAndSwapInt32(&h.resizeInProgess, 0, 1) { - nhLen := len(h.buckets) >> 1 - nh := &mNode{ - buckets: make([]unsafe.Pointer, nhLen), - mask: uint32(nhLen) - 1, - pred: unsafe.Pointer(h), - growThreshold: int32(nhLen * mOverflowThreshold), - shrinkThreshold: int32(nhLen >> 1), - } - ok := atomic.CompareAndSwapPointer(&r.mHead, unsafe.Pointer(h), unsafe.Pointer(nh)) - if !ok { - panic("BUG: failed swapping head") - } - go nh.initBuckets() - } - } - - return true, deleted -} - -type mNode struct { - buckets []unsafe.Pointer // []*mBucket - mask uint32 - pred unsafe.Pointer // *mNode - resizeInProgess int32 - - overflow int32 - growThreshold int32 - shrinkThreshold int32 -} - -func (n *mNode) initBucket(i uint32) *mBucket { - if b := (*mBucket)(atomic.LoadPointer(&n.buckets[i])); b != nil { - return b - } - - p := (*mNode)(atomic.LoadPointer(&n.pred)) - if p != nil { - var node []*Node - if n.mask > p.mask { - // Grow. - pb := (*mBucket)(atomic.LoadPointer(&p.buckets[i&p.mask])) - if pb == nil { - pb = p.initBucket(i & p.mask) - } - m := pb.freeze() - // Split nodes. - for _, x := range m { - if x.hash&n.mask == i { - node = append(node, x) - } - } - } else { - // Shrink. - pb0 := (*mBucket)(atomic.LoadPointer(&p.buckets[i])) - if pb0 == nil { - pb0 = p.initBucket(i) - } - pb1 := (*mBucket)(atomic.LoadPointer(&p.buckets[i+uint32(len(n.buckets))])) - if pb1 == nil { - pb1 = p.initBucket(i + uint32(len(n.buckets))) - } - m0 := pb0.freeze() - m1 := pb1.freeze() - // Merge nodes. - node = make([]*Node, 0, len(m0)+len(m1)) - node = append(node, m0...) - node = append(node, m1...) 
- } - b := &mBucket{node: node} - if atomic.CompareAndSwapPointer(&n.buckets[i], nil, unsafe.Pointer(b)) { - if len(node) > mOverflowThreshold { - atomic.AddInt32(&n.overflow, int32(len(node)-mOverflowThreshold)) - } - return b - } - } - - return (*mBucket)(atomic.LoadPointer(&n.buckets[i])) -} - -func (n *mNode) initBuckets() { - for i := range n.buckets { - n.initBucket(uint32(i)) - } - atomic.StorePointer(&n.pred, nil) -} - -// Cache is a 'cache map'. -type Cache struct { - mu sync.RWMutex - mHead unsafe.Pointer // *mNode - nodes int32 - size int32 - cacher Cacher - closed bool -} - -// NewCache creates a new 'cache map'. The cacher is optional and -// may be nil. -func NewCache(cacher Cacher) *Cache { - h := &mNode{ - buckets: make([]unsafe.Pointer, mInitialSize), - mask: mInitialSize - 1, - growThreshold: int32(mInitialSize * mOverflowThreshold), - shrinkThreshold: 0, - } - for i := range h.buckets { - h.buckets[i] = unsafe.Pointer(&mBucket{}) - } - r := &Cache{ - mHead: unsafe.Pointer(h), - cacher: cacher, - } - return r -} - -func (r *Cache) getBucket(hash uint32) (*mNode, *mBucket) { - h := (*mNode)(atomic.LoadPointer(&r.mHead)) - i := hash & h.mask - b := (*mBucket)(atomic.LoadPointer(&h.buckets[i])) - if b == nil { - b = h.initBucket(i) - } - return h, b -} - -func (r *Cache) delete(n *Node) bool { - for { - h, b := r.getBucket(n.hash) - done, deleted := b.delete(r, h, n.hash, n.ns, n.key) - if done { - return deleted - } - } -} - -// Nodes returns number of 'cache node' in the map. -func (r *Cache) Nodes() int { - return int(atomic.LoadInt32(&r.nodes)) -} - -// Size returns sums of 'cache node' size in the map. -func (r *Cache) Size() int { - return int(atomic.LoadInt32(&r.size)) -} - -// Capacity returns cache capacity. -func (r *Cache) Capacity() int { - if r.cacher == nil { - return 0 - } - return r.cacher.Capacity() -} - -// SetCapacity sets cache capacity. 
-func (r *Cache) SetCapacity(capacity int) { - if r.cacher != nil { - r.cacher.SetCapacity(capacity) - } -} - -// Get gets 'cache node' with the given namespace and key. -// If cache node is not found and setFunc is not nil, Get will atomically creates -// the 'cache node' by calling setFunc. Otherwise Get will returns nil. -// -// The returned 'cache handle' should be released after use by calling Release -// method. -func (r *Cache) Get(ns, key uint64, setFunc func() (size int, value Value)) *Handle { - r.mu.RLock() - defer r.mu.RUnlock() - if r.closed { - return nil - } - - hash := murmur32(ns, key, 0xf00) - for { - h, b := r.getBucket(hash) - done, _, n := b.get(r, h, hash, ns, key, setFunc == nil) - if done { - if n != nil { - n.mu.Lock() - if n.value == nil { - if setFunc == nil { - n.mu.Unlock() - n.unref() - return nil - } - - n.size, n.value = setFunc() - if n.value == nil { - n.size = 0 - n.mu.Unlock() - n.unref() - return nil - } - atomic.AddInt32(&r.size, int32(n.size)) - } - n.mu.Unlock() - if r.cacher != nil { - r.cacher.Promote(n) - } - return &Handle{unsafe.Pointer(n)} - } - - break - } - } - return nil -} - -// Delete removes and ban 'cache node' with the given namespace and key. -// A banned 'cache node' will never inserted into the 'cache tree'. Ban -// only attributed to the particular 'cache node', so when a 'cache node' -// is recreated it will not be banned. -// -// If onDel is not nil, then it will be executed if such 'cache node' -// doesn't exist or once the 'cache node' is released. -// -// Delete return true is such 'cache node' exist. 
-func (r *Cache) Delete(ns, key uint64, onDel func()) bool { - r.mu.RLock() - defer r.mu.RUnlock() - if r.closed { - return false - } - - hash := murmur32(ns, key, 0xf00) - for { - h, b := r.getBucket(hash) - done, _, n := b.get(r, h, hash, ns, key, true) - if done { - if n != nil { - if onDel != nil { - n.mu.Lock() - n.onDel = append(n.onDel, onDel) - n.mu.Unlock() - } - if r.cacher != nil { - r.cacher.Ban(n) - } - n.unref() - return true - } - - break - } - } - - if onDel != nil { - onDel() - } - - return false -} - -// Evict evicts 'cache node' with the given namespace and key. This will -// simply call Cacher.Evict. -// -// Evict return true is such 'cache node' exist. -func (r *Cache) Evict(ns, key uint64) bool { - r.mu.RLock() - defer r.mu.RUnlock() - if r.closed { - return false - } - - hash := murmur32(ns, key, 0xf00) - for { - h, b := r.getBucket(hash) - done, _, n := b.get(r, h, hash, ns, key, true) - if done { - if n != nil { - if r.cacher != nil { - r.cacher.Evict(n) - } - n.unref() - return true - } - - break - } - } - - return false -} - -// EvictNS evicts 'cache node' with the given namespace. This will -// simply call Cacher.EvictNS. -func (r *Cache) EvictNS(ns uint64) { - r.mu.RLock() - defer r.mu.RUnlock() - if r.closed { - return - } - - if r.cacher != nil { - r.cacher.EvictNS(ns) - } -} - -// EvictAll evicts all 'cache node'. This will simply call Cacher.EvictAll. -func (r *Cache) EvictAll() { - r.mu.RLock() - defer r.mu.RUnlock() - if r.closed { - return - } - - if r.cacher != nil { - r.cacher.EvictAll() - } -} - -// Close closes the 'cache map' and forcefully releases all 'cache node'. -func (r *Cache) Close() error { - r.mu.Lock() - if !r.closed { - r.closed = true - - h := (*mNode)(r.mHead) - h.initBuckets() - - for i := range h.buckets { - b := (*mBucket)(h.buckets[i]) - for _, n := range b.node { - // Call releaser. - if n.value != nil { - if r, ok := n.value.(util.Releaser); ok { - r.Release() - } - n.value = nil - } - - // Call OnDel. 
- for _, f := range n.onDel { - f() - } - n.onDel = nil - } - } - } - r.mu.Unlock() - - // Avoid deadlock. - if r.cacher != nil { - if err := r.cacher.Close(); err != nil { - return err - } - } - return nil -} - -// CloseWeak closes the 'cache map' and evict all 'cache node' from cacher, but -// unlike Close it doesn't forcefully releases 'cache node'. -func (r *Cache) CloseWeak() error { - r.mu.Lock() - if !r.closed { - r.closed = true - } - r.mu.Unlock() - - // Avoid deadlock. - if r.cacher != nil { - r.cacher.EvictAll() - if err := r.cacher.Close(); err != nil { - return err - } - } - return nil -} - -// Node is a 'cache node'. -type Node struct { - r *Cache - - hash uint32 - ns, key uint64 - - mu sync.Mutex - size int - value Value - - ref int32 - onDel []func() - - CacheData unsafe.Pointer -} - -// NS returns this 'cache node' namespace. -func (n *Node) NS() uint64 { - return n.ns -} - -// Key returns this 'cache node' key. -func (n *Node) Key() uint64 { - return n.key -} - -// Size returns this 'cache node' size. -func (n *Node) Size() int { - return n.size -} - -// Value returns this 'cache node' value. -func (n *Node) Value() Value { - return n.value -} - -// Ref returns this 'cache node' ref counter. -func (n *Node) Ref() int32 { - return atomic.LoadInt32(&n.ref) -} - -// GetHandle returns an handle for this 'cache node'. -func (n *Node) GetHandle() *Handle { - if atomic.AddInt32(&n.ref, 1) <= 1 { - panic("BUG: Node.GetHandle on zero ref") - } - return &Handle{unsafe.Pointer(n)} -} - -func (n *Node) unref() { - if atomic.AddInt32(&n.ref, -1) == 0 { - n.r.delete(n) - } -} - -func (n *Node) unrefLocked() { - if atomic.AddInt32(&n.ref, -1) == 0 { - n.r.mu.RLock() - if !n.r.closed { - n.r.delete(n) - } - n.r.mu.RUnlock() - } -} - -// Handle is a 'cache handle' of a 'cache node'. -type Handle struct { - n unsafe.Pointer // *Node -} - -// Value returns the value of the 'cache node'. 
-func (h *Handle) Value() Value { - n := (*Node)(atomic.LoadPointer(&h.n)) - if n != nil { - return n.value - } - return nil -} - -// Release releases this 'cache handle'. -// It is safe to call release multiple times. -func (h *Handle) Release() { - nPtr := atomic.LoadPointer(&h.n) - if nPtr != nil && atomic.CompareAndSwapPointer(&h.n, nPtr, nil) { - n := (*Node)(nPtr) - n.unrefLocked() - } -} - -func murmur32(ns, key uint64, seed uint32) uint32 { - const ( - m = uint32(0x5bd1e995) - r = 24 - ) - - k1 := uint32(ns >> 32) - k2 := uint32(ns) - k3 := uint32(key >> 32) - k4 := uint32(key) - - k1 *= m - k1 ^= k1 >> r - k1 *= m - - k2 *= m - k2 ^= k2 >> r - k2 *= m - - k3 *= m - k3 ^= k3 >> r - k3 *= m - - k4 *= m - k4 ^= k4 >> r - k4 *= m - - h := seed - - h *= m - h ^= k1 - h *= m - h ^= k2 - h *= m - h ^= k3 - h *= m - h ^= k4 - - h ^= h >> 13 - h *= m - h ^= h >> 15 - - return h -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/cache/lru.go b/vendor/github.com/syndtr/goleveldb/leveldb/cache/lru.go deleted file mode 100644 index d9a84cde..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/cache/lru.go +++ /dev/null @@ -1,195 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package cache - -import ( - "sync" - "unsafe" -) - -type lruNode struct { - n *Node - h *Handle - ban bool - - next, prev *lruNode -} - -func (n *lruNode) insert(at *lruNode) { - x := at.next - at.next = n - n.prev = at - n.next = x - x.prev = n -} - -func (n *lruNode) remove() { - if n.prev != nil { - n.prev.next = n.next - n.next.prev = n.prev - n.prev = nil - n.next = nil - } else { - panic("BUG: removing removed node") - } -} - -type lru struct { - mu sync.Mutex - capacity int - used int - recent lruNode -} - -func (r *lru) reset() { - r.recent.next = &r.recent - r.recent.prev = &r.recent - r.used = 0 -} - -func (r *lru) Capacity() int { - r.mu.Lock() - defer r.mu.Unlock() - return r.capacity -} - -func (r *lru) SetCapacity(capacity int) { - var evicted []*lruNode - - r.mu.Lock() - r.capacity = capacity - for r.used > r.capacity { - rn := r.recent.prev - if rn == nil { - panic("BUG: invalid LRU used or capacity counter") - } - rn.remove() - rn.n.CacheData = nil - r.used -= rn.n.Size() - evicted = append(evicted, rn) - } - r.mu.Unlock() - - for _, rn := range evicted { - rn.h.Release() - } -} - -func (r *lru) Promote(n *Node) { - var evicted []*lruNode - - r.mu.Lock() - if n.CacheData == nil { - if n.Size() <= r.capacity { - rn := &lruNode{n: n, h: n.GetHandle()} - rn.insert(&r.recent) - n.CacheData = unsafe.Pointer(rn) - r.used += n.Size() - - for r.used > r.capacity { - rn := r.recent.prev - if rn == nil { - panic("BUG: invalid LRU used or capacity counter") - } - rn.remove() - rn.n.CacheData = nil - r.used -= rn.n.Size() - evicted = append(evicted, rn) - } - } - } else { - rn := (*lruNode)(n.CacheData) - if !rn.ban { - rn.remove() - rn.insert(&r.recent) - } - } - r.mu.Unlock() - - for _, rn := range evicted { - rn.h.Release() - } -} - -func (r *lru) Ban(n *Node) { - r.mu.Lock() - if n.CacheData == nil { - n.CacheData = unsafe.Pointer(&lruNode{n: n, ban: true}) - } else { - rn := (*lruNode)(n.CacheData) - if !rn.ban { - rn.remove() - rn.ban = true - r.used 
-= rn.n.Size() - r.mu.Unlock() - - rn.h.Release() - rn.h = nil - return - } - } - r.mu.Unlock() -} - -func (r *lru) Evict(n *Node) { - r.mu.Lock() - rn := (*lruNode)(n.CacheData) - if rn == nil || rn.ban { - r.mu.Unlock() - return - } - n.CacheData = nil - r.mu.Unlock() - - rn.h.Release() -} - -func (r *lru) EvictNS(ns uint64) { - var evicted []*lruNode - - r.mu.Lock() - for e := r.recent.prev; e != &r.recent; { - rn := e - e = e.prev - if rn.n.NS() == ns { - rn.remove() - rn.n.CacheData = nil - r.used -= rn.n.Size() - evicted = append(evicted, rn) - } - } - r.mu.Unlock() - - for _, rn := range evicted { - rn.h.Release() - } -} - -func (r *lru) EvictAll() { - r.mu.Lock() - back := r.recent.prev - for rn := back; rn != &r.recent; rn = rn.prev { - rn.n.CacheData = nil - } - r.reset() - r.mu.Unlock() - - for rn := back; rn != &r.recent; rn = rn.prev { - rn.h.Release() - } -} - -func (r *lru) Close() error { - return nil -} - -// NewLRU create a new LRU-cache. -func NewLRU(capacity int) Cacher { - r := &lru{capacity: capacity} - r.reset() - return r -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/comparer.go b/vendor/github.com/syndtr/goleveldb/leveldb/comparer.go deleted file mode 100644 index 448402b8..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/comparer.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package leveldb - -import ( - "github.com/syndtr/goleveldb/leveldb/comparer" -) - -type iComparer struct { - ucmp comparer.Comparer -} - -func (icmp *iComparer) uName() string { - return icmp.ucmp.Name() -} - -func (icmp *iComparer) uCompare(a, b []byte) int { - return icmp.ucmp.Compare(a, b) -} - -func (icmp *iComparer) uSeparator(dst, a, b []byte) []byte { - return icmp.ucmp.Separator(dst, a, b) -} - -func (icmp *iComparer) uSuccessor(dst, b []byte) []byte { - return icmp.ucmp.Successor(dst, b) -} - -func (icmp *iComparer) Name() string { - return icmp.uName() -} - -func (icmp *iComparer) Compare(a, b []byte) int { - x := icmp.uCompare(internalKey(a).ukey(), internalKey(b).ukey()) - if x == 0 { - if m, n := internalKey(a).num(), internalKey(b).num(); m > n { - return -1 - } else if m < n { - return 1 - } - } - return x -} - -func (icmp *iComparer) Separator(dst, a, b []byte) []byte { - ua, ub := internalKey(a).ukey(), internalKey(b).ukey() - dst = icmp.uSeparator(dst, ua, ub) - if dst != nil && len(dst) < len(ua) && icmp.uCompare(ua, dst) < 0 { - // Append earliest possible number. - return append(dst, keyMaxNumBytes...) - } - return nil -} - -func (icmp *iComparer) Successor(dst, b []byte) []byte { - ub := internalKey(b).ukey() - dst = icmp.uSuccessor(dst, ub) - if dst != nil && len(dst) < len(ub) && icmp.uCompare(ub, dst) < 0 { - // Append earliest possible number. - return append(dst, keyMaxNumBytes...) - } - return nil -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go b/vendor/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go deleted file mode 100644 index abf9fb65..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package comparer - -import "bytes" - -type bytesComparer struct{} - -func (bytesComparer) Compare(a, b []byte) int { - return bytes.Compare(a, b) -} - -func (bytesComparer) Name() string { - return "leveldb.BytewiseComparator" -} - -func (bytesComparer) Separator(dst, a, b []byte) []byte { - i, n := 0, len(a) - if n > len(b) { - n = len(b) - } - for ; i < n && a[i] == b[i]; i++ { - } - if i >= n { - // Do not shorten if one string is a prefix of the other - } else if c := a[i]; c < 0xff && c+1 < b[i] { - dst = append(dst, a[:i+1]...) - dst[len(dst)-1]++ - return dst - } - return nil -} - -func (bytesComparer) Successor(dst, b []byte) []byte { - for i, c := range b { - if c != 0xff { - dst = append(dst, b[:i+1]...) - dst[len(dst)-1]++ - return dst - } - } - return nil -} - -// DefaultComparer are default implementation of the Comparer interface. -// It uses the natural ordering, consistent with bytes.Compare. -var DefaultComparer = bytesComparer{} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go b/vendor/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go deleted file mode 100644 index 2c522db2..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package comparer provides interface and implementation for ordering -// sets of data. -package comparer - -// BasicComparer is the interface that wraps the basic Compare method. -type BasicComparer interface { - // Compare returns -1, 0, or +1 depending on whether a is 'less than', - // 'equal to' or 'greater than' b. The two arguments can only be 'equal' - // if their contents are exactly equal. Furthermore, the empty slice - // must be 'less than' any non-empty slice. 
- Compare(a, b []byte) int -} - -// Comparer defines a total ordering over the space of []byte keys: a 'less -// than' relationship. -type Comparer interface { - BasicComparer - - // Name returns name of the comparer. - // - // The Level-DB on-disk format stores the comparer name, and opening a - // database with a different comparer from the one it was created with - // will result in an error. - // - // An implementation to a new name whenever the comparer implementation - // changes in a way that will cause the relative ordering of any two keys - // to change. - // - // Names starting with "leveldb." are reserved and should not be used - // by any users of this package. - Name() string - - // Bellow are advanced functions used to reduce the space requirements - // for internal data structures such as index blocks. - - // Separator appends a sequence of bytes x to dst such that a <= x && x < b, - // where 'less than' is consistent with Compare. An implementation should - // return nil if x equal to a. - // - // Either contents of a or b should not by any means modified. Doing so - // may cause corruption on the internal state. - Separator(dst, a, b []byte) []byte - - // Successor appends a sequence of bytes x to dst such that x >= b, where - // 'less than' is consistent with Compare. An implementation should return - // nil if x equal to b. - // - // Contents of b should not by any means modified. Doing so may cause - // corruption on the internal state. - Successor(dst, b []byte) []byte -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db.go b/vendor/github.com/syndtr/goleveldb/leveldb/db.go deleted file mode 100644 index 90fedf7b..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/db.go +++ /dev/null @@ -1,1179 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package leveldb - -import ( - "container/list" - "fmt" - "io" - "os" - "runtime" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/journal" - "github.com/syndtr/goleveldb/leveldb/memdb" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/storage" - "github.com/syndtr/goleveldb/leveldb/table" - "github.com/syndtr/goleveldb/leveldb/util" -) - -// DB is a LevelDB database. -type DB struct { - // Need 64-bit alignment. - seq uint64 - - // Stats. Need 64-bit alignment. - cWriteDelay int64 // The cumulative duration of write delays - cWriteDelayN int32 // The cumulative number of write delays - inWritePaused int32 // The indicator whether write operation is paused by compaction - aliveSnaps, aliveIters int32 - - // Session. - s *session - - // MemDB. - memMu sync.RWMutex - memPool chan *memdb.DB - mem, frozenMem *memDB - journal *journal.Writer - journalWriter storage.Writer - journalFd storage.FileDesc - frozenJournalFd storage.FileDesc - frozenSeq uint64 - - // Snapshot. - snapsMu sync.Mutex - snapsList *list.List - - // Write. - batchPool sync.Pool - writeMergeC chan writeMerge - writeMergedC chan bool - writeLockC chan struct{} - writeAckC chan error - writeDelay time.Duration - writeDelayN int - tr *Transaction - - // Compaction. - compCommitLk sync.Mutex - tcompCmdC chan cCmd - tcompPauseC chan chan<- struct{} - mcompCmdC chan cCmd - compErrC chan error - compPerErrC chan error - compErrSetC chan error - compWriteLocking bool - compStats cStats - memdbMaxLevel int // For testing. - - // Close. 
- closeW sync.WaitGroup - closeC chan struct{} - closed uint32 - closer io.Closer -} - -func openDB(s *session) (*DB, error) { - s.log("db@open opening") - start := time.Now() - db := &DB{ - s: s, - // Initial sequence - seq: s.stSeqNum, - // MemDB - memPool: make(chan *memdb.DB, 1), - // Snapshot - snapsList: list.New(), - // Write - batchPool: sync.Pool{New: newBatch}, - writeMergeC: make(chan writeMerge), - writeMergedC: make(chan bool), - writeLockC: make(chan struct{}, 1), - writeAckC: make(chan error), - // Compaction - tcompCmdC: make(chan cCmd), - tcompPauseC: make(chan chan<- struct{}), - mcompCmdC: make(chan cCmd), - compErrC: make(chan error), - compPerErrC: make(chan error), - compErrSetC: make(chan error), - // Close - closeC: make(chan struct{}), - } - - // Read-only mode. - readOnly := s.o.GetReadOnly() - - if readOnly { - // Recover journals (read-only mode). - if err := db.recoverJournalRO(); err != nil { - return nil, err - } - } else { - // Recover journals. - if err := db.recoverJournal(); err != nil { - return nil, err - } - - // Remove any obsolete files. - if err := db.checkAndCleanFiles(); err != nil { - // Close journal. - if db.journal != nil { - db.journal.Close() - db.journalWriter.Close() - } - return nil, err - } - - } - - // Doesn't need to be included in the wait group. - go db.compactionError() - go db.mpoolDrain() - - if readOnly { - db.SetReadOnly() - } else { - db.closeW.Add(2) - go db.tCompaction() - go db.mCompaction() - // go db.jWriter() - } - - s.logf("db@open done T·%v", time.Since(start)) - - runtime.SetFinalizer(db, (*DB).Close) - return db, nil -} - -// Open opens or creates a DB for the given storage. -// The DB will be created if not exist, unless ErrorIfMissing is true. -// Also, if ErrorIfExist is true and the DB exist Open will returns -// os.ErrExist error. -// -// Open will return an error with type of ErrCorrupted if corruption -// detected in the DB. 
Use errors.IsCorrupted to test whether an error is -// due to corruption. Corrupted DB can be recovered with Recover function. -// -// The returned DB instance is safe for concurrent use. -// The DB must be closed after use, by calling Close method. -func Open(stor storage.Storage, o *opt.Options) (db *DB, err error) { - s, err := newSession(stor, o) - if err != nil { - return - } - defer func() { - if err != nil { - s.close() - s.release() - } - }() - - err = s.recover() - if err != nil { - if !os.IsNotExist(err) || s.o.GetErrorIfMissing() || s.o.GetReadOnly() { - return - } - err = s.create() - if err != nil { - return - } - } else if s.o.GetErrorIfExist() { - err = os.ErrExist - return - } - - return openDB(s) -} - -// OpenFile opens or creates a DB for the given path. -// The DB will be created if not exist, unless ErrorIfMissing is true. -// Also, if ErrorIfExist is true and the DB exist OpenFile will returns -// os.ErrExist error. -// -// OpenFile uses standard file-system backed storage implementation as -// described in the leveldb/storage package. -// -// OpenFile will return an error with type of ErrCorrupted if corruption -// detected in the DB. Use errors.IsCorrupted to test whether an error is -// due to corruption. Corrupted DB can be recovered with Recover function. -// -// The returned DB instance is safe for concurrent use. -// The DB must be closed after use, by calling Close method. -func OpenFile(path string, o *opt.Options) (db *DB, err error) { - stor, err := storage.OpenFile(path, o.GetReadOnly()) - if err != nil { - return - } - db, err = Open(stor, o) - if err != nil { - stor.Close() - } else { - db.closer = stor - } - return -} - -// Recover recovers and opens a DB with missing or corrupted manifest files -// for the given storage. It will ignore any manifest files, valid or not. -// The DB must already exist or it will returns an error. -// Also, Recover will ignore ErrorIfMissing and ErrorIfExist options. 
-// -// The returned DB instance is safe for concurrent use. -// The DB must be closed after use, by calling Close method. -func Recover(stor storage.Storage, o *opt.Options) (db *DB, err error) { - s, err := newSession(stor, o) - if err != nil { - return - } - defer func() { - if err != nil { - s.close() - s.release() - } - }() - - err = recoverTable(s, o) - if err != nil { - return - } - return openDB(s) -} - -// RecoverFile recovers and opens a DB with missing or corrupted manifest files -// for the given path. It will ignore any manifest files, valid or not. -// The DB must already exist or it will returns an error. -// Also, Recover will ignore ErrorIfMissing and ErrorIfExist options. -// -// RecoverFile uses standard file-system backed storage implementation as described -// in the leveldb/storage package. -// -// The returned DB instance is safe for concurrent use. -// The DB must be closed after use, by calling Close method. -func RecoverFile(path string, o *opt.Options) (db *DB, err error) { - stor, err := storage.OpenFile(path, false) - if err != nil { - return - } - db, err = Recover(stor, o) - if err != nil { - stor.Close() - } else { - db.closer = stor - } - return -} - -func recoverTable(s *session, o *opt.Options) error { - o = dupOptions(o) - // Mask StrictReader, lets StrictRecovery doing its job. - o.Strict &= ^opt.StrictReader - - // Get all tables and sort it by file number. - fds, err := s.stor.List(storage.TypeTable) - if err != nil { - return err - } - sortFds(fds) - - var ( - maxSeq uint64 - recoveredKey, goodKey, corruptedKey, corruptedBlock, droppedTable int - - // We will drop corrupted table. 
- strict = o.GetStrict(opt.StrictRecovery) - noSync = o.GetNoSync() - - rec = &sessionRecord{} - bpool = util.NewBufferPool(o.GetBlockSize() + 5) - ) - buildTable := func(iter iterator.Iterator) (tmpFd storage.FileDesc, size int64, err error) { - tmpFd = s.newTemp() - writer, err := s.stor.Create(tmpFd) - if err != nil { - return - } - defer func() { - writer.Close() - if err != nil { - s.stor.Remove(tmpFd) - tmpFd = storage.FileDesc{} - } - }() - - // Copy entries. - tw := table.NewWriter(writer, o) - for iter.Next() { - key := iter.Key() - if validInternalKey(key) { - err = tw.Append(key, iter.Value()) - if err != nil { - return - } - } - } - err = iter.Error() - if err != nil && !errors.IsCorrupted(err) { - return - } - err = tw.Close() - if err != nil { - return - } - if !noSync { - err = writer.Sync() - if err != nil { - return - } - } - size = int64(tw.BytesLen()) - return - } - recoverTable := func(fd storage.FileDesc) error { - s.logf("table@recovery recovering @%d", fd.Num) - reader, err := s.stor.Open(fd) - if err != nil { - return err - } - var closed bool - defer func() { - if !closed { - reader.Close() - } - }() - - // Get file size. - size, err := reader.Seek(0, 2) - if err != nil { - return err - } - - var ( - tSeq uint64 - tgoodKey, tcorruptedKey, tcorruptedBlock int - imin, imax []byte - ) - tr, err := table.NewReader(reader, size, fd, nil, bpool, o) - if err != nil { - return err - } - iter := tr.NewIterator(nil, nil) - if itererr, ok := iter.(iterator.ErrorCallbackSetter); ok { - itererr.SetErrorCallback(func(err error) { - if errors.IsCorrupted(err) { - s.logf("table@recovery block corruption @%d %q", fd.Num, err) - tcorruptedBlock++ - } - }) - } - - // Scan the table. - for iter.Next() { - key := iter.Key() - _, seq, _, kerr := parseInternalKey(key) - if kerr != nil { - tcorruptedKey++ - continue - } - tgoodKey++ - if seq > tSeq { - tSeq = seq - } - if imin == nil { - imin = append([]byte{}, key...) - } - imax = append(imax[:0], key...) 
- } - if err := iter.Error(); err != nil && !errors.IsCorrupted(err) { - iter.Release() - return err - } - iter.Release() - - goodKey += tgoodKey - corruptedKey += tcorruptedKey - corruptedBlock += tcorruptedBlock - - if strict && (tcorruptedKey > 0 || tcorruptedBlock > 0) { - droppedTable++ - s.logf("table@recovery dropped @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", fd.Num, tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq) - return nil - } - - if tgoodKey > 0 { - if tcorruptedKey > 0 || tcorruptedBlock > 0 { - // Rebuild the table. - s.logf("table@recovery rebuilding @%d", fd.Num) - iter := tr.NewIterator(nil, nil) - tmpFd, newSize, err := buildTable(iter) - iter.Release() - if err != nil { - return err - } - closed = true - reader.Close() - if err := s.stor.Rename(tmpFd, fd); err != nil { - return err - } - size = newSize - } - if tSeq > maxSeq { - maxSeq = tSeq - } - recoveredKey += tgoodKey - // Add table to level 0. - rec.addTable(0, fd.Num, size, imin, imax) - s.logf("table@recovery recovered @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", fd.Num, tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq) - } else { - droppedTable++ - s.logf("table@recovery unrecoverable @%d Ck·%d Cb·%d S·%d", fd.Num, tcorruptedKey, tcorruptedBlock, size) - } - - return nil - } - - // Recover all tables. - if len(fds) > 0 { - s.logf("table@recovery F·%d", len(fds)) - - // Mark file number as used. - s.markFileNum(fds[len(fds)-1].Num) - - for _, fd := range fds { - if err := recoverTable(fd); err != nil { - return err - } - } - - s.logf("table@recovery recovered F·%d N·%d Gk·%d Ck·%d Q·%d", len(fds), recoveredKey, goodKey, corruptedKey, maxSeq) - } - - // Set sequence number. - rec.setSeqNum(maxSeq) - - // Create new manifest. - if err := s.create(); err != nil { - return err - } - - // Commit. - return s.commit(rec) -} - -func (db *DB) recoverJournal() error { - // Get all journals and sort it by file number. 
- rawFds, err := db.s.stor.List(storage.TypeJournal) - if err != nil { - return err - } - sortFds(rawFds) - - // Journals that will be recovered. - var fds []storage.FileDesc - for _, fd := range rawFds { - if fd.Num >= db.s.stJournalNum || fd.Num == db.s.stPrevJournalNum { - fds = append(fds, fd) - } - } - - var ( - ofd storage.FileDesc // Obsolete file. - rec = &sessionRecord{} - ) - - // Recover journals. - if len(fds) > 0 { - db.logf("journal@recovery F·%d", len(fds)) - - // Mark file number as used. - db.s.markFileNum(fds[len(fds)-1].Num) - - var ( - // Options. - strict = db.s.o.GetStrict(opt.StrictJournal) - checksum = db.s.o.GetStrict(opt.StrictJournalChecksum) - writeBuffer = db.s.o.GetWriteBuffer() - - jr *journal.Reader - mdb = memdb.New(db.s.icmp, writeBuffer) - buf = &util.Buffer{} - batchSeq uint64 - batchLen int - ) - - for _, fd := range fds { - db.logf("journal@recovery recovering @%d", fd.Num) - - fr, err := db.s.stor.Open(fd) - if err != nil { - return err - } - - // Create or reset journal reader instance. - if jr == nil { - jr = journal.NewReader(fr, dropper{db.s, fd}, strict, checksum) - } else { - jr.Reset(fr, dropper{db.s, fd}, strict, checksum) - } - - // Flush memdb and remove obsolete journal file. - if !ofd.Zero() { - if mdb.Len() > 0 { - if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil { - fr.Close() - return err - } - } - - rec.setJournalNum(fd.Num) - rec.setSeqNum(db.seq) - if err := db.s.commit(rec); err != nil { - fr.Close() - return err - } - rec.resetAddedTables() - - db.s.stor.Remove(ofd) - ofd = storage.FileDesc{} - } - - // Replay journal to memdb. - mdb.Reset() - for { - r, err := jr.Next() - if err != nil { - if err == io.EOF { - break - } - - fr.Close() - return errors.SetFd(err, fd) - } - - buf.Reset() - if _, err := buf.ReadFrom(r); err != nil { - if err == io.ErrUnexpectedEOF { - // This is error returned due to corruption, with strict == false. 
- continue - } - - fr.Close() - return errors.SetFd(err, fd) - } - batchSeq, batchLen, err = decodeBatchToMem(buf.Bytes(), db.seq, mdb) - if err != nil { - if !strict && errors.IsCorrupted(err) { - db.s.logf("journal error: %v (skipped)", err) - // We won't apply sequence number as it might be corrupted. - continue - } - - fr.Close() - return errors.SetFd(err, fd) - } - - // Save sequence number. - db.seq = batchSeq + uint64(batchLen) - - // Flush it if large enough. - if mdb.Size() >= writeBuffer { - if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil { - fr.Close() - return err - } - - mdb.Reset() - } - } - - fr.Close() - ofd = fd - } - - // Flush the last memdb. - if mdb.Len() > 0 { - if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil { - return err - } - } - } - - // Create a new journal. - if _, err := db.newMem(0); err != nil { - return err - } - - // Commit. - rec.setJournalNum(db.journalFd.Num) - rec.setSeqNum(db.seq) - if err := db.s.commit(rec); err != nil { - // Close journal on error. - if db.journal != nil { - db.journal.Close() - db.journalWriter.Close() - } - return err - } - - // Remove the last obsolete journal file. - if !ofd.Zero() { - db.s.stor.Remove(ofd) - } - - return nil -} - -func (db *DB) recoverJournalRO() error { - // Get all journals and sort it by file number. - rawFds, err := db.s.stor.List(storage.TypeJournal) - if err != nil { - return err - } - sortFds(rawFds) - - // Journals that will be recovered. - var fds []storage.FileDesc - for _, fd := range rawFds { - if fd.Num >= db.s.stJournalNum || fd.Num == db.s.stPrevJournalNum { - fds = append(fds, fd) - } - } - - var ( - // Options. - strict = db.s.o.GetStrict(opt.StrictJournal) - checksum = db.s.o.GetStrict(opt.StrictJournalChecksum) - writeBuffer = db.s.o.GetWriteBuffer() - - mdb = memdb.New(db.s.icmp, writeBuffer) - ) - - // Recover journals. 
- if len(fds) > 0 { - db.logf("journal@recovery RO·Mode F·%d", len(fds)) - - var ( - jr *journal.Reader - buf = &util.Buffer{} - batchSeq uint64 - batchLen int - ) - - for _, fd := range fds { - db.logf("journal@recovery recovering @%d", fd.Num) - - fr, err := db.s.stor.Open(fd) - if err != nil { - return err - } - - // Create or reset journal reader instance. - if jr == nil { - jr = journal.NewReader(fr, dropper{db.s, fd}, strict, checksum) - } else { - jr.Reset(fr, dropper{db.s, fd}, strict, checksum) - } - - // Replay journal to memdb. - for { - r, err := jr.Next() - if err != nil { - if err == io.EOF { - break - } - - fr.Close() - return errors.SetFd(err, fd) - } - - buf.Reset() - if _, err := buf.ReadFrom(r); err != nil { - if err == io.ErrUnexpectedEOF { - // This is error returned due to corruption, with strict == false. - continue - } - - fr.Close() - return errors.SetFd(err, fd) - } - batchSeq, batchLen, err = decodeBatchToMem(buf.Bytes(), db.seq, mdb) - if err != nil { - if !strict && errors.IsCorrupted(err) { - db.s.logf("journal error: %v (skipped)", err) - // We won't apply sequence number as it might be corrupted. - continue - } - - fr.Close() - return errors.SetFd(err, fd) - } - - // Save sequence number. - db.seq = batchSeq + uint64(batchLen) - } - - fr.Close() - } - } - - // Set memDB. - db.mem = &memDB{db: db, DB: mdb, ref: 1} - - return nil -} - -func memGet(mdb *memdb.DB, ikey internalKey, icmp *iComparer) (ok bool, mv []byte, err error) { - mk, mv, err := mdb.Find(ikey) - if err == nil { - ukey, _, kt, kerr := parseInternalKey(mk) - if kerr != nil { - // Shouldn't have had happen. 
- panic(kerr) - } - if icmp.uCompare(ukey, ikey.ukey()) == 0 { - if kt == keyTypeDel { - return true, nil, ErrNotFound - } - return true, mv, nil - - } - } else if err != ErrNotFound { - return true, nil, err - } - return -} - -func (db *DB) get(auxm *memdb.DB, auxt tFiles, key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, err error) { - ikey := makeInternalKey(nil, key, seq, keyTypeSeek) - - if auxm != nil { - if ok, mv, me := memGet(auxm, ikey, db.s.icmp); ok { - return append([]byte{}, mv...), me - } - } - - em, fm := db.getMems() - for _, m := range [...]*memDB{em, fm} { - if m == nil { - continue - } - defer m.decref() - - if ok, mv, me := memGet(m.DB, ikey, db.s.icmp); ok { - return append([]byte{}, mv...), me - } - } - - v := db.s.version() - value, cSched, err := v.get(auxt, ikey, ro, false) - v.release() - if cSched { - // Trigger table compaction. - db.compTrigger(db.tcompCmdC) - } - return -} - -func nilIfNotFound(err error) error { - if err == ErrNotFound { - return nil - } - return err -} - -func (db *DB) has(auxm *memdb.DB, auxt tFiles, key []byte, seq uint64, ro *opt.ReadOptions) (ret bool, err error) { - ikey := makeInternalKey(nil, key, seq, keyTypeSeek) - - if auxm != nil { - if ok, _, me := memGet(auxm, ikey, db.s.icmp); ok { - return me == nil, nilIfNotFound(me) - } - } - - em, fm := db.getMems() - for _, m := range [...]*memDB{em, fm} { - if m == nil { - continue - } - defer m.decref() - - if ok, _, me := memGet(m.DB, ikey, db.s.icmp); ok { - return me == nil, nilIfNotFound(me) - } - } - - v := db.s.version() - _, cSched, err := v.get(auxt, ikey, ro, true) - v.release() - if cSched { - // Trigger table compaction. - db.compTrigger(db.tcompCmdC) - } - if err == nil { - ret = true - } else if err == ErrNotFound { - err = nil - } - return -} - -// Get gets the value for the given key. It returns ErrNotFound if the -// DB does not contains the key. 
-// -// The returned slice is its own copy, it is safe to modify the contents -// of the returned slice. -// It is safe to modify the contents of the argument after Get returns. -func (db *DB) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) { - err = db.ok() - if err != nil { - return - } - - se := db.acquireSnapshot() - defer db.releaseSnapshot(se) - return db.get(nil, nil, key, se.seq, ro) -} - -// Has returns true if the DB does contains the given key. -// -// It is safe to modify the contents of the argument after Has returns. -func (db *DB) Has(key []byte, ro *opt.ReadOptions) (ret bool, err error) { - err = db.ok() - if err != nil { - return - } - - se := db.acquireSnapshot() - defer db.releaseSnapshot(se) - return db.has(nil, nil, key, se.seq, ro) -} - -// NewIterator returns an iterator for the latest snapshot of the -// underlying DB. -// The returned iterator is not safe for concurrent use, but it is safe to use -// multiple iterators concurrently, with each in a dedicated goroutine. -// It is also safe to use an iterator concurrently with modifying its -// underlying DB. The resultant key/value pairs are guaranteed to be -// consistent. -// -// Slice allows slicing the iterator to only contains keys in the given -// range. A nil Range.Start is treated as a key before all keys in the -// DB. And a nil Range.Limit is treated as a key after all keys in -// the DB. -// -// WARNING: Any slice returned by interator (e.g. slice returned by calling -// Iterator.Key() or Iterator.Key() methods), its content should not be modified -// unless noted otherwise. -// -// The iterator must be released after use, by calling Release method. -// -// Also read Iterator documentation of the leveldb/iterator package. 
-func (db *DB) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { - if err := db.ok(); err != nil { - return iterator.NewEmptyIterator(err) - } - - se := db.acquireSnapshot() - defer db.releaseSnapshot(se) - // Iterator holds 'version' lock, 'version' is immutable so snapshot - // can be released after iterator created. - return db.newIterator(nil, nil, se.seq, slice, ro) -} - -// GetSnapshot returns a latest snapshot of the underlying DB. A snapshot -// is a frozen snapshot of a DB state at a particular point in time. The -// content of snapshot are guaranteed to be consistent. -// -// The snapshot must be released after use, by calling Release method. -func (db *DB) GetSnapshot() (*Snapshot, error) { - if err := db.ok(); err != nil { - return nil, err - } - - return db.newSnapshot(), nil -} - -// GetProperty returns value of the given property name. -// -// Property names: -// leveldb.num-files-at-level{n} -// Returns the number of files at level 'n'. -// leveldb.stats -// Returns statistics of the underlying DB. -// leveldb.iostats -// Returns statistics of effective disk read and write. -// leveldb.writedelay -// Returns cumulative write delay caused by compaction. -// leveldb.sstables -// Returns sstables list for each level. -// leveldb.blockpool -// Returns block pool stats. -// leveldb.cachedblock -// Returns size of cached block. -// leveldb.openedtables -// Returns number of opened tables. -// leveldb.alivesnaps -// Returns number of alive snapshots. -// leveldb.aliveiters -// Returns number of alive iterators. -func (db *DB) GetProperty(name string) (value string, err error) { - err = db.ok() - if err != nil { - return - } - - const prefix = "leveldb." 
- if !strings.HasPrefix(name, prefix) { - return "", ErrNotFound - } - p := name[len(prefix):] - - v := db.s.version() - defer v.release() - - numFilesPrefix := "num-files-at-level" - switch { - case strings.HasPrefix(p, numFilesPrefix): - var level uint - var rest string - n, _ := fmt.Sscanf(p[len(numFilesPrefix):], "%d%s", &level, &rest) - if n != 1 { - err = ErrNotFound - } else { - value = fmt.Sprint(v.tLen(int(level))) - } - case p == "stats": - value = "Compactions\n" + - " Level | Tables | Size(MB) | Time(sec) | Read(MB) | Write(MB)\n" + - "-------+------------+---------------+---------------+---------------+---------------\n" - for level, tables := range v.levels { - duration, read, write := db.compStats.getStat(level) - if len(tables) == 0 && duration == 0 { - continue - } - value += fmt.Sprintf(" %3d | %10d | %13.5f | %13.5f | %13.5f | %13.5f\n", - level, len(tables), float64(tables.size())/1048576.0, duration.Seconds(), - float64(read)/1048576.0, float64(write)/1048576.0) - } - case p == "iostats": - value = fmt.Sprintf("Read(MB):%.5f Write(MB):%.5f", - float64(db.s.stor.reads())/1048576.0, - float64(db.s.stor.writes())/1048576.0) - case p == "writedelay": - writeDelayN, writeDelay := atomic.LoadInt32(&db.cWriteDelayN), time.Duration(atomic.LoadInt64(&db.cWriteDelay)) - paused := atomic.LoadInt32(&db.inWritePaused) == 1 - value = fmt.Sprintf("DelayN:%d Delay:%s Paused:%t", writeDelayN, writeDelay, paused) - case p == "sstables": - for level, tables := range v.levels { - value += fmt.Sprintf("--- level %d ---\n", level) - for _, t := range tables { - value += fmt.Sprintf("%d:%d[%q .. 
%q]\n", t.fd.Num, t.size, t.imin, t.imax) - } - } - case p == "blockpool": - value = fmt.Sprintf("%v", db.s.tops.bpool) - case p == "cachedblock": - if db.s.tops.bcache != nil { - value = fmt.Sprintf("%d", db.s.tops.bcache.Size()) - } else { - value = "" - } - case p == "openedtables": - value = fmt.Sprintf("%d", db.s.tops.cache.Size()) - case p == "alivesnaps": - value = fmt.Sprintf("%d", atomic.LoadInt32(&db.aliveSnaps)) - case p == "aliveiters": - value = fmt.Sprintf("%d", atomic.LoadInt32(&db.aliveIters)) - default: - err = ErrNotFound - } - - return -} - -// DBStats is database statistics. -type DBStats struct { - WriteDelayCount int32 - WriteDelayDuration time.Duration - WritePaused bool - - AliveSnapshots int32 - AliveIterators int32 - - IOWrite uint64 - IORead uint64 - - BlockCacheSize int - OpenedTablesCount int - - LevelSizes []int64 - LevelTablesCounts []int - LevelRead []int64 - LevelWrite []int64 - LevelDurations []time.Duration -} - -// Stats populates s with database statistics. 
-func (db *DB) Stats(s *DBStats) error { - err := db.ok() - if err != nil { - return err - } - - s.IORead = db.s.stor.reads() - s.IOWrite = db.s.stor.writes() - s.WriteDelayCount = atomic.LoadInt32(&db.cWriteDelayN) - s.WriteDelayDuration = time.Duration(atomic.LoadInt64(&db.cWriteDelay)) - s.WritePaused = atomic.LoadInt32(&db.inWritePaused) == 1 - - s.OpenedTablesCount = db.s.tops.cache.Size() - if db.s.tops.bcache != nil { - s.BlockCacheSize = db.s.tops.bcache.Size() - } else { - s.BlockCacheSize = 0 - } - - s.AliveIterators = atomic.LoadInt32(&db.aliveIters) - s.AliveSnapshots = atomic.LoadInt32(&db.aliveSnaps) - - s.LevelDurations = s.LevelDurations[:0] - s.LevelRead = s.LevelRead[:0] - s.LevelWrite = s.LevelWrite[:0] - s.LevelSizes = s.LevelSizes[:0] - s.LevelTablesCounts = s.LevelTablesCounts[:0] - - v := db.s.version() - defer v.release() - - for level, tables := range v.levels { - duration, read, write := db.compStats.getStat(level) - if len(tables) == 0 && duration == 0 { - continue - } - s.LevelDurations = append(s.LevelDurations, duration) - s.LevelRead = append(s.LevelRead, read) - s.LevelWrite = append(s.LevelWrite, write) - s.LevelSizes = append(s.LevelSizes, tables.size()) - s.LevelTablesCounts = append(s.LevelTablesCounts, len(tables)) - } - - return nil -} - -// SizeOf calculates approximate sizes of the given key ranges. -// The length of the returned sizes are equal with the length of the given -// ranges. The returned sizes measure storage space usage, so if the user -// data compresses by a factor of ten, the returned sizes will be one-tenth -// the size of the corresponding user data size. -// The results may not include the sizes of recently written data. 
-func (db *DB) SizeOf(ranges []util.Range) (Sizes, error) { - if err := db.ok(); err != nil { - return nil, err - } - - v := db.s.version() - defer v.release() - - sizes := make(Sizes, 0, len(ranges)) - for _, r := range ranges { - imin := makeInternalKey(nil, r.Start, keyMaxSeq, keyTypeSeek) - imax := makeInternalKey(nil, r.Limit, keyMaxSeq, keyTypeSeek) - start, err := v.offsetOf(imin) - if err != nil { - return nil, err - } - limit, err := v.offsetOf(imax) - if err != nil { - return nil, err - } - var size int64 - if limit >= start { - size = limit - start - } - sizes = append(sizes, size) - } - - return sizes, nil -} - -// Close closes the DB. This will also releases any outstanding snapshot, -// abort any in-flight compaction and discard open transaction. -// -// It is not safe to close a DB until all outstanding iterators are released. -// It is valid to call Close multiple times. Other methods should not be -// called after the DB has been closed. -func (db *DB) Close() error { - if !db.setClosed() { - return ErrClosed - } - - start := time.Now() - db.log("db@close closing") - - // Clear the finalizer. - runtime.SetFinalizer(db, nil) - - // Get compaction error. - var err error - select { - case err = <-db.compErrC: - if err == ErrReadOnly { - err = nil - } - default: - } - - // Signal all goroutines. - close(db.closeC) - - // Discard open transaction. - if db.tr != nil { - db.tr.Discard() - } - - // Acquire writer lock. - db.writeLockC <- struct{}{} - - // Wait for all gorotines to exit. - db.closeW.Wait() - - // Closes journal. - if db.journal != nil { - db.journal.Close() - db.journalWriter.Close() - db.journal = nil - db.journalWriter = nil - } - - if db.writeDelayN > 0 { - db.logf("db@write was delayed N·%d T·%v", db.writeDelayN, db.writeDelay) - } - - // Close session. 
- db.s.close() - db.logf("db@close done T·%v", time.Since(start)) - db.s.release() - - if db.closer != nil { - if err1 := db.closer.Close(); err == nil { - err = err1 - } - db.closer = nil - } - - // Clear memdbs. - db.clearMems() - - return err -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go deleted file mode 100644 index 0c1b9a53..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go +++ /dev/null @@ -1,854 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "sync" - "time" - - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/storage" -) - -var ( - errCompactionTransactExiting = errors.New("leveldb: compaction transact exiting") -) - -type cStat struct { - duration time.Duration - read int64 - write int64 -} - -func (p *cStat) add(n *cStatStaging) { - p.duration += n.duration - p.read += n.read - p.write += n.write -} - -func (p *cStat) get() (duration time.Duration, read, write int64) { - return p.duration, p.read, p.write -} - -type cStatStaging struct { - start time.Time - duration time.Duration - on bool - read int64 - write int64 -} - -func (p *cStatStaging) startTimer() { - if !p.on { - p.start = time.Now() - p.on = true - } -} - -func (p *cStatStaging) stopTimer() { - if p.on { - p.duration += time.Since(p.start) - p.on = false - } -} - -type cStats struct { - lk sync.Mutex - stats []cStat -} - -func (p *cStats) addStat(level int, n *cStatStaging) { - p.lk.Lock() - if level >= len(p.stats) { - newStats := make([]cStat, level+1) - copy(newStats, p.stats) - p.stats = newStats - } - p.stats[level].add(n) - p.lk.Unlock() -} - -func (p *cStats) getStat(level int) (duration time.Duration, read, write int64) { 
- p.lk.Lock() - defer p.lk.Unlock() - if level < len(p.stats) { - return p.stats[level].get() - } - return -} - -func (db *DB) compactionError() { - var err error -noerr: - // No error. - for { - select { - case err = <-db.compErrSetC: - switch { - case err == nil: - case err == ErrReadOnly, errors.IsCorrupted(err): - goto hasperr - default: - goto haserr - } - case <-db.closeC: - return - } - } -haserr: - // Transient error. - for { - select { - case db.compErrC <- err: - case err = <-db.compErrSetC: - switch { - case err == nil: - goto noerr - case err == ErrReadOnly, errors.IsCorrupted(err): - goto hasperr - default: - } - case <-db.closeC: - return - } - } -hasperr: - // Persistent error. - for { - select { - case db.compErrC <- err: - case db.compPerErrC <- err: - case db.writeLockC <- struct{}{}: - // Hold write lock, so that write won't pass-through. - db.compWriteLocking = true - case <-db.closeC: - if db.compWriteLocking { - // We should release the lock or Close will hang. - <-db.writeLockC - } - return - } - } -} - -type compactionTransactCounter int - -func (cnt *compactionTransactCounter) incr() { - *cnt++ -} - -type compactionTransactInterface interface { - run(cnt *compactionTransactCounter) error - revert() error -} - -func (db *DB) compactionTransact(name string, t compactionTransactInterface) { - defer func() { - if x := recover(); x != nil { - if x == errCompactionTransactExiting { - if err := t.revert(); err != nil { - db.logf("%s revert error %q", name, err) - } - } - panic(x) - } - }() - - const ( - backoffMin = 1 * time.Second - backoffMax = 8 * time.Second - backoffMul = 2 * time.Second - ) - var ( - backoff = backoffMin - backoffT = time.NewTimer(backoff) - lastCnt = compactionTransactCounter(0) - - disableBackoff = db.s.o.GetDisableCompactionBackoff() - ) - for n := 0; ; n++ { - // Check whether the DB is closed. 
- if db.isClosed() { - db.logf("%s exiting", name) - db.compactionExitTransact() - } else if n > 0 { - db.logf("%s retrying N·%d", name, n) - } - - // Execute. - cnt := compactionTransactCounter(0) - err := t.run(&cnt) - if err != nil { - db.logf("%s error I·%d %q", name, cnt, err) - } - - // Set compaction error status. - select { - case db.compErrSetC <- err: - case perr := <-db.compPerErrC: - if err != nil { - db.logf("%s exiting (persistent error %q)", name, perr) - db.compactionExitTransact() - } - case <-db.closeC: - db.logf("%s exiting", name) - db.compactionExitTransact() - } - if err == nil { - return - } - if errors.IsCorrupted(err) { - db.logf("%s exiting (corruption detected)", name) - db.compactionExitTransact() - } - - if !disableBackoff { - // Reset backoff duration if counter is advancing. - if cnt > lastCnt { - backoff = backoffMin - lastCnt = cnt - } - - // Backoff. - backoffT.Reset(backoff) - if backoff < backoffMax { - backoff *= backoffMul - if backoff > backoffMax { - backoff = backoffMax - } - } - select { - case <-backoffT.C: - case <-db.closeC: - db.logf("%s exiting", name) - db.compactionExitTransact() - } - } - } -} - -type compactionTransactFunc struct { - runFunc func(cnt *compactionTransactCounter) error - revertFunc func() error -} - -func (t *compactionTransactFunc) run(cnt *compactionTransactCounter) error { - return t.runFunc(cnt) -} - -func (t *compactionTransactFunc) revert() error { - if t.revertFunc != nil { - return t.revertFunc() - } - return nil -} - -func (db *DB) compactionTransactFunc(name string, run func(cnt *compactionTransactCounter) error, revert func() error) { - db.compactionTransact(name, &compactionTransactFunc{run, revert}) -} - -func (db *DB) compactionExitTransact() { - panic(errCompactionTransactExiting) -} - -func (db *DB) compactionCommit(name string, rec *sessionRecord) { - db.compCommitLk.Lock() - defer db.compCommitLk.Unlock() // Defer is necessary. 
- db.compactionTransactFunc(name+"@commit", func(cnt *compactionTransactCounter) error { - return db.s.commit(rec) - }, nil) -} - -func (db *DB) memCompaction() { - mdb := db.getFrozenMem() - if mdb == nil { - return - } - defer mdb.decref() - - db.logf("memdb@flush N·%d S·%s", mdb.Len(), shortenb(mdb.Size())) - - // Don't compact empty memdb. - if mdb.Len() == 0 { - db.logf("memdb@flush skipping") - // drop frozen memdb - db.dropFrozenMem() - return - } - - // Pause table compaction. - resumeC := make(chan struct{}) - select { - case db.tcompPauseC <- (chan<- struct{})(resumeC): - case <-db.compPerErrC: - close(resumeC) - resumeC = nil - case <-db.closeC: - db.compactionExitTransact() - } - - var ( - rec = &sessionRecord{} - stats = &cStatStaging{} - flushLevel int - ) - - // Generate tables. - db.compactionTransactFunc("memdb@flush", func(cnt *compactionTransactCounter) (err error) { - stats.startTimer() - flushLevel, err = db.s.flushMemdb(rec, mdb.DB, db.memdbMaxLevel) - stats.stopTimer() - return - }, func() error { - for _, r := range rec.addedTables { - db.logf("memdb@flush revert @%d", r.num) - if err := db.s.stor.Remove(storage.FileDesc{Type: storage.TypeTable, Num: r.num}); err != nil { - return err - } - } - return nil - }) - - rec.setJournalNum(db.journalFd.Num) - rec.setSeqNum(db.frozenSeq) - - // Commit. - stats.startTimer() - db.compactionCommit("memdb", rec) - stats.stopTimer() - - db.logf("memdb@flush committed F·%d T·%v", len(rec.addedTables), stats.duration) - - for _, r := range rec.addedTables { - stats.write += r.size - } - db.compStats.addStat(flushLevel, stats) - - // Drop frozen memdb. - db.dropFrozenMem() - - // Resume table compaction. - if resumeC != nil { - select { - case <-resumeC: - close(resumeC) - case <-db.closeC: - db.compactionExitTransact() - } - } - - // Trigger table compaction. 
- db.compTrigger(db.tcompCmdC) -} - -type tableCompactionBuilder struct { - db *DB - s *session - c *compaction - rec *sessionRecord - stat0, stat1 *cStatStaging - - snapHasLastUkey bool - snapLastUkey []byte - snapLastSeq uint64 - snapIter int - snapKerrCnt int - snapDropCnt int - - kerrCnt int - dropCnt int - - minSeq uint64 - strict bool - tableSize int - - tw *tWriter -} - -func (b *tableCompactionBuilder) appendKV(key, value []byte) error { - // Create new table if not already. - if b.tw == nil { - // Check for pause event. - if b.db != nil { - select { - case ch := <-b.db.tcompPauseC: - b.db.pauseCompaction(ch) - case <-b.db.closeC: - b.db.compactionExitTransact() - default: - } - } - - // Create new table. - var err error - b.tw, err = b.s.tops.create() - if err != nil { - return err - } - } - - // Write key/value into table. - return b.tw.append(key, value) -} - -func (b *tableCompactionBuilder) needFlush() bool { - return b.tw.tw.BytesLen() >= b.tableSize -} - -func (b *tableCompactionBuilder) flush() error { - t, err := b.tw.finish() - if err != nil { - return err - } - b.rec.addTableFile(b.c.sourceLevel+1, t) - b.stat1.write += t.size - b.s.logf("table@build created L%d@%d N·%d S·%s %q:%q", b.c.sourceLevel+1, t.fd.Num, b.tw.tw.EntriesLen(), shortenb(int(t.size)), t.imin, t.imax) - b.tw = nil - return nil -} - -func (b *tableCompactionBuilder) cleanup() { - if b.tw != nil { - b.tw.drop() - b.tw = nil - } -} - -func (b *tableCompactionBuilder) run(cnt *compactionTransactCounter) error { - snapResumed := b.snapIter > 0 - hasLastUkey := b.snapHasLastUkey // The key might has zero length, so this is necessary. - lastUkey := append([]byte{}, b.snapLastUkey...) - lastSeq := b.snapLastSeq - b.kerrCnt = b.snapKerrCnt - b.dropCnt = b.snapDropCnt - // Restore compaction state. 
- b.c.restore() - - defer b.cleanup() - - b.stat1.startTimer() - defer b.stat1.stopTimer() - - iter := b.c.newIterator() - defer iter.Release() - for i := 0; iter.Next(); i++ { - // Incr transact counter. - cnt.incr() - - // Skip until last state. - if i < b.snapIter { - continue - } - - resumed := false - if snapResumed { - resumed = true - snapResumed = false - } - - ikey := iter.Key() - ukey, seq, kt, kerr := parseInternalKey(ikey) - - if kerr == nil { - shouldStop := !resumed && b.c.shouldStopBefore(ikey) - - if !hasLastUkey || b.s.icmp.uCompare(lastUkey, ukey) != 0 { - // First occurrence of this user key. - - // Only rotate tables if ukey doesn't hop across. - if b.tw != nil && (shouldStop || b.needFlush()) { - if err := b.flush(); err != nil { - return err - } - - // Creates snapshot of the state. - b.c.save() - b.snapHasLastUkey = hasLastUkey - b.snapLastUkey = append(b.snapLastUkey[:0], lastUkey...) - b.snapLastSeq = lastSeq - b.snapIter = i - b.snapKerrCnt = b.kerrCnt - b.snapDropCnt = b.dropCnt - } - - hasLastUkey = true - lastUkey = append(lastUkey[:0], ukey...) - lastSeq = keyMaxSeq - } - - switch { - case lastSeq <= b.minSeq: - // Dropped because newer entry for same user key exist - fallthrough // (A) - case kt == keyTypeDel && seq <= b.minSeq && b.c.baseLevelForKey(lastUkey): - // For this user key: - // (1) there is no data in higher levels - // (2) data in lower levels will have larger seq numbers - // (3) data in layers that are being compacted here and have - // smaller seq numbers will be dropped in the next - // few iterations of this loop (by rule (A) above). - // Therefore this deletion marker is obsolete and can be dropped. - lastSeq = seq - b.dropCnt++ - continue - default: - lastSeq = seq - } - } else { - if b.strict { - return kerr - } - - // Don't drop corrupted keys. 
- hasLastUkey = false - lastUkey = lastUkey[:0] - lastSeq = keyMaxSeq - b.kerrCnt++ - } - - if err := b.appendKV(ikey, iter.Value()); err != nil { - return err - } - } - - if err := iter.Error(); err != nil { - return err - } - - // Finish last table. - if b.tw != nil && !b.tw.empty() { - return b.flush() - } - return nil -} - -func (b *tableCompactionBuilder) revert() error { - for _, at := range b.rec.addedTables { - b.s.logf("table@build revert @%d", at.num) - if err := b.s.stor.Remove(storage.FileDesc{Type: storage.TypeTable, Num: at.num}); err != nil { - return err - } - } - return nil -} - -func (db *DB) tableCompaction(c *compaction, noTrivial bool) { - defer c.release() - - rec := &sessionRecord{} - rec.addCompPtr(c.sourceLevel, c.imax) - - if !noTrivial && c.trivial() { - t := c.levels[0][0] - db.logf("table@move L%d@%d -> L%d", c.sourceLevel, t.fd.Num, c.sourceLevel+1) - rec.delTable(c.sourceLevel, t.fd.Num) - rec.addTableFile(c.sourceLevel+1, t) - db.compactionCommit("table-move", rec) - return - } - - var stats [2]cStatStaging - for i, tables := range c.levels { - for _, t := range tables { - stats[i].read += t.size - // Insert deleted tables into record - rec.delTable(c.sourceLevel+i, t.fd.Num) - } - } - sourceSize := int(stats[0].read + stats[1].read) - minSeq := db.minSeq() - db.logf("table@compaction L%d·%d -> L%d·%d S·%s Q·%d", c.sourceLevel, len(c.levels[0]), c.sourceLevel+1, len(c.levels[1]), shortenb(sourceSize), minSeq) - - b := &tableCompactionBuilder{ - db: db, - s: db.s, - c: c, - rec: rec, - stat1: &stats[1], - minSeq: minSeq, - strict: db.s.o.GetStrict(opt.StrictCompaction), - tableSize: db.s.o.GetCompactionTableSize(c.sourceLevel + 1), - } - db.compactionTransact("table@build", b) - - // Commit. 
- stats[1].startTimer() - db.compactionCommit("table", rec) - stats[1].stopTimer() - - resultSize := int(stats[1].write) - db.logf("table@compaction committed F%s S%s Ke·%d D·%d T·%v", sint(len(rec.addedTables)-len(rec.deletedTables)), sshortenb(resultSize-sourceSize), b.kerrCnt, b.dropCnt, stats[1].duration) - - // Save compaction stats - for i := range stats { - db.compStats.addStat(c.sourceLevel+1, &stats[i]) - } -} - -func (db *DB) tableRangeCompaction(level int, umin, umax []byte) error { - db.logf("table@compaction range L%d %q:%q", level, umin, umax) - if level >= 0 { - if c := db.s.getCompactionRange(level, umin, umax, true); c != nil { - db.tableCompaction(c, true) - } - } else { - // Retry until nothing to compact. - for { - compacted := false - - // Scan for maximum level with overlapped tables. - v := db.s.version() - m := 1 - for i := m; i < len(v.levels); i++ { - tables := v.levels[i] - if tables.overlaps(db.s.icmp, umin, umax, false) { - m = i - } - } - v.release() - - for level := 0; level < m; level++ { - if c := db.s.getCompactionRange(level, umin, umax, false); c != nil { - db.tableCompaction(c, true) - compacted = true - } - } - - if !compacted { - break - } - } - } - - return nil -} - -func (db *DB) tableAutoCompaction() { - if c := db.s.pickCompaction(); c != nil { - db.tableCompaction(c, false) - } -} - -func (db *DB) tableNeedCompaction() bool { - v := db.s.version() - defer v.release() - return v.needCompaction() -} - -// resumeWrite returns an indicator whether we should resume write operation if enough level0 files are compacted. 
-func (db *DB) resumeWrite() bool { - v := db.s.version() - defer v.release() - if v.tLen(0) < db.s.o.GetWriteL0PauseTrigger() { - return true - } - return false -} - -func (db *DB) pauseCompaction(ch chan<- struct{}) { - select { - case ch <- struct{}{}: - case <-db.closeC: - db.compactionExitTransact() - } -} - -type cCmd interface { - ack(err error) -} - -type cAuto struct { - // Note for table compaction, an non-empty ackC represents it's a compaction waiting command. - ackC chan<- error -} - -func (r cAuto) ack(err error) { - if r.ackC != nil { - defer func() { - recover() - }() - r.ackC <- err - } -} - -type cRange struct { - level int - min, max []byte - ackC chan<- error -} - -func (r cRange) ack(err error) { - if r.ackC != nil { - defer func() { - recover() - }() - r.ackC <- err - } -} - -// This will trigger auto compaction but will not wait for it. -func (db *DB) compTrigger(compC chan<- cCmd) { - select { - case compC <- cAuto{}: - default: - } -} - -// This will trigger auto compaction and/or wait for all compaction to be done. -func (db *DB) compTriggerWait(compC chan<- cCmd) (err error) { - ch := make(chan error) - defer close(ch) - // Send cmd. - select { - case compC <- cAuto{ch}: - case err = <-db.compErrC: - return - case <-db.closeC: - return ErrClosed - } - // Wait cmd. - select { - case err = <-ch: - case err = <-db.compErrC: - case <-db.closeC: - return ErrClosed - } - return err -} - -// Send range compaction request. -func (db *DB) compTriggerRange(compC chan<- cCmd, level int, min, max []byte) (err error) { - ch := make(chan error) - defer close(ch) - // Send cmd. - select { - case compC <- cRange{level, min, max, ch}: - case err := <-db.compErrC: - return err - case <-db.closeC: - return ErrClosed - } - // Wait cmd. 
- select { - case err = <-ch: - case err = <-db.compErrC: - case <-db.closeC: - return ErrClosed - } - return err -} - -func (db *DB) mCompaction() { - var x cCmd - - defer func() { - if x := recover(); x != nil { - if x != errCompactionTransactExiting { - panic(x) - } - } - if x != nil { - x.ack(ErrClosed) - } - db.closeW.Done() - }() - - for { - select { - case x = <-db.mcompCmdC: - switch x.(type) { - case cAuto: - db.memCompaction() - x.ack(nil) - x = nil - default: - panic("leveldb: unknown command") - } - case <-db.closeC: - return - } - } -} - -func (db *DB) tCompaction() { - var ( - x cCmd - waitQ []cCmd - ) - - defer func() { - if x := recover(); x != nil { - if x != errCompactionTransactExiting { - panic(x) - } - } - for i := range waitQ { - waitQ[i].ack(ErrClosed) - waitQ[i] = nil - } - if x != nil { - x.ack(ErrClosed) - } - db.closeW.Done() - }() - - for { - if db.tableNeedCompaction() { - select { - case x = <-db.tcompCmdC: - case ch := <-db.tcompPauseC: - db.pauseCompaction(ch) - continue - case <-db.closeC: - return - default: - } - // Resume write operation as soon as possible. - if len(waitQ) > 0 && db.resumeWrite() { - for i := range waitQ { - waitQ[i].ack(nil) - waitQ[i] = nil - } - waitQ = waitQ[:0] - } - } else { - for i := range waitQ { - waitQ[i].ack(nil) - waitQ[i] = nil - } - waitQ = waitQ[:0] - select { - case x = <-db.tcompCmdC: - case ch := <-db.tcompPauseC: - db.pauseCompaction(ch) - continue - case <-db.closeC: - return - } - } - if x != nil { - switch cmd := x.(type) { - case cAuto: - if cmd.ackC != nil { - // Check the write pause state before caching it. 
- if db.resumeWrite() { - x.ack(nil) - } else { - waitQ = append(waitQ, x) - } - } - case cRange: - x.ack(db.tableRangeCompaction(cmd.level, cmd.min, cmd.max)) - default: - panic("leveldb: unknown command") - } - x = nil - } - db.tableAutoCompaction() - } -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_iter.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_iter.go deleted file mode 100644 index 03c24cda..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/db_iter.go +++ /dev/null @@ -1,360 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "errors" - "math/rand" - "runtime" - "sync" - "sync/atomic" - - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/util" -) - -var ( - errInvalidInternalKey = errors.New("leveldb: Iterator: invalid internal key") -) - -type memdbReleaser struct { - once sync.Once - m *memDB -} - -func (mr *memdbReleaser) Release() { - mr.once.Do(func() { - mr.m.decref() - }) -} - -func (db *DB) newRawIterator(auxm *memDB, auxt tFiles, slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { - strict := opt.GetStrict(db.s.o.Options, ro, opt.StrictReader) - em, fm := db.getMems() - v := db.s.version() - - tableIts := v.getIterators(slice, ro) - n := len(tableIts) + len(auxt) + 3 - its := make([]iterator.Iterator, 0, n) - - if auxm != nil { - ami := auxm.NewIterator(slice) - ami.SetReleaser(&memdbReleaser{m: auxm}) - its = append(its, ami) - } - for _, t := range auxt { - its = append(its, v.s.tops.newIterator(t, slice, ro)) - } - - emi := em.NewIterator(slice) - emi.SetReleaser(&memdbReleaser{m: em}) - its = append(its, emi) - if fm != nil { - fmi := fm.NewIterator(slice) - fmi.SetReleaser(&memdbReleaser{m: fm}) - its = append(its, fmi) - } - its = append(its, tableIts...) 
- mi := iterator.NewMergedIterator(its, db.s.icmp, strict) - mi.SetReleaser(&versionReleaser{v: v}) - return mi -} - -func (db *DB) newIterator(auxm *memDB, auxt tFiles, seq uint64, slice *util.Range, ro *opt.ReadOptions) *dbIter { - var islice *util.Range - if slice != nil { - islice = &util.Range{} - if slice.Start != nil { - islice.Start = makeInternalKey(nil, slice.Start, keyMaxSeq, keyTypeSeek) - } - if slice.Limit != nil { - islice.Limit = makeInternalKey(nil, slice.Limit, keyMaxSeq, keyTypeSeek) - } - } - rawIter := db.newRawIterator(auxm, auxt, islice, ro) - iter := &dbIter{ - db: db, - icmp: db.s.icmp, - iter: rawIter, - seq: seq, - strict: opt.GetStrict(db.s.o.Options, ro, opt.StrictReader), - key: make([]byte, 0), - value: make([]byte, 0), - } - atomic.AddInt32(&db.aliveIters, 1) - runtime.SetFinalizer(iter, (*dbIter).Release) - return iter -} - -func (db *DB) iterSamplingRate() int { - return rand.Intn(2 * db.s.o.GetIteratorSamplingRate()) -} - -type dir int - -const ( - dirReleased dir = iota - 1 - dirSOI - dirEOI - dirBackward - dirForward -) - -// dbIter represent an interator states over a database session. 
-type dbIter struct { - db *DB - icmp *iComparer - iter iterator.Iterator - seq uint64 - strict bool - - smaplingGap int - dir dir - key []byte - value []byte - err error - releaser util.Releaser -} - -func (i *dbIter) sampleSeek() { - ikey := i.iter.Key() - i.smaplingGap -= len(ikey) + len(i.iter.Value()) - for i.smaplingGap < 0 { - i.smaplingGap += i.db.iterSamplingRate() - i.db.sampleSeek(ikey) - } -} - -func (i *dbIter) setErr(err error) { - i.err = err - i.key = nil - i.value = nil -} - -func (i *dbIter) iterErr() { - if err := i.iter.Error(); err != nil { - i.setErr(err) - } -} - -func (i *dbIter) Valid() bool { - return i.err == nil && i.dir > dirEOI -} - -func (i *dbIter) First() bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - if i.iter.First() { - i.dir = dirSOI - return i.next() - } - i.dir = dirEOI - i.iterErr() - return false -} - -func (i *dbIter) Last() bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - if i.iter.Last() { - return i.prev() - } - i.dir = dirSOI - i.iterErr() - return false -} - -func (i *dbIter) Seek(key []byte) bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - ikey := makeInternalKey(nil, key, i.seq, keyTypeSeek) - if i.iter.Seek(ikey) { - i.dir = dirSOI - return i.next() - } - i.dir = dirEOI - i.iterErr() - return false -} - -func (i *dbIter) next() bool { - for { - if ukey, seq, kt, kerr := parseInternalKey(i.iter.Key()); kerr == nil { - i.sampleSeek() - if seq <= i.seq { - switch kt { - case keyTypeDel: - // Skip deleted key. - i.key = append(i.key[:0], ukey...) - i.dir = dirForward - case keyTypeVal: - if i.dir == dirSOI || i.icmp.uCompare(ukey, i.key) > 0 { - i.key = append(i.key[:0], ukey...) - i.value = append(i.value[:0], i.iter.Value()...) 
- i.dir = dirForward - return true - } - } - } - } else if i.strict { - i.setErr(kerr) - break - } - if !i.iter.Next() { - i.dir = dirEOI - i.iterErr() - break - } - } - return false -} - -func (i *dbIter) Next() bool { - if i.dir == dirEOI || i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - if !i.iter.Next() || (i.dir == dirBackward && !i.iter.Next()) { - i.dir = dirEOI - i.iterErr() - return false - } - return i.next() -} - -func (i *dbIter) prev() bool { - i.dir = dirBackward - del := true - if i.iter.Valid() { - for { - if ukey, seq, kt, kerr := parseInternalKey(i.iter.Key()); kerr == nil { - i.sampleSeek() - if seq <= i.seq { - if !del && i.icmp.uCompare(ukey, i.key) < 0 { - return true - } - del = (kt == keyTypeDel) - if !del { - i.key = append(i.key[:0], ukey...) - i.value = append(i.value[:0], i.iter.Value()...) - } - } - } else if i.strict { - i.setErr(kerr) - return false - } - if !i.iter.Prev() { - break - } - } - } - if del { - i.dir = dirSOI - i.iterErr() - return false - } - return true -} - -func (i *dbIter) Prev() bool { - if i.dir == dirSOI || i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - switch i.dir { - case dirEOI: - return i.Last() - case dirForward: - for i.iter.Prev() { - if ukey, _, _, kerr := parseInternalKey(i.iter.Key()); kerr == nil { - i.sampleSeek() - if i.icmp.uCompare(ukey, i.key) < 0 { - goto cont - } - } else if i.strict { - i.setErr(kerr) - return false - } - } - i.dir = dirSOI - i.iterErr() - return false - } - -cont: - return i.prev() -} - -func (i *dbIter) Key() []byte { - if i.err != nil || i.dir <= dirEOI { - return nil - } - return i.key -} - -func (i *dbIter) Value() []byte { - if i.err != nil || i.dir <= dirEOI { - return nil - } - return i.value -} - -func (i *dbIter) Release() { - if i.dir != dirReleased { - // Clear the finalizer. 
- runtime.SetFinalizer(i, nil) - - if i.releaser != nil { - i.releaser.Release() - i.releaser = nil - } - - i.dir = dirReleased - i.key = nil - i.value = nil - i.iter.Release() - i.iter = nil - atomic.AddInt32(&i.db.aliveIters, -1) - i.db = nil - } -} - -func (i *dbIter) SetReleaser(releaser util.Releaser) { - if i.dir == dirReleased { - panic(util.ErrReleased) - } - if i.releaser != nil && releaser != nil { - panic(util.ErrHasReleaser) - } - i.releaser = releaser -} - -func (i *dbIter) Error() error { - return i.err -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_snapshot.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_snapshot.go deleted file mode 100644 index c2ad70c8..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/db_snapshot.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "container/list" - "fmt" - "runtime" - "sync" - "sync/atomic" - - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/util" -) - -type snapshotElement struct { - seq uint64 - ref int - e *list.Element -} - -// Acquires a snapshot, based on latest sequence. -func (db *DB) acquireSnapshot() *snapshotElement { - db.snapsMu.Lock() - defer db.snapsMu.Unlock() - - seq := db.getSeq() - - if e := db.snapsList.Back(); e != nil { - se := e.Value.(*snapshotElement) - if se.seq == seq { - se.ref++ - return se - } else if seq < se.seq { - panic("leveldb: sequence number is not increasing") - } - } - se := &snapshotElement{seq: seq, ref: 1} - se.e = db.snapsList.PushBack(se) - return se -} - -// Releases given snapshot element. 
-func (db *DB) releaseSnapshot(se *snapshotElement) { - db.snapsMu.Lock() - defer db.snapsMu.Unlock() - - se.ref-- - if se.ref == 0 { - db.snapsList.Remove(se.e) - se.e = nil - } else if se.ref < 0 { - panic("leveldb: Snapshot: negative element reference") - } -} - -// Gets minimum sequence that not being snapshotted. -func (db *DB) minSeq() uint64 { - db.snapsMu.Lock() - defer db.snapsMu.Unlock() - - if e := db.snapsList.Front(); e != nil { - return e.Value.(*snapshotElement).seq - } - - return db.getSeq() -} - -// Snapshot is a DB snapshot. -type Snapshot struct { - db *DB - elem *snapshotElement - mu sync.RWMutex - released bool -} - -// Creates new snapshot object. -func (db *DB) newSnapshot() *Snapshot { - snap := &Snapshot{ - db: db, - elem: db.acquireSnapshot(), - } - atomic.AddInt32(&db.aliveSnaps, 1) - runtime.SetFinalizer(snap, (*Snapshot).Release) - return snap -} - -func (snap *Snapshot) String() string { - return fmt.Sprintf("leveldb.Snapshot{%d}", snap.elem.seq) -} - -// Get gets the value for the given key. It returns ErrNotFound if -// the DB does not contains the key. -// -// The caller should not modify the contents of the returned slice, but -// it is safe to modify the contents of the argument after Get returns. -func (snap *Snapshot) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) { - err = snap.db.ok() - if err != nil { - return - } - snap.mu.RLock() - defer snap.mu.RUnlock() - if snap.released { - err = ErrSnapshotReleased - return - } - return snap.db.get(nil, nil, key, snap.elem.seq, ro) -} - -// Has returns true if the DB does contains the given key. -// -// It is safe to modify the contents of the argument after Get returns. 
-func (snap *Snapshot) Has(key []byte, ro *opt.ReadOptions) (ret bool, err error) { - err = snap.db.ok() - if err != nil { - return - } - snap.mu.RLock() - defer snap.mu.RUnlock() - if snap.released { - err = ErrSnapshotReleased - return - } - return snap.db.has(nil, nil, key, snap.elem.seq, ro) -} - -// NewIterator returns an iterator for the snapshot of the underlying DB. -// The returned iterator is not safe for concurrent use, but it is safe to use -// multiple iterators concurrently, with each in a dedicated goroutine. -// It is also safe to use an iterator concurrently with modifying its -// underlying DB. The resultant key/value pairs are guaranteed to be -// consistent. -// -// Slice allows slicing the iterator to only contains keys in the given -// range. A nil Range.Start is treated as a key before all keys in the -// DB. And a nil Range.Limit is treated as a key after all keys in -// the DB. -// -// WARNING: Any slice returned by interator (e.g. slice returned by calling -// Iterator.Key() or Iterator.Value() methods), its content should not be -// modified unless noted otherwise. -// -// The iterator must be released after use, by calling Release method. -// Releasing the snapshot doesn't mean releasing the iterator too, the -// iterator would be still valid until released. -// -// Also read Iterator documentation of the leveldb/iterator package. -func (snap *Snapshot) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { - if err := snap.db.ok(); err != nil { - return iterator.NewEmptyIterator(err) - } - snap.mu.Lock() - defer snap.mu.Unlock() - if snap.released { - return iterator.NewEmptyIterator(ErrSnapshotReleased) - } - // Since iterator already hold version ref, it doesn't need to - // hold snapshot ref. - return snap.db.newIterator(nil, nil, snap.elem.seq, slice, ro) -} - -// Release releases the snapshot. 
This will not release any returned -// iterators, the iterators would still be valid until released or the -// underlying DB is closed. -// -// Other methods should not be called after the snapshot has been released. -func (snap *Snapshot) Release() { - snap.mu.Lock() - defer snap.mu.Unlock() - - if !snap.released { - // Clear the finalizer. - runtime.SetFinalizer(snap, nil) - - snap.released = true - snap.db.releaseSnapshot(snap.elem) - atomic.AddInt32(&snap.db.aliveSnaps, -1) - snap.db = nil - snap.elem = nil - } -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_state.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_state.go deleted file mode 100644 index 65e1c54b..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/db_state.go +++ /dev/null @@ -1,239 +0,0 @@ -// Copyright (c) 2013, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "errors" - "sync/atomic" - "time" - - "github.com/syndtr/goleveldb/leveldb/journal" - "github.com/syndtr/goleveldb/leveldb/memdb" - "github.com/syndtr/goleveldb/leveldb/storage" -) - -var ( - errHasFrozenMem = errors.New("has frozen mem") -) - -type memDB struct { - db *DB - *memdb.DB - ref int32 -} - -func (m *memDB) getref() int32 { - return atomic.LoadInt32(&m.ref) -} - -func (m *memDB) incref() { - atomic.AddInt32(&m.ref, 1) -} - -func (m *memDB) decref() { - if ref := atomic.AddInt32(&m.ref, -1); ref == 0 { - // Only put back memdb with std capacity. - if m.Capacity() == m.db.s.o.GetWriteBuffer() { - m.Reset() - m.db.mpoolPut(m.DB) - } - m.db = nil - m.DB = nil - } else if ref < 0 { - panic("negative memdb ref") - } -} - -// Get latest sequence number. -func (db *DB) getSeq() uint64 { - return atomic.LoadUint64(&db.seq) -} - -// Atomically adds delta to seq. 
-func (db *DB) addSeq(delta uint64) { - atomic.AddUint64(&db.seq, delta) -} - -func (db *DB) setSeq(seq uint64) { - atomic.StoreUint64(&db.seq, seq) -} - -func (db *DB) sampleSeek(ikey internalKey) { - v := db.s.version() - if v.sampleSeek(ikey) { - // Trigger table compaction. - db.compTrigger(db.tcompCmdC) - } - v.release() -} - -func (db *DB) mpoolPut(mem *memdb.DB) { - if !db.isClosed() { - select { - case db.memPool <- mem: - default: - } - } -} - -func (db *DB) mpoolGet(n int) *memDB { - var mdb *memdb.DB - select { - case mdb = <-db.memPool: - default: - } - if mdb == nil || mdb.Capacity() < n { - mdb = memdb.New(db.s.icmp, maxInt(db.s.o.GetWriteBuffer(), n)) - } - return &memDB{ - db: db, - DB: mdb, - } -} - -func (db *DB) mpoolDrain() { - ticker := time.NewTicker(30 * time.Second) - for { - select { - case <-ticker.C: - select { - case <-db.memPool: - default: - } - case <-db.closeC: - ticker.Stop() - // Make sure the pool is drained. - select { - case <-db.memPool: - case <-time.After(time.Second): - } - close(db.memPool) - return - } - } -} - -// Create new memdb and froze the old one; need external synchronization. -// newMem only called synchronously by the writer. -func (db *DB) newMem(n int) (mem *memDB, err error) { - fd := storage.FileDesc{Type: storage.TypeJournal, Num: db.s.allocFileNum()} - w, err := db.s.stor.Create(fd) - if err != nil { - db.s.reuseFileNum(fd.Num) - return - } - - db.memMu.Lock() - defer db.memMu.Unlock() - - if db.frozenMem != nil { - return nil, errHasFrozenMem - } - - if db.journal == nil { - db.journal = journal.NewWriter(w) - } else { - db.journal.Reset(w) - db.journalWriter.Close() - db.frozenJournalFd = db.journalFd - } - db.journalWriter = w - db.journalFd = fd - db.frozenMem = db.mem - mem = db.mpoolGet(n) - mem.incref() // for self - mem.incref() // for caller - db.mem = mem - // The seq only incremented by the writer. And whoever called newMem - // should hold write lock, so no need additional synchronization here. 
- db.frozenSeq = db.seq - return -} - -// Get all memdbs. -func (db *DB) getMems() (e, f *memDB) { - db.memMu.RLock() - defer db.memMu.RUnlock() - if db.mem != nil { - db.mem.incref() - } else if !db.isClosed() { - panic("nil effective mem") - } - if db.frozenMem != nil { - db.frozenMem.incref() - } - return db.mem, db.frozenMem -} - -// Get effective memdb. -func (db *DB) getEffectiveMem() *memDB { - db.memMu.RLock() - defer db.memMu.RUnlock() - if db.mem != nil { - db.mem.incref() - } else if !db.isClosed() { - panic("nil effective mem") - } - return db.mem -} - -// Check whether we has frozen memdb. -func (db *DB) hasFrozenMem() bool { - db.memMu.RLock() - defer db.memMu.RUnlock() - return db.frozenMem != nil -} - -// Get frozen memdb. -func (db *DB) getFrozenMem() *memDB { - db.memMu.RLock() - defer db.memMu.RUnlock() - if db.frozenMem != nil { - db.frozenMem.incref() - } - return db.frozenMem -} - -// Drop frozen memdb; assume that frozen memdb isn't nil. -func (db *DB) dropFrozenMem() { - db.memMu.Lock() - if err := db.s.stor.Remove(db.frozenJournalFd); err != nil { - db.logf("journal@remove removing @%d %q", db.frozenJournalFd.Num, err) - } else { - db.logf("journal@remove removed @%d", db.frozenJournalFd.Num) - } - db.frozenJournalFd = storage.FileDesc{} - db.frozenMem.decref() - db.frozenMem = nil - db.memMu.Unlock() -} - -// Clear mems ptr; used by DB.Close(). -func (db *DB) clearMems() { - db.memMu.Lock() - db.mem = nil - db.frozenMem = nil - db.memMu.Unlock() -} - -// Set closed flag; return true if not already closed. -func (db *DB) setClosed() bool { - return atomic.CompareAndSwapUint32(&db.closed, 0, 1) -} - -// Check whether DB was closed. -func (db *DB) isClosed() bool { - return atomic.LoadUint32(&db.closed) != 0 -} - -// Check read ok status. 
-func (db *DB) ok() error { - if db.isClosed() { - return ErrClosed - } - return nil -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_transaction.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_transaction.go deleted file mode 100644 index 1a000018..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/db_transaction.go +++ /dev/null @@ -1,329 +0,0 @@ -// Copyright (c) 2016, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "errors" - "sync" - "time" - - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/util" -) - -var errTransactionDone = errors.New("leveldb: transaction already closed") - -// Transaction is the transaction handle. -type Transaction struct { - db *DB - lk sync.RWMutex - seq uint64 - mem *memDB - tables tFiles - ikScratch []byte - rec sessionRecord - stats cStatStaging - closed bool -} - -// Get gets the value for the given key. It returns ErrNotFound if the -// DB does not contains the key. -// -// The returned slice is its own copy, it is safe to modify the contents -// of the returned slice. -// It is safe to modify the contents of the argument after Get returns. -func (tr *Transaction) Get(key []byte, ro *opt.ReadOptions) ([]byte, error) { - tr.lk.RLock() - defer tr.lk.RUnlock() - if tr.closed { - return nil, errTransactionDone - } - return tr.db.get(tr.mem.DB, tr.tables, key, tr.seq, ro) -} - -// Has returns true if the DB does contains the given key. -// -// It is safe to modify the contents of the argument after Has returns. 
-func (tr *Transaction) Has(key []byte, ro *opt.ReadOptions) (bool, error) { - tr.lk.RLock() - defer tr.lk.RUnlock() - if tr.closed { - return false, errTransactionDone - } - return tr.db.has(tr.mem.DB, tr.tables, key, tr.seq, ro) -} - -// NewIterator returns an iterator for the latest snapshot of the transaction. -// The returned iterator is not safe for concurrent use, but it is safe to use -// multiple iterators concurrently, with each in a dedicated goroutine. -// It is also safe to use an iterator concurrently while writes to the -// transaction. The resultant key/value pairs are guaranteed to be consistent. -// -// Slice allows slicing the iterator to only contains keys in the given -// range. A nil Range.Start is treated as a key before all keys in the -// DB. And a nil Range.Limit is treated as a key after all keys in -// the DB. -// -// WARNING: Any slice returned by interator (e.g. slice returned by calling -// Iterator.Key() or Iterator.Key() methods), its content should not be modified -// unless noted otherwise. -// -// The iterator must be released after use, by calling Release method. -// -// Also read Iterator documentation of the leveldb/iterator package. -func (tr *Transaction) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { - tr.lk.RLock() - defer tr.lk.RUnlock() - if tr.closed { - return iterator.NewEmptyIterator(errTransactionDone) - } - tr.mem.incref() - return tr.db.newIterator(tr.mem, tr.tables, tr.seq, slice, ro) -} - -func (tr *Transaction) flush() error { - // Flush memdb. 
- if tr.mem.Len() != 0 { - tr.stats.startTimer() - iter := tr.mem.NewIterator(nil) - t, n, err := tr.db.s.tops.createFrom(iter) - iter.Release() - tr.stats.stopTimer() - if err != nil { - return err - } - if tr.mem.getref() == 1 { - tr.mem.Reset() - } else { - tr.mem.decref() - tr.mem = tr.db.mpoolGet(0) - tr.mem.incref() - } - tr.tables = append(tr.tables, t) - tr.rec.addTableFile(0, t) - tr.stats.write += t.size - tr.db.logf("transaction@flush created L0@%d N·%d S·%s %q:%q", t.fd.Num, n, shortenb(int(t.size)), t.imin, t.imax) - } - return nil -} - -func (tr *Transaction) put(kt keyType, key, value []byte) error { - tr.ikScratch = makeInternalKey(tr.ikScratch, key, tr.seq+1, kt) - if tr.mem.Free() < len(tr.ikScratch)+len(value) { - if err := tr.flush(); err != nil { - return err - } - } - if err := tr.mem.Put(tr.ikScratch, value); err != nil { - return err - } - tr.seq++ - return nil -} - -// Put sets the value for the given key. It overwrites any previous value -// for that key; a DB is not a multi-map. -// Please note that the transaction is not compacted until committed, so if you -// writes 10 same keys, then those 10 same keys are in the transaction. -// -// It is safe to modify the contents of the arguments after Put returns. -func (tr *Transaction) Put(key, value []byte, wo *opt.WriteOptions) error { - tr.lk.Lock() - defer tr.lk.Unlock() - if tr.closed { - return errTransactionDone - } - return tr.put(keyTypeVal, key, value) -} - -// Delete deletes the value for the given key. -// Please note that the transaction is not compacted until committed, so if you -// writes 10 same keys, then those 10 same keys are in the transaction. -// -// It is safe to modify the contents of the arguments after Delete returns. -func (tr *Transaction) Delete(key []byte, wo *opt.WriteOptions) error { - tr.lk.Lock() - defer tr.lk.Unlock() - if tr.closed { - return errTransactionDone - } - return tr.put(keyTypeDel, key, nil) -} - -// Write apply the given batch to the transaction. 
The batch will be applied -// sequentially. -// Please note that the transaction is not compacted until committed, so if you -// writes 10 same keys, then those 10 same keys are in the transaction. -// -// It is safe to modify the contents of the arguments after Write returns. -func (tr *Transaction) Write(b *Batch, wo *opt.WriteOptions) error { - if b == nil || b.Len() == 0 { - return nil - } - - tr.lk.Lock() - defer tr.lk.Unlock() - if tr.closed { - return errTransactionDone - } - return b.replayInternal(func(i int, kt keyType, k, v []byte) error { - return tr.put(kt, k, v) - }) -} - -func (tr *Transaction) setDone() { - tr.closed = true - tr.db.tr = nil - tr.mem.decref() - <-tr.db.writeLockC -} - -// Commit commits the transaction. If error is not nil, then the transaction is -// not committed, it can then either be retried or discarded. -// -// Other methods should not be called after transaction has been committed. -func (tr *Transaction) Commit() error { - if err := tr.db.ok(); err != nil { - return err - } - - tr.lk.Lock() - defer tr.lk.Unlock() - if tr.closed { - return errTransactionDone - } - if err := tr.flush(); err != nil { - // Return error, lets user decide either to retry or discard - // transaction. - return err - } - if len(tr.tables) != 0 { - // Committing transaction. - tr.rec.setSeqNum(tr.seq) - tr.db.compCommitLk.Lock() - tr.stats.startTimer() - var cerr error - for retry := 0; retry < 3; retry++ { - cerr = tr.db.s.commit(&tr.rec) - if cerr != nil { - tr.db.logf("transaction@commit error R·%d %q", retry, cerr) - select { - case <-time.After(time.Second): - case <-tr.db.closeC: - tr.db.logf("transaction@commit exiting") - tr.db.compCommitLk.Unlock() - return cerr - } - } else { - // Success. Set db.seq. - tr.db.setSeq(tr.seq) - break - } - } - tr.stats.stopTimer() - if cerr != nil { - // Return error, lets user decide either to retry or discard - // transaction. - return cerr - } - - // Update compaction stats. 
This is safe as long as we hold compCommitLk. - tr.db.compStats.addStat(0, &tr.stats) - - // Trigger table auto-compaction. - tr.db.compTrigger(tr.db.tcompCmdC) - tr.db.compCommitLk.Unlock() - - // Additionally, wait compaction when certain threshold reached. - // Ignore error, returns error only if transaction can't be committed. - tr.db.waitCompaction() - } - // Only mark as done if transaction committed successfully. - tr.setDone() - return nil -} - -func (tr *Transaction) discard() { - // Discard transaction. - for _, t := range tr.tables { - tr.db.logf("transaction@discard @%d", t.fd.Num) - if err1 := tr.db.s.stor.Remove(t.fd); err1 == nil { - tr.db.s.reuseFileNum(t.fd.Num) - } - } -} - -// Discard discards the transaction. -// -// Other methods should not be called after transaction has been discarded. -func (tr *Transaction) Discard() { - tr.lk.Lock() - if !tr.closed { - tr.discard() - tr.setDone() - } - tr.lk.Unlock() -} - -func (db *DB) waitCompaction() error { - if db.s.tLen(0) >= db.s.o.GetWriteL0PauseTrigger() { - return db.compTriggerWait(db.tcompCmdC) - } - return nil -} - -// OpenTransaction opens an atomic DB transaction. Only one transaction can be -// opened at a time. Subsequent call to Write and OpenTransaction will be blocked -// until in-flight transaction is committed or discarded. -// The returned transaction handle is safe for concurrent use. -// -// Transaction is expensive and can overwhelm compaction, especially if -// transaction size is small. Use with caution. -// -// The transaction must be closed once done, either by committing or discarding -// the transaction. -// Closing the DB will discard open transaction. -func (db *DB) OpenTransaction() (*Transaction, error) { - if err := db.ok(); err != nil { - return nil, err - } - - // The write happen synchronously. 
- select { - case db.writeLockC <- struct{}{}: - case err := <-db.compPerErrC: - return nil, err - case <-db.closeC: - return nil, ErrClosed - } - - if db.tr != nil { - panic("leveldb: has open transaction") - } - - // Flush current memdb. - if db.mem != nil && db.mem.Len() != 0 { - if _, err := db.rotateMem(0, true); err != nil { - return nil, err - } - } - - // Wait compaction when certain threshold reached. - if err := db.waitCompaction(); err != nil { - return nil, err - } - - tr := &Transaction{ - db: db, - seq: db.seq, - mem: db.mpoolGet(0), - } - tr.mem.incref() - db.tr = tr - return tr, nil -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_util.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_util.go deleted file mode 100644 index 3f065489..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/db_util.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/storage" - "github.com/syndtr/goleveldb/leveldb/util" -) - -// Reader is the interface that wraps basic Get and NewIterator methods. -// This interface implemented by both DB and Snapshot. -type Reader interface { - Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) - NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator -} - -// Sizes is list of size. -type Sizes []int64 - -// Sum returns sum of the sizes. -func (sizes Sizes) Sum() int64 { - var sum int64 - for _, size := range sizes { - sum += size - } - return sum -} - -// Logging. -func (db *DB) log(v ...interface{}) { db.s.log(v...) } -func (db *DB) logf(format string, v ...interface{}) { db.s.logf(format, v...) } - -// Check and clean files. 
-func (db *DB) checkAndCleanFiles() error { - v := db.s.version() - defer v.release() - - tmap := make(map[int64]bool) - for _, tables := range v.levels { - for _, t := range tables { - tmap[t.fd.Num] = false - } - } - - fds, err := db.s.stor.List(storage.TypeAll) - if err != nil { - return err - } - - var nt int - var rem []storage.FileDesc - for _, fd := range fds { - keep := true - switch fd.Type { - case storage.TypeManifest: - keep = fd.Num >= db.s.manifestFd.Num - case storage.TypeJournal: - if !db.frozenJournalFd.Zero() { - keep = fd.Num >= db.frozenJournalFd.Num - } else { - keep = fd.Num >= db.journalFd.Num - } - case storage.TypeTable: - _, keep = tmap[fd.Num] - if keep { - tmap[fd.Num] = true - nt++ - } - } - - if !keep { - rem = append(rem, fd) - } - } - - if nt != len(tmap) { - var mfds []storage.FileDesc - for num, present := range tmap { - if !present { - mfds = append(mfds, storage.FileDesc{Type: storage.TypeTable, Num: num}) - db.logf("db@janitor table missing @%d", num) - } - } - return errors.NewErrCorrupted(storage.FileDesc{}, &errors.ErrMissingFiles{Fds: mfds}) - } - - db.logf("db@janitor F·%d G·%d", len(fds), len(rem)) - for _, fd := range rem { - db.logf("db@janitor removing %s-%d", fd.Type, fd.Num) - if err := db.s.stor.Remove(fd); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_write.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_write.go deleted file mode 100644 index db0c1bec..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/db_write.go +++ /dev/null @@ -1,464 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package leveldb - -import ( - "sync/atomic" - "time" - - "github.com/syndtr/goleveldb/leveldb/memdb" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/util" -) - -func (db *DB) writeJournal(batches []*Batch, seq uint64, sync bool) error { - wr, err := db.journal.Next() - if err != nil { - return err - } - if err := writeBatchesWithHeader(wr, batches, seq); err != nil { - return err - } - if err := db.journal.Flush(); err != nil { - return err - } - if sync { - return db.journalWriter.Sync() - } - return nil -} - -func (db *DB) rotateMem(n int, wait bool) (mem *memDB, err error) { - retryLimit := 3 -retry: - // Wait for pending memdb compaction. - err = db.compTriggerWait(db.mcompCmdC) - if err != nil { - return - } - retryLimit-- - - // Create new memdb and journal. - mem, err = db.newMem(n) - if err != nil { - if err == errHasFrozenMem { - if retryLimit <= 0 { - panic("BUG: still has frozen memdb") - } - goto retry - } - return - } - - // Schedule memdb compaction. - if wait { - err = db.compTriggerWait(db.mcompCmdC) - } else { - db.compTrigger(db.mcompCmdC) - } - return -} - -func (db *DB) flush(n int) (mdb *memDB, mdbFree int, err error) { - delayed := false - slowdownTrigger := db.s.o.GetWriteL0SlowdownTrigger() - pauseTrigger := db.s.o.GetWriteL0PauseTrigger() - flush := func() (retry bool) { - mdb = db.getEffectiveMem() - if mdb == nil { - err = ErrClosed - return false - } - defer func() { - if retry { - mdb.decref() - mdb = nil - } - }() - tLen := db.s.tLen(0) - mdbFree = mdb.Free() - switch { - case tLen >= slowdownTrigger && !delayed: - delayed = true - time.Sleep(time.Millisecond) - case mdbFree >= n: - return false - case tLen >= pauseTrigger: - delayed = true - // Set the write paused flag explicitly. - atomic.StoreInt32(&db.inWritePaused, 1) - err = db.compTriggerWait(db.tcompCmdC) - // Unset the write paused flag. 
- atomic.StoreInt32(&db.inWritePaused, 0) - if err != nil { - return false - } - default: - // Allow memdb to grow if it has no entry. - if mdb.Len() == 0 { - mdbFree = n - } else { - mdb.decref() - mdb, err = db.rotateMem(n, false) - if err == nil { - mdbFree = mdb.Free() - } else { - mdbFree = 0 - } - } - return false - } - return true - } - start := time.Now() - for flush() { - } - if delayed { - db.writeDelay += time.Since(start) - db.writeDelayN++ - } else if db.writeDelayN > 0 { - db.logf("db@write was delayed N·%d T·%v", db.writeDelayN, db.writeDelay) - atomic.AddInt32(&db.cWriteDelayN, int32(db.writeDelayN)) - atomic.AddInt64(&db.cWriteDelay, int64(db.writeDelay)) - db.writeDelay = 0 - db.writeDelayN = 0 - } - return -} - -type writeMerge struct { - sync bool - batch *Batch - keyType keyType - key, value []byte -} - -func (db *DB) unlockWrite(overflow bool, merged int, err error) { - for i := 0; i < merged; i++ { - db.writeAckC <- err - } - if overflow { - // Pass lock to the next write (that failed to merge). - db.writeMergedC <- false - } else { - // Release lock. - <-db.writeLockC - } -} - -// ourBatch is batch that we can modify. -func (db *DB) writeLocked(batch, ourBatch *Batch, merge, sync bool) error { - // Try to flush memdb. This method would also trying to throttle writes - // if it is too fast and compaction cannot catch-up. - mdb, mdbFree, err := db.flush(batch.internalLen) - if err != nil { - db.unlockWrite(false, 0, err) - return err - } - defer mdb.decref() - - var ( - overflow bool - merged int - batches = []*Batch{batch} - ) - - if merge { - // Merge limit. - var mergeLimit int - if batch.internalLen > 128<<10 { - mergeLimit = (1 << 20) - batch.internalLen - } else { - mergeLimit = 128 << 10 - } - mergeCap := mdbFree - batch.internalLen - if mergeLimit > mergeCap { - mergeLimit = mergeCap - } - - merge: - for mergeLimit > 0 { - select { - case incoming := <-db.writeMergeC: - if incoming.batch != nil { - // Merge batch. 
- if incoming.batch.internalLen > mergeLimit { - overflow = true - break merge - } - batches = append(batches, incoming.batch) - mergeLimit -= incoming.batch.internalLen - } else { - // Merge put. - internalLen := len(incoming.key) + len(incoming.value) + 8 - if internalLen > mergeLimit { - overflow = true - break merge - } - if ourBatch == nil { - ourBatch = db.batchPool.Get().(*Batch) - ourBatch.Reset() - batches = append(batches, ourBatch) - } - // We can use same batch since concurrent write doesn't - // guarantee write order. - ourBatch.appendRec(incoming.keyType, incoming.key, incoming.value) - mergeLimit -= internalLen - } - sync = sync || incoming.sync - merged++ - db.writeMergedC <- true - - default: - break merge - } - } - } - - // Release ourBatch if any. - if ourBatch != nil { - defer db.batchPool.Put(ourBatch) - } - - // Seq number. - seq := db.seq + 1 - - // Write journal. - if err := db.writeJournal(batches, seq, sync); err != nil { - db.unlockWrite(overflow, merged, err) - return err - } - - // Put batches. - for _, batch := range batches { - if err := batch.putMem(seq, mdb.DB); err != nil { - panic(err) - } - seq += uint64(batch.Len()) - } - - // Incr seq number. - db.addSeq(uint64(batchesLen(batches))) - - // Rotate memdb if it's reach the threshold. - if batch.internalLen >= mdbFree { - db.rotateMem(0, false) - } - - db.unlockWrite(overflow, merged, nil) - return nil -} - -// Write apply the given batch to the DB. The batch records will be applied -// sequentially. Write might be used concurrently, when used concurrently and -// batch is small enough, write will try to merge the batches. Set NoWriteMerge -// option to true to disable write merge. -// -// It is safe to modify the contents of the arguments after Write returns but -// not before. Write will not modify content of the batch. 
-func (db *DB) Write(batch *Batch, wo *opt.WriteOptions) error { - if err := db.ok(); err != nil || batch == nil || batch.Len() == 0 { - return err - } - - // If the batch size is larger than write buffer, it may justified to write - // using transaction instead. Using transaction the batch will be written - // into tables directly, skipping the journaling. - if batch.internalLen > db.s.o.GetWriteBuffer() && !db.s.o.GetDisableLargeBatchTransaction() { - tr, err := db.OpenTransaction() - if err != nil { - return err - } - if err := tr.Write(batch, wo); err != nil { - tr.Discard() - return err - } - return tr.Commit() - } - - merge := !wo.GetNoWriteMerge() && !db.s.o.GetNoWriteMerge() - sync := wo.GetSync() && !db.s.o.GetNoSync() - - // Acquire write lock. - if merge { - select { - case db.writeMergeC <- writeMerge{sync: sync, batch: batch}: - if <-db.writeMergedC { - // Write is merged. - return <-db.writeAckC - } - // Write is not merged, the write lock is handed to us. Continue. - case db.writeLockC <- struct{}{}: - // Write lock acquired. - case err := <-db.compPerErrC: - // Compaction error. - return err - case <-db.closeC: - // Closed - return ErrClosed - } - } else { - select { - case db.writeLockC <- struct{}{}: - // Write lock acquired. - case err := <-db.compPerErrC: - // Compaction error. - return err - case <-db.closeC: - // Closed - return ErrClosed - } - } - - return db.writeLocked(batch, nil, merge, sync) -} - -func (db *DB) putRec(kt keyType, key, value []byte, wo *opt.WriteOptions) error { - if err := db.ok(); err != nil { - return err - } - - merge := !wo.GetNoWriteMerge() && !db.s.o.GetNoWriteMerge() - sync := wo.GetSync() && !db.s.o.GetNoSync() - - // Acquire write lock. - if merge { - select { - case db.writeMergeC <- writeMerge{sync: sync, keyType: kt, key: key, value: value}: - if <-db.writeMergedC { - // Write is merged. - return <-db.writeAckC - } - // Write is not merged, the write lock is handed to us. Continue. 
- case db.writeLockC <- struct{}{}: - // Write lock acquired. - case err := <-db.compPerErrC: - // Compaction error. - return err - case <-db.closeC: - // Closed - return ErrClosed - } - } else { - select { - case db.writeLockC <- struct{}{}: - // Write lock acquired. - case err := <-db.compPerErrC: - // Compaction error. - return err - case <-db.closeC: - // Closed - return ErrClosed - } - } - - batch := db.batchPool.Get().(*Batch) - batch.Reset() - batch.appendRec(kt, key, value) - return db.writeLocked(batch, batch, merge, sync) -} - -// Put sets the value for the given key. It overwrites any previous value -// for that key; a DB is not a multi-map. Write merge also applies for Put, see -// Write. -// -// It is safe to modify the contents of the arguments after Put returns but not -// before. -func (db *DB) Put(key, value []byte, wo *opt.WriteOptions) error { - return db.putRec(keyTypeVal, key, value, wo) -} - -// Delete deletes the value for the given key. Delete will not returns error if -// key doesn't exist. Write merge also applies for Delete, see Write. -// -// It is safe to modify the contents of the arguments after Delete returns but -// not before. -func (db *DB) Delete(key []byte, wo *opt.WriteOptions) error { - return db.putRec(keyTypeDel, key, nil, wo) -} - -func isMemOverlaps(icmp *iComparer, mem *memdb.DB, min, max []byte) bool { - iter := mem.NewIterator(nil) - defer iter.Release() - return (max == nil || (iter.First() && icmp.uCompare(max, internalKey(iter.Key()).ukey()) >= 0)) && - (min == nil || (iter.Last() && icmp.uCompare(min, internalKey(iter.Key()).ukey()) <= 0)) -} - -// CompactRange compacts the underlying DB for the given key range. -// In particular, deleted and overwritten versions are discarded, -// and the data is rearranged to reduce the cost of operations -// needed to access the data. This operation should typically only -// be invoked by users who understand the underlying implementation. 
-// -// A nil Range.Start is treated as a key before all keys in the DB. -// And a nil Range.Limit is treated as a key after all keys in the DB. -// Therefore if both is nil then it will compact entire DB. -func (db *DB) CompactRange(r util.Range) error { - if err := db.ok(); err != nil { - return err - } - - // Lock writer. - select { - case db.writeLockC <- struct{}{}: - case err := <-db.compPerErrC: - return err - case <-db.closeC: - return ErrClosed - } - - // Check for overlaps in memdb. - mdb := db.getEffectiveMem() - if mdb == nil { - return ErrClosed - } - defer mdb.decref() - if isMemOverlaps(db.s.icmp, mdb.DB, r.Start, r.Limit) { - // Memdb compaction. - if _, err := db.rotateMem(0, false); err != nil { - <-db.writeLockC - return err - } - <-db.writeLockC - if err := db.compTriggerWait(db.mcompCmdC); err != nil { - return err - } - } else { - <-db.writeLockC - } - - // Table compaction. - return db.compTriggerRange(db.tcompCmdC, -1, r.Start, r.Limit) -} - -// SetReadOnly makes DB read-only. It will stay read-only until reopened. -func (db *DB) SetReadOnly() error { - if err := db.ok(); err != nil { - return err - } - - // Lock writer. - select { - case db.writeLockC <- struct{}{}: - db.compWriteLocking = true - case err := <-db.compPerErrC: - return err - case <-db.closeC: - return ErrClosed - } - - // Set compaction read-only. - select { - case db.compErrSetC <- ErrReadOnly: - case perr := <-db.compPerErrC: - return perr - case <-db.closeC: - return ErrClosed - } - - return nil -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/doc.go b/vendor/github.com/syndtr/goleveldb/leveldb/doc.go deleted file mode 100644 index be768e57..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/doc.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -// Package leveldb provides implementation of LevelDB key/value database. -// -// Create or open a database: -// -// // The returned DB instance is safe for concurrent use. Which mean that all -// // DB's methods may be called concurrently from multiple goroutine. -// db, err := leveldb.OpenFile("path/to/db", nil) -// ... -// defer db.Close() -// ... -// -// Read or modify the database content: -// -// // Remember that the contents of the returned slice should not be modified. -// data, err := db.Get([]byte("key"), nil) -// ... -// err = db.Put([]byte("key"), []byte("value"), nil) -// ... -// err = db.Delete([]byte("key"), nil) -// ... -// -// Iterate over database content: -// -// iter := db.NewIterator(nil, nil) -// for iter.Next() { -// // Remember that the contents of the returned slice should not be modified, and -// // only valid until the next call to Next. -// key := iter.Key() -// value := iter.Value() -// ... -// } -// iter.Release() -// err = iter.Error() -// ... -// -// Iterate over subset of database content with a particular prefix: -// iter := db.NewIterator(util.BytesPrefix([]byte("foo-")), nil) -// for iter.Next() { -// // Use key/value. -// ... -// } -// iter.Release() -// err = iter.Error() -// ... -// -// Seek-then-Iterate: -// -// iter := db.NewIterator(nil, nil) -// for ok := iter.Seek(key); ok; ok = iter.Next() { -// // Use key/value. -// ... -// } -// iter.Release() -// err = iter.Error() -// ... -// -// Iterate over subset of database content: -// -// iter := db.NewIterator(&util.Range{Start: []byte("foo"), Limit: []byte("xoo")}, nil) -// for iter.Next() { -// // Use key/value. -// ... -// } -// iter.Release() -// err = iter.Error() -// ... -// -// Batch writes: -// -// batch := new(leveldb.Batch) -// batch.Put([]byte("foo"), []byte("value")) -// batch.Put([]byte("bar"), []byte("another value")) -// batch.Delete([]byte("baz")) -// err = db.Write(batch, nil) -// ... 
-// -// Use bloom filter: -// -// o := &opt.Options{ -// Filter: filter.NewBloomFilter(10), -// } -// db, err := leveldb.OpenFile("path/to/db", o) -// ... -// defer db.Close() -// ... -package leveldb diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/errors.go b/vendor/github.com/syndtr/goleveldb/leveldb/errors.go deleted file mode 100644 index de264981..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/errors.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "github.com/syndtr/goleveldb/leveldb/errors" -) - -// Common errors. -var ( - ErrNotFound = errors.ErrNotFound - ErrReadOnly = errors.New("leveldb: read-only mode") - ErrSnapshotReleased = errors.New("leveldb: snapshot released") - ErrIterReleased = errors.New("leveldb: iterator released") - ErrClosed = errors.New("leveldb: closed") -) diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/errors/errors.go b/vendor/github.com/syndtr/goleveldb/leveldb/errors/errors.go deleted file mode 100644 index 8d6146b6..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/errors/errors.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package errors provides common error types used throughout leveldb. -package errors - -import ( - "errors" - "fmt" - - "github.com/syndtr/goleveldb/leveldb/storage" - "github.com/syndtr/goleveldb/leveldb/util" -) - -// Common errors. -var ( - ErrNotFound = New("leveldb: not found") - ErrReleased = util.ErrReleased - ErrHasReleaser = util.ErrHasReleaser -) - -// New returns an error that formats as the given text. 
-func New(text string) error { - return errors.New(text) -} - -// ErrCorrupted is the type that wraps errors that indicate corruption in -// the database. -type ErrCorrupted struct { - Fd storage.FileDesc - Err error -} - -func (e *ErrCorrupted) Error() string { - if !e.Fd.Zero() { - return fmt.Sprintf("%v [file=%v]", e.Err, e.Fd) - } - return e.Err.Error() -} - -// NewErrCorrupted creates new ErrCorrupted error. -func NewErrCorrupted(fd storage.FileDesc, err error) error { - return &ErrCorrupted{fd, err} -} - -// IsCorrupted returns a boolean indicating whether the error is indicating -// a corruption. -func IsCorrupted(err error) bool { - switch err.(type) { - case *ErrCorrupted: - return true - case *storage.ErrCorrupted: - return true - } - return false -} - -// ErrMissingFiles is the type that indicating a corruption due to missing -// files. ErrMissingFiles always wrapped with ErrCorrupted. -type ErrMissingFiles struct { - Fds []storage.FileDesc -} - -func (e *ErrMissingFiles) Error() string { return "file missing" } - -// SetFd sets 'file info' of the given error with the given file. -// Currently only ErrCorrupted is supported, otherwise will do nothing. -func SetFd(err error, fd storage.FileDesc) error { - switch x := err.(type) { - case *ErrCorrupted: - x.Fd = fd - return x - } - return err -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/filter.go b/vendor/github.com/syndtr/goleveldb/leveldb/filter.go deleted file mode 100644 index e961e420..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/filter.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package leveldb - -import ( - "github.com/syndtr/goleveldb/leveldb/filter" -) - -type iFilter struct { - filter.Filter -} - -func (f iFilter) Contains(filter, key []byte) bool { - return f.Filter.Contains(filter, internalKey(key).ukey()) -} - -func (f iFilter) NewGenerator() filter.FilterGenerator { - return iFilterGenerator{f.Filter.NewGenerator()} -} - -type iFilterGenerator struct { - filter.FilterGenerator -} - -func (g iFilterGenerator) Add(key []byte) { - g.FilterGenerator.Add(internalKey(key).ukey()) -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/filter/bloom.go b/vendor/github.com/syndtr/goleveldb/leveldb/filter/bloom.go deleted file mode 100644 index bab0e997..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/filter/bloom.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package filter - -import ( - "github.com/syndtr/goleveldb/leveldb/util" -) - -func bloomHash(key []byte) uint32 { - return util.Hash(key, 0xbc9f1d34) -} - -type bloomFilter int - -// The bloom filter serializes its parameters and is backward compatible -// with respect to them. Therefor, its parameters are not added to its -// name. -func (bloomFilter) Name() string { - return "leveldb.BuiltinBloomFilter" -} - -func (f bloomFilter) Contains(filter, key []byte) bool { - nBytes := len(filter) - 1 - if nBytes < 1 { - return false - } - nBits := uint32(nBytes * 8) - - // Use the encoded k so that we can read filters generated by - // bloom filters created using different parameters. - k := filter[nBytes] - if k > 30 { - // Reserved for potentially new encodings for short bloom filters. - // Consider it a match. 
- return true - } - - kh := bloomHash(key) - delta := (kh >> 17) | (kh << 15) // Rotate right 17 bits - for j := uint8(0); j < k; j++ { - bitpos := kh % nBits - if (uint32(filter[bitpos/8]) & (1 << (bitpos % 8))) == 0 { - return false - } - kh += delta - } - return true -} - -func (f bloomFilter) NewGenerator() FilterGenerator { - // Round down to reduce probing cost a little bit. - k := uint8(f * 69 / 100) // 0.69 =~ ln(2) - if k < 1 { - k = 1 - } else if k > 30 { - k = 30 - } - return &bloomFilterGenerator{ - n: int(f), - k: k, - } -} - -type bloomFilterGenerator struct { - n int - k uint8 - - keyHashes []uint32 -} - -func (g *bloomFilterGenerator) Add(key []byte) { - // Use double-hashing to generate a sequence of hash values. - // See analysis in [Kirsch,Mitzenmacher 2006]. - g.keyHashes = append(g.keyHashes, bloomHash(key)) -} - -func (g *bloomFilterGenerator) Generate(b Buffer) { - // Compute bloom filter size (in both bits and bytes) - nBits := uint32(len(g.keyHashes) * g.n) - // For small n, we can see a very high false positive rate. Fix it - // by enforcing a minimum bloom filter length. - if nBits < 64 { - nBits = 64 - } - nBytes := (nBits + 7) / 8 - nBits = nBytes * 8 - - dest := b.Alloc(int(nBytes) + 1) - dest[nBytes] = g.k - for _, kh := range g.keyHashes { - delta := (kh >> 17) | (kh << 15) // Rotate right 17 bits - for j := uint8(0); j < g.k; j++ { - bitpos := kh % nBits - dest[bitpos/8] |= (1 << (bitpos % 8)) - kh += delta - } - } - - g.keyHashes = g.keyHashes[:0] -} - -// NewBloomFilter creates a new initialized bloom filter for given -// bitsPerKey. -// -// Since bitsPerKey is persisted individually for each bloom filter -// serialization, bloom filters are backwards compatible with respect to -// changing bitsPerKey. This means that no big performance penalty will -// be experienced when changing the parameter. See documentation for -// opt.Options.Filter for more information. 
-func NewBloomFilter(bitsPerKey int) Filter { - return bloomFilter(bitsPerKey) -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/filter/filter.go b/vendor/github.com/syndtr/goleveldb/leveldb/filter/filter.go deleted file mode 100644 index 7a925c5a..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/filter/filter.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package filter provides interface and implementation of probabilistic -// data structure. -// -// The filter is resposible for creating small filter from a set of keys. -// These filter will then used to test whether a key is a member of the set. -// In many cases, a filter can cut down the number of disk seeks from a -// handful to a single disk seek per DB.Get call. -package filter - -// Buffer is the interface that wraps basic Alloc, Write and WriteByte methods. -type Buffer interface { - // Alloc allocs n bytes of slice from the buffer. This also advancing - // write offset. - Alloc(n int) []byte - - // Write appends the contents of p to the buffer. - Write(p []byte) (n int, err error) - - // WriteByte appends the byte c to the buffer. - WriteByte(c byte) error -} - -// Filter is the filter. -type Filter interface { - // Name returns the name of this policy. - // - // Note that if the filter encoding changes in an incompatible way, - // the name returned by this method must be changed. Otherwise, old - // incompatible filters may be passed to methods of this type. - Name() string - - // NewGenerator creates a new filter generator. - NewGenerator() FilterGenerator - - // Contains returns true if the filter contains the given key. - // - // The filter are filters generated by the filter generator. - Contains(filter, key []byte) bool -} - -// FilterGenerator is the filter generator. 
-type FilterGenerator interface { - // Add adds a key to the filter generator. - // - // The key may become invalid after call to this method end, therefor - // key must be copied if implementation require keeping key for later - // use. The key should not modified directly, doing so may cause - // undefined results. - Add(key []byte) - - // Generate generates filters based on keys passed so far. After call - // to Generate the filter generator maybe resetted, depends on implementation. - Generate(b Buffer) -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go deleted file mode 100644 index a23ab05f..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go +++ /dev/null @@ -1,184 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package iterator - -import ( - "github.com/syndtr/goleveldb/leveldb/util" -) - -// BasicArray is the interface that wraps basic Len and Search method. -type BasicArray interface { - // Len returns length of the array. - Len() int - - // Search finds smallest index that point to a key that is greater - // than or equal to the given key. - Search(key []byte) int -} - -// Array is the interface that wraps BasicArray and basic Index method. -type Array interface { - BasicArray - - // Index returns key/value pair with index of i. - Index(i int) (key, value []byte) -} - -// Array is the interface that wraps BasicArray and basic Get method. -type ArrayIndexer interface { - BasicArray - - // Get returns a new data iterator with index of i. 
- Get(i int) Iterator -} - -type basicArrayIterator struct { - util.BasicReleaser - array BasicArray - pos int - err error -} - -func (i *basicArrayIterator) Valid() bool { - return i.pos >= 0 && i.pos < i.array.Len() && !i.Released() -} - -func (i *basicArrayIterator) First() bool { - if i.Released() { - i.err = ErrIterReleased - return false - } - - if i.array.Len() == 0 { - i.pos = -1 - return false - } - i.pos = 0 - return true -} - -func (i *basicArrayIterator) Last() bool { - if i.Released() { - i.err = ErrIterReleased - return false - } - - n := i.array.Len() - if n == 0 { - i.pos = 0 - return false - } - i.pos = n - 1 - return true -} - -func (i *basicArrayIterator) Seek(key []byte) bool { - if i.Released() { - i.err = ErrIterReleased - return false - } - - n := i.array.Len() - if n == 0 { - i.pos = 0 - return false - } - i.pos = i.array.Search(key) - if i.pos >= n { - return false - } - return true -} - -func (i *basicArrayIterator) Next() bool { - if i.Released() { - i.err = ErrIterReleased - return false - } - - i.pos++ - if n := i.array.Len(); i.pos >= n { - i.pos = n - return false - } - return true -} - -func (i *basicArrayIterator) Prev() bool { - if i.Released() { - i.err = ErrIterReleased - return false - } - - i.pos-- - if i.pos < 0 { - i.pos = -1 - return false - } - return true -} - -func (i *basicArrayIterator) Error() error { return i.err } - -type arrayIterator struct { - basicArrayIterator - array Array - pos int - key, value []byte -} - -func (i *arrayIterator) updateKV() { - if i.pos == i.basicArrayIterator.pos { - return - } - i.pos = i.basicArrayIterator.pos - if i.Valid() { - i.key, i.value = i.array.Index(i.pos) - } else { - i.key = nil - i.value = nil - } -} - -func (i *arrayIterator) Key() []byte { - i.updateKV() - return i.key -} - -func (i *arrayIterator) Value() []byte { - i.updateKV() - return i.value -} - -type arrayIteratorIndexer struct { - basicArrayIterator - array ArrayIndexer -} - -func (i *arrayIteratorIndexer) Get() 
Iterator { - if i.Valid() { - return i.array.Get(i.basicArrayIterator.pos) - } - return nil -} - -// NewArrayIterator returns an iterator from the given array. -func NewArrayIterator(array Array) Iterator { - return &arrayIterator{ - basicArrayIterator: basicArrayIterator{array: array, pos: -1}, - array: array, - pos: -1, - } -} - -// NewArrayIndexer returns an index iterator from the given array. -func NewArrayIndexer(array ArrayIndexer) IteratorIndexer { - return &arrayIteratorIndexer{ - basicArrayIterator: basicArrayIterator{array: array, pos: -1}, - array: array, - } -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go deleted file mode 100644 index 939adbb9..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go +++ /dev/null @@ -1,242 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package iterator - -import ( - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/util" -) - -// IteratorIndexer is the interface that wraps CommonIterator and basic Get -// method. IteratorIndexer provides index for indexed iterator. -type IteratorIndexer interface { - CommonIterator - - // Get returns a new data iterator for the current position, or nil if - // done. 
- Get() Iterator -} - -type indexedIterator struct { - util.BasicReleaser - index IteratorIndexer - strict bool - - data Iterator - err error - errf func(err error) - closed bool -} - -func (i *indexedIterator) setData() { - if i.data != nil { - i.data.Release() - } - i.data = i.index.Get() -} - -func (i *indexedIterator) clearData() { - if i.data != nil { - i.data.Release() - } - i.data = nil -} - -func (i *indexedIterator) indexErr() { - if err := i.index.Error(); err != nil { - if i.errf != nil { - i.errf(err) - } - i.err = err - } -} - -func (i *indexedIterator) dataErr() bool { - if err := i.data.Error(); err != nil { - if i.errf != nil { - i.errf(err) - } - if i.strict || !errors.IsCorrupted(err) { - i.err = err - return true - } - } - return false -} - -func (i *indexedIterator) Valid() bool { - return i.data != nil && i.data.Valid() -} - -func (i *indexedIterator) First() bool { - if i.err != nil { - return false - } else if i.Released() { - i.err = ErrIterReleased - return false - } - - if !i.index.First() { - i.indexErr() - i.clearData() - return false - } - i.setData() - return i.Next() -} - -func (i *indexedIterator) Last() bool { - if i.err != nil { - return false - } else if i.Released() { - i.err = ErrIterReleased - return false - } - - if !i.index.Last() { - i.indexErr() - i.clearData() - return false - } - i.setData() - if !i.data.Last() { - if i.dataErr() { - return false - } - i.clearData() - return i.Prev() - } - return true -} - -func (i *indexedIterator) Seek(key []byte) bool { - if i.err != nil { - return false - } else if i.Released() { - i.err = ErrIterReleased - return false - } - - if !i.index.Seek(key) { - i.indexErr() - i.clearData() - return false - } - i.setData() - if !i.data.Seek(key) { - if i.dataErr() { - return false - } - i.clearData() - return i.Next() - } - return true -} - -func (i *indexedIterator) Next() bool { - if i.err != nil { - return false - } else if i.Released() { - i.err = ErrIterReleased - return false - } - - 
switch { - case i.data != nil && !i.data.Next(): - if i.dataErr() { - return false - } - i.clearData() - fallthrough - case i.data == nil: - if !i.index.Next() { - i.indexErr() - return false - } - i.setData() - return i.Next() - } - return true -} - -func (i *indexedIterator) Prev() bool { - if i.err != nil { - return false - } else if i.Released() { - i.err = ErrIterReleased - return false - } - - switch { - case i.data != nil && !i.data.Prev(): - if i.dataErr() { - return false - } - i.clearData() - fallthrough - case i.data == nil: - if !i.index.Prev() { - i.indexErr() - return false - } - i.setData() - if !i.data.Last() { - if i.dataErr() { - return false - } - i.clearData() - return i.Prev() - } - } - return true -} - -func (i *indexedIterator) Key() []byte { - if i.data == nil { - return nil - } - return i.data.Key() -} - -func (i *indexedIterator) Value() []byte { - if i.data == nil { - return nil - } - return i.data.Value() -} - -func (i *indexedIterator) Release() { - i.clearData() - i.index.Release() - i.BasicReleaser.Release() -} - -func (i *indexedIterator) Error() error { - if i.err != nil { - return i.err - } - if err := i.index.Error(); err != nil { - return err - } - return nil -} - -func (i *indexedIterator) SetErrorCallback(f func(err error)) { - i.errf = f -} - -// NewIndexedIterator returns an 'indexed iterator'. An index is iterator -// that returns another iterator, a 'data iterator'. A 'data iterator' is the -// iterator that contains actual key/value pairs. -// -// If strict is true the any 'corruption errors' (i.e errors.IsCorrupted(err) == true) -// won't be ignored and will halt 'indexed iterator', otherwise the iterator will -// continue to the next 'data iterator'. Corruption on 'index iterator' will not be -// ignored and will halt the iterator. 
-func NewIndexedIterator(index IteratorIndexer, strict bool) Iterator { - return &indexedIterator{index: index, strict: strict} -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter.go b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter.go deleted file mode 100644 index 96fb0f68..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package iterator provides interface and implementation to traverse over -// contents of a database. -package iterator - -import ( - "errors" - - "github.com/syndtr/goleveldb/leveldb/util" -) - -var ( - ErrIterReleased = errors.New("leveldb/iterator: iterator released") -) - -// IteratorSeeker is the interface that wraps the 'seeks method'. -type IteratorSeeker interface { - // First moves the iterator to the first key/value pair. If the iterator - // only contains one key/value pair then First and Last would moves - // to the same key/value pair. - // It returns whether such pair exist. - First() bool - - // Last moves the iterator to the last key/value pair. If the iterator - // only contains one key/value pair then First and Last would moves - // to the same key/value pair. - // It returns whether such pair exist. - Last() bool - - // Seek moves the iterator to the first key/value pair whose key is greater - // than or equal to the given key. - // It returns whether such pair exist. - // - // It is safe to modify the contents of the argument after Seek returns. - Seek(key []byte) bool - - // Next moves the iterator to the next key/value pair. - // It returns false if the iterator is exhausted. - Next() bool - - // Prev moves the iterator to the previous key/value pair. - // It returns false if the iterator is exhausted. 
- Prev() bool -} - -// CommonIterator is the interface that wraps common iterator methods. -type CommonIterator interface { - IteratorSeeker - - // util.Releaser is the interface that wraps basic Release method. - // When called Release will releases any resources associated with the - // iterator. - util.Releaser - - // util.ReleaseSetter is the interface that wraps the basic SetReleaser - // method. - util.ReleaseSetter - - // TODO: Remove this when ready. - Valid() bool - - // Error returns any accumulated error. Exhausting all the key/value pairs - // is not considered to be an error. - Error() error -} - -// Iterator iterates over a DB's key/value pairs in key order. -// -// When encounter an error any 'seeks method' will return false and will -// yield no key/value pairs. The error can be queried by calling the Error -// method. Calling Release is still necessary. -// -// An iterator must be released after use, but it is not necessary to read -// an iterator until exhaustion. -// Also, an iterator is not necessarily safe for concurrent use, but it is -// safe to use multiple iterators concurrently, with each in a dedicated -// goroutine. -type Iterator interface { - CommonIterator - - // Key returns the key of the current key/value pair, or nil if done. - // The caller should not modify the contents of the returned slice, and - // its contents may change on the next call to any 'seeks method'. - Key() []byte - - // Value returns the value of the current key/value pair, or nil if done. - // The caller should not modify the contents of the returned slice, and - // its contents may change on the next call to any 'seeks method'. - Value() []byte -} - -// ErrorCallbackSetter is the interface that wraps basic SetErrorCallback -// method. -// -// ErrorCallbackSetter implemented by indexed and merged iterator. -type ErrorCallbackSetter interface { - // SetErrorCallback allows set an error callback of the corresponding - // iterator. Use nil to clear the callback. 
- SetErrorCallback(f func(err error)) -} - -type emptyIterator struct { - util.BasicReleaser - err error -} - -func (i *emptyIterator) rErr() { - if i.err == nil && i.Released() { - i.err = ErrIterReleased - } -} - -func (*emptyIterator) Valid() bool { return false } -func (i *emptyIterator) First() bool { i.rErr(); return false } -func (i *emptyIterator) Last() bool { i.rErr(); return false } -func (i *emptyIterator) Seek(key []byte) bool { i.rErr(); return false } -func (i *emptyIterator) Next() bool { i.rErr(); return false } -func (i *emptyIterator) Prev() bool { i.rErr(); return false } -func (*emptyIterator) Key() []byte { return nil } -func (*emptyIterator) Value() []byte { return nil } -func (i *emptyIterator) Error() error { return i.err } - -// NewEmptyIterator creates an empty iterator. The err parameter can be -// nil, but if not nil the given err will be returned by Error method. -func NewEmptyIterator(err error) Iterator { - return &emptyIterator{err: err} -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go deleted file mode 100644 index 1a7e29df..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go +++ /dev/null @@ -1,304 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package iterator - -import ( - "github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/util" -) - -type dir int - -const ( - dirReleased dir = iota - 1 - dirSOI - dirEOI - dirBackward - dirForward -) - -type mergedIterator struct { - cmp comparer.Comparer - iters []Iterator - strict bool - - keys [][]byte - index int - dir dir - err error - errf func(err error) - releaser util.Releaser -} - -func assertKey(key []byte) []byte { - if key == nil { - panic("leveldb/iterator: nil key") - } - return key -} - -func (i *mergedIterator) iterErr(iter Iterator) bool { - if err := iter.Error(); err != nil { - if i.errf != nil { - i.errf(err) - } - if i.strict || !errors.IsCorrupted(err) { - i.err = err - return true - } - } - return false -} - -func (i *mergedIterator) Valid() bool { - return i.err == nil && i.dir > dirEOI -} - -func (i *mergedIterator) First() bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - for x, iter := range i.iters { - switch { - case iter.First(): - i.keys[x] = assertKey(iter.Key()) - case i.iterErr(iter): - return false - default: - i.keys[x] = nil - } - } - i.dir = dirSOI - return i.next() -} - -func (i *mergedIterator) Last() bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - for x, iter := range i.iters { - switch { - case iter.Last(): - i.keys[x] = assertKey(iter.Key()) - case i.iterErr(iter): - return false - default: - i.keys[x] = nil - } - } - i.dir = dirEOI - return i.prev() -} - -func (i *mergedIterator) Seek(key []byte) bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - for x, iter := range i.iters { - switch { - case iter.Seek(key): - i.keys[x] = assertKey(iter.Key()) - case i.iterErr(iter): - return false - default: - i.keys[x] = nil - } - 
} - i.dir = dirSOI - return i.next() -} - -func (i *mergedIterator) next() bool { - var key []byte - if i.dir == dirForward { - key = i.keys[i.index] - } - for x, tkey := range i.keys { - if tkey != nil && (key == nil || i.cmp.Compare(tkey, key) < 0) { - key = tkey - i.index = x - } - } - if key == nil { - i.dir = dirEOI - return false - } - i.dir = dirForward - return true -} - -func (i *mergedIterator) Next() bool { - if i.dir == dirEOI || i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - switch i.dir { - case dirSOI: - return i.First() - case dirBackward: - key := append([]byte{}, i.keys[i.index]...) - if !i.Seek(key) { - return false - } - return i.Next() - } - - x := i.index - iter := i.iters[x] - switch { - case iter.Next(): - i.keys[x] = assertKey(iter.Key()) - case i.iterErr(iter): - return false - default: - i.keys[x] = nil - } - return i.next() -} - -func (i *mergedIterator) prev() bool { - var key []byte - if i.dir == dirBackward { - key = i.keys[i.index] - } - for x, tkey := range i.keys { - if tkey != nil && (key == nil || i.cmp.Compare(tkey, key) > 0) { - key = tkey - i.index = x - } - } - if key == nil { - i.dir = dirSOI - return false - } - i.dir = dirBackward - return true -} - -func (i *mergedIterator) Prev() bool { - if i.dir == dirSOI || i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - switch i.dir { - case dirEOI: - return i.Last() - case dirForward: - key := append([]byte{}, i.keys[i.index]...) 
- for x, iter := range i.iters { - if x == i.index { - continue - } - seek := iter.Seek(key) - switch { - case seek && iter.Prev(), !seek && iter.Last(): - i.keys[x] = assertKey(iter.Key()) - case i.iterErr(iter): - return false - default: - i.keys[x] = nil - } - } - } - - x := i.index - iter := i.iters[x] - switch { - case iter.Prev(): - i.keys[x] = assertKey(iter.Key()) - case i.iterErr(iter): - return false - default: - i.keys[x] = nil - } - return i.prev() -} - -func (i *mergedIterator) Key() []byte { - if i.err != nil || i.dir <= dirEOI { - return nil - } - return i.keys[i.index] -} - -func (i *mergedIterator) Value() []byte { - if i.err != nil || i.dir <= dirEOI { - return nil - } - return i.iters[i.index].Value() -} - -func (i *mergedIterator) Release() { - if i.dir != dirReleased { - i.dir = dirReleased - for _, iter := range i.iters { - iter.Release() - } - i.iters = nil - i.keys = nil - if i.releaser != nil { - i.releaser.Release() - i.releaser = nil - } - } -} - -func (i *mergedIterator) SetReleaser(releaser util.Releaser) { - if i.dir == dirReleased { - panic(util.ErrReleased) - } - if i.releaser != nil && releaser != nil { - panic(util.ErrHasReleaser) - } - i.releaser = releaser -} - -func (i *mergedIterator) Error() error { - return i.err -} - -func (i *mergedIterator) SetErrorCallback(f func(err error)) { - i.errf = f -} - -// NewMergedIterator returns an iterator that merges its input. Walking the -// resultant iterator will return all key/value pairs of all input iterators -// in strictly increasing key order, as defined by cmp. -// The input's key ranges may overlap, but there are assumed to be no duplicate -// keys: if iters[i] contains a key k then iters[j] will not contain that key k. -// None of the iters may be nil. -// -// If strict is true the any 'corruption errors' (i.e errors.IsCorrupted(err) == true) -// won't be ignored and will halt 'merged iterator', otherwise the iterator will -// continue to the next 'input iterator'. 
-func NewMergedIterator(iters []Iterator, cmp comparer.Comparer, strict bool) Iterator { - return &mergedIterator{ - iters: iters, - cmp: cmp, - strict: strict, - keys: make([][]byte, len(iters)), - } -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/journal/journal.go b/vendor/github.com/syndtr/goleveldb/leveldb/journal/journal.go deleted file mode 100644 index d094c3d0..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/journal/journal.go +++ /dev/null @@ -1,524 +0,0 @@ -// Copyright 2011 The LevelDB-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Taken from: https://code.google.com/p/leveldb-go/source/browse/leveldb/record/record.go?r=1d5ccbe03246da926391ee12d1c6caae054ff4b0 -// License, authors and contributors informations can be found at bellow URLs respectively: -// https://code.google.com/p/leveldb-go/source/browse/LICENSE -// https://code.google.com/p/leveldb-go/source/browse/AUTHORS -// https://code.google.com/p/leveldb-go/source/browse/CONTRIBUTORS - -// Package journal reads and writes sequences of journals. Each journal is a stream -// of bytes that completes before the next journal starts. -// -// When reading, call Next to obtain an io.Reader for the next journal. Next will -// return io.EOF when there are no more journals. It is valid to call Next -// without reading the current journal to exhaustion. -// -// When writing, call Next to obtain an io.Writer for the next journal. Calling -// Next finishes the current journal. Call Close to finish the final journal. -// -// Optionally, call Flush to finish the current journal and flush the underlying -// writer without starting a new journal. To start a new journal after flushing, -// call Next. -// -// Neither Readers or Writers are safe to use concurrently. 
-// -// Example code: -// func read(r io.Reader) ([]string, error) { -// var ss []string -// journals := journal.NewReader(r, nil, true, true) -// for { -// j, err := journals.Next() -// if err == io.EOF { -// break -// } -// if err != nil { -// return nil, err -// } -// s, err := ioutil.ReadAll(j) -// if err != nil { -// return nil, err -// } -// ss = append(ss, string(s)) -// } -// return ss, nil -// } -// -// func write(w io.Writer, ss []string) error { -// journals := journal.NewWriter(w) -// for _, s := range ss { -// j, err := journals.Next() -// if err != nil { -// return err -// } -// if _, err := j.Write([]byte(s)), err != nil { -// return err -// } -// } -// return journals.Close() -// } -// -// The wire format is that the stream is divided into 32KiB blocks, and each -// block contains a number of tightly packed chunks. Chunks cannot cross block -// boundaries. The last block may be shorter than 32 KiB. Any unused bytes in a -// block must be zero. -// -// A journal maps to one or more chunks. Each chunk has a 7 byte header (a 4 -// byte checksum, a 2 byte little-endian uint16 length, and a 1 byte chunk type) -// followed by a payload. The checksum is over the chunk type and the payload. -// -// There are four chunk types: whether the chunk is the full journal, or the -// first, middle or last chunk of a multi-chunk journal. A multi-chunk journal -// has one first chunk, zero or more middle chunks, and one last chunk. -// -// The wire format allows for limited recovery in the face of data corruption: -// on a format error (such as a checksum mismatch), the reader moves to the -// next block and looks for the next full or first chunk. -package journal - -import ( - "encoding/binary" - "fmt" - "io" - - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/storage" - "github.com/syndtr/goleveldb/leveldb/util" -) - -// These constants are part of the wire format and should not be changed. 
-const ( - fullChunkType = 1 - firstChunkType = 2 - middleChunkType = 3 - lastChunkType = 4 -) - -const ( - blockSize = 32 * 1024 - headerSize = 7 -) - -type flusher interface { - Flush() error -} - -// ErrCorrupted is the error type that generated by corrupted block or chunk. -type ErrCorrupted struct { - Size int - Reason string -} - -func (e *ErrCorrupted) Error() string { - return fmt.Sprintf("leveldb/journal: block/chunk corrupted: %s (%d bytes)", e.Reason, e.Size) -} - -// Dropper is the interface that wrap simple Drop method. The Drop -// method will be called when the journal reader dropping a block or chunk. -type Dropper interface { - Drop(err error) -} - -// Reader reads journals from an underlying io.Reader. -type Reader struct { - // r is the underlying reader. - r io.Reader - // the dropper. - dropper Dropper - // strict flag. - strict bool - // checksum flag. - checksum bool - // seq is the sequence number of the current journal. - seq int - // buf[i:j] is the unread portion of the current chunk's payload. - // The low bound, i, excludes the chunk header. - i, j int - // n is the number of bytes of buf that are valid. Once reading has started, - // only the final block can have n < blockSize. - n int - // last is whether the current chunk is the last chunk of the journal. - last bool - // err is any accumulated error. - err error - // buf is the buffer. - buf [blockSize]byte -} - -// NewReader returns a new reader. The dropper may be nil, and if -// strict is true then corrupted or invalid chunk will halt the journal -// reader entirely. 
-func NewReader(r io.Reader, dropper Dropper, strict, checksum bool) *Reader { - return &Reader{ - r: r, - dropper: dropper, - strict: strict, - checksum: checksum, - last: true, - } -} - -var errSkip = errors.New("leveldb/journal: skipped") - -func (r *Reader) corrupt(n int, reason string, skip bool) error { - if r.dropper != nil { - r.dropper.Drop(&ErrCorrupted{n, reason}) - } - if r.strict && !skip { - r.err = errors.NewErrCorrupted(storage.FileDesc{}, &ErrCorrupted{n, reason}) - return r.err - } - return errSkip -} - -// nextChunk sets r.buf[r.i:r.j] to hold the next chunk's payload, reading the -// next block into the buffer if necessary. -func (r *Reader) nextChunk(first bool) error { - for { - if r.j+headerSize <= r.n { - checksum := binary.LittleEndian.Uint32(r.buf[r.j+0 : r.j+4]) - length := binary.LittleEndian.Uint16(r.buf[r.j+4 : r.j+6]) - chunkType := r.buf[r.j+6] - unprocBlock := r.n - r.j - if checksum == 0 && length == 0 && chunkType == 0 { - // Drop entire block. - r.i = r.n - r.j = r.n - return r.corrupt(unprocBlock, "zero header", false) - } - if chunkType < fullChunkType || chunkType > lastChunkType { - // Drop entire block. - r.i = r.n - r.j = r.n - return r.corrupt(unprocBlock, fmt.Sprintf("invalid chunk type %#x", chunkType), false) - } - r.i = r.j + headerSize - r.j = r.j + headerSize + int(length) - if r.j > r.n { - // Drop entire block. - r.i = r.n - r.j = r.n - return r.corrupt(unprocBlock, "chunk length overflows block", false) - } else if r.checksum && checksum != util.NewCRC(r.buf[r.i-1:r.j]).Value() { - // Drop entire block. - r.i = r.n - r.j = r.n - return r.corrupt(unprocBlock, "checksum mismatch", false) - } - if first && chunkType != fullChunkType && chunkType != firstChunkType { - chunkLength := (r.j - r.i) + headerSize - r.i = r.j - // Report the error, but skip it. - return r.corrupt(chunkLength, "orphan chunk", true) - } - r.last = chunkType == fullChunkType || chunkType == lastChunkType - return nil - } - - // The last block. 
- if r.n < blockSize && r.n > 0 { - if !first { - return r.corrupt(0, "missing chunk part", false) - } - r.err = io.EOF - return r.err - } - - // Read block. - n, err := io.ReadFull(r.r, r.buf[:]) - if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { - return err - } - if n == 0 { - if !first { - return r.corrupt(0, "missing chunk part", false) - } - r.err = io.EOF - return r.err - } - r.i, r.j, r.n = 0, 0, n - } -} - -// Next returns a reader for the next journal. It returns io.EOF if there are no -// more journals. The reader returned becomes stale after the next Next call, -// and should no longer be used. If strict is false, the reader will returns -// io.ErrUnexpectedEOF error when found corrupted journal. -func (r *Reader) Next() (io.Reader, error) { - r.seq++ - if r.err != nil { - return nil, r.err - } - r.i = r.j - for { - if err := r.nextChunk(true); err == nil { - break - } else if err != errSkip { - return nil, err - } - } - return &singleReader{r, r.seq, nil}, nil -} - -// Reset resets the journal reader, allows reuse of the journal reader. Reset returns -// last accumulated error. 
-func (r *Reader) Reset(reader io.Reader, dropper Dropper, strict, checksum bool) error { - r.seq++ - err := r.err - r.r = reader - r.dropper = dropper - r.strict = strict - r.checksum = checksum - r.i = 0 - r.j = 0 - r.n = 0 - r.last = true - r.err = nil - return err -} - -type singleReader struct { - r *Reader - seq int - err error -} - -func (x *singleReader) Read(p []byte) (int, error) { - r := x.r - if r.seq != x.seq { - return 0, errors.New("leveldb/journal: stale reader") - } - if x.err != nil { - return 0, x.err - } - if r.err != nil { - return 0, r.err - } - for r.i == r.j { - if r.last { - return 0, io.EOF - } - x.err = r.nextChunk(false) - if x.err != nil { - if x.err == errSkip { - x.err = io.ErrUnexpectedEOF - } - return 0, x.err - } - } - n := copy(p, r.buf[r.i:r.j]) - r.i += n - return n, nil -} - -func (x *singleReader) ReadByte() (byte, error) { - r := x.r - if r.seq != x.seq { - return 0, errors.New("leveldb/journal: stale reader") - } - if x.err != nil { - return 0, x.err - } - if r.err != nil { - return 0, r.err - } - for r.i == r.j { - if r.last { - return 0, io.EOF - } - x.err = r.nextChunk(false) - if x.err != nil { - if x.err == errSkip { - x.err = io.ErrUnexpectedEOF - } - return 0, x.err - } - } - c := r.buf[r.i] - r.i++ - return c, nil -} - -// Writer writes journals to an underlying io.Writer. -type Writer struct { - // w is the underlying writer. - w io.Writer - // seq is the sequence number of the current journal. - seq int - // f is w as a flusher. - f flusher - // buf[i:j] is the bytes that will become the current chunk. - // The low bound, i, includes the chunk header. - i, j int - // buf[:written] has already been written to w. - // written is zero unless Flush has been called. - written int - // first is whether the current chunk is the first chunk of the journal. - first bool - // pending is whether a chunk is buffered but not yet written. - pending bool - // err is any accumulated error. - err error - // buf is the buffer. 
- buf [blockSize]byte -} - -// NewWriter returns a new Writer. -func NewWriter(w io.Writer) *Writer { - f, _ := w.(flusher) - return &Writer{ - w: w, - f: f, - } -} - -// fillHeader fills in the header for the pending chunk. -func (w *Writer) fillHeader(last bool) { - if w.i+headerSize > w.j || w.j > blockSize { - panic("leveldb/journal: bad writer state") - } - if last { - if w.first { - w.buf[w.i+6] = fullChunkType - } else { - w.buf[w.i+6] = lastChunkType - } - } else { - if w.first { - w.buf[w.i+6] = firstChunkType - } else { - w.buf[w.i+6] = middleChunkType - } - } - binary.LittleEndian.PutUint32(w.buf[w.i+0:w.i+4], util.NewCRC(w.buf[w.i+6:w.j]).Value()) - binary.LittleEndian.PutUint16(w.buf[w.i+4:w.i+6], uint16(w.j-w.i-headerSize)) -} - -// writeBlock writes the buffered block to the underlying writer, and reserves -// space for the next chunk's header. -func (w *Writer) writeBlock() { - _, w.err = w.w.Write(w.buf[w.written:]) - w.i = 0 - w.j = headerSize - w.written = 0 -} - -// writePending finishes the current journal and writes the buffer to the -// underlying writer. -func (w *Writer) writePending() { - if w.err != nil { - return - } - if w.pending { - w.fillHeader(true) - w.pending = false - } - _, w.err = w.w.Write(w.buf[w.written:w.j]) - w.written = w.j -} - -// Close finishes the current journal and closes the writer. -func (w *Writer) Close() error { - w.seq++ - w.writePending() - if w.err != nil { - return w.err - } - w.err = errors.New("leveldb/journal: closed Writer") - return nil -} - -// Flush finishes the current journal, writes to the underlying writer, and -// flushes it if that writer implements interface{ Flush() error }. -func (w *Writer) Flush() error { - w.seq++ - w.writePending() - if w.err != nil { - return w.err - } - if w.f != nil { - w.err = w.f.Flush() - return w.err - } - return nil -} - -// Reset resets the journal writer, allows reuse of the journal writer. Reset -// will also closes the journal writer if not already. 
-func (w *Writer) Reset(writer io.Writer) (err error) { - w.seq++ - if w.err == nil { - w.writePending() - err = w.err - } - w.w = writer - w.f, _ = writer.(flusher) - w.i = 0 - w.j = 0 - w.written = 0 - w.first = false - w.pending = false - w.err = nil - return -} - -// Next returns a writer for the next journal. The writer returned becomes stale -// after the next Close, Flush or Next call, and should no longer be used. -func (w *Writer) Next() (io.Writer, error) { - w.seq++ - if w.err != nil { - return nil, w.err - } - if w.pending { - w.fillHeader(true) - } - w.i = w.j - w.j = w.j + headerSize - // Check if there is room in the block for the header. - if w.j > blockSize { - // Fill in the rest of the block with zeroes. - for k := w.i; k < blockSize; k++ { - w.buf[k] = 0 - } - w.writeBlock() - if w.err != nil { - return nil, w.err - } - } - w.first = true - w.pending = true - return singleWriter{w, w.seq}, nil -} - -type singleWriter struct { - w *Writer - seq int -} - -func (x singleWriter) Write(p []byte) (int, error) { - w := x.w - if w.seq != x.seq { - return 0, errors.New("leveldb/journal: stale writer") - } - if w.err != nil { - return 0, w.err - } - n0 := len(p) - for len(p) > 0 { - // Write a block, if it is full. - if w.j == blockSize { - w.fillHeader(false) - w.writeBlock() - if w.err != nil { - return 0, w.err - } - w.first = false - } - // Copy bytes into the buffer. - n := copy(w.buf[w.j:], p) - w.j += n - p = p[n:] - } - return n0, nil -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/key.go b/vendor/github.com/syndtr/goleveldb/leveldb/key.go deleted file mode 100644 index ad8f51ec..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/key.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package leveldb - -import ( - "encoding/binary" - "fmt" - - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/storage" -) - -// ErrInternalKeyCorrupted records internal key corruption. -type ErrInternalKeyCorrupted struct { - Ikey []byte - Reason string -} - -func (e *ErrInternalKeyCorrupted) Error() string { - return fmt.Sprintf("leveldb: internal key %q corrupted: %s", e.Ikey, e.Reason) -} - -func newErrInternalKeyCorrupted(ikey []byte, reason string) error { - return errors.NewErrCorrupted(storage.FileDesc{}, &ErrInternalKeyCorrupted{append([]byte{}, ikey...), reason}) -} - -type keyType uint - -func (kt keyType) String() string { - switch kt { - case keyTypeDel: - return "d" - case keyTypeVal: - return "v" - } - return fmt.Sprintf("", uint(kt)) -} - -// Value types encoded as the last component of internal keys. -// Don't modify; this value are saved to disk. -const ( - keyTypeDel = keyType(0) - keyTypeVal = keyType(1) -) - -// keyTypeSeek defines the keyType that should be passed when constructing an -// internal key for seeking to a particular sequence number (since we -// sort sequence numbers in decreasing order and the value type is -// embedded as the low 8 bits in the sequence number in internal keys, -// we need to use the highest-numbered ValueType, not the lowest). -const keyTypeSeek = keyTypeVal - -const ( - // Maximum value possible for sequence number; the 8-bits are - // used by value type, so its can packed together in single - // 64-bit integer. - keyMaxSeq = (uint64(1) << 56) - 1 - // Maximum value possible for packed sequence number and type. - keyMaxNum = (keyMaxSeq << 8) | uint64(keyTypeSeek) -) - -// Maximum number encoded in bytes. 
-var keyMaxNumBytes = make([]byte, 8) - -func init() { - binary.LittleEndian.PutUint64(keyMaxNumBytes, keyMaxNum) -} - -type internalKey []byte - -func makeInternalKey(dst, ukey []byte, seq uint64, kt keyType) internalKey { - if seq > keyMaxSeq { - panic("leveldb: invalid sequence number") - } else if kt > keyTypeVal { - panic("leveldb: invalid type") - } - - dst = ensureBuffer(dst, len(ukey)+8) - copy(dst, ukey) - binary.LittleEndian.PutUint64(dst[len(ukey):], (seq<<8)|uint64(kt)) - return internalKey(dst) -} - -func parseInternalKey(ik []byte) (ukey []byte, seq uint64, kt keyType, err error) { - if len(ik) < 8 { - return nil, 0, 0, newErrInternalKeyCorrupted(ik, "invalid length") - } - num := binary.LittleEndian.Uint64(ik[len(ik)-8:]) - seq, kt = uint64(num>>8), keyType(num&0xff) - if kt > keyTypeVal { - return nil, 0, 0, newErrInternalKeyCorrupted(ik, "invalid type") - } - ukey = ik[:len(ik)-8] - return -} - -func validInternalKey(ik []byte) bool { - _, _, _, err := parseInternalKey(ik) - return err == nil -} - -func (ik internalKey) assert() { - if ik == nil { - panic("leveldb: nil internalKey") - } - if len(ik) < 8 { - panic(fmt.Sprintf("leveldb: internal key %q, len=%d: invalid length", []byte(ik), len(ik))) - } -} - -func (ik internalKey) ukey() []byte { - ik.assert() - return ik[:len(ik)-8] -} - -func (ik internalKey) num() uint64 { - ik.assert() - return binary.LittleEndian.Uint64(ik[len(ik)-8:]) -} - -func (ik internalKey) parseNum() (seq uint64, kt keyType) { - num := ik.num() - seq, kt = uint64(num>>8), keyType(num&0xff) - if kt > keyTypeVal { - panic(fmt.Sprintf("leveldb: internal key %q, len=%d: invalid type %#x", []byte(ik), len(ik), kt)) - } - return -} - -func (ik internalKey) String() string { - if ik == nil { - return "" - } - - if ukey, seq, kt, err := parseInternalKey(ik); err == nil { - return fmt.Sprintf("%s,%s%d", shorten(string(ukey)), kt, seq) - } - return fmt.Sprintf("", []byte(ik)) -} diff --git 
a/vendor/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go b/vendor/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go deleted file mode 100644 index 824e47f5..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go +++ /dev/null @@ -1,479 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package memdb provides in-memory key/value database implementation. -package memdb - -import ( - "math/rand" - "sync" - - "github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/util" -) - -// Common errors. -var ( - ErrNotFound = errors.ErrNotFound - ErrIterReleased = errors.New("leveldb/memdb: iterator released") -) - -const tMaxHeight = 12 - -type dbIter struct { - util.BasicReleaser - p *DB - slice *util.Range - node int - forward bool - key, value []byte - err error -} - -func (i *dbIter) fill(checkStart, checkLimit bool) bool { - if i.node != 0 { - n := i.p.nodeData[i.node] - m := n + i.p.nodeData[i.node+nKey] - i.key = i.p.kvData[n:m] - if i.slice != nil { - switch { - case checkLimit && i.slice.Limit != nil && i.p.cmp.Compare(i.key, i.slice.Limit) >= 0: - fallthrough - case checkStart && i.slice.Start != nil && i.p.cmp.Compare(i.key, i.slice.Start) < 0: - i.node = 0 - goto bail - } - } - i.value = i.p.kvData[m : m+i.p.nodeData[i.node+nVal]] - return true - } -bail: - i.key = nil - i.value = nil - return false -} - -func (i *dbIter) Valid() bool { - return i.node != 0 -} - -func (i *dbIter) First() bool { - if i.Released() { - i.err = ErrIterReleased - return false - } - - i.forward = true - i.p.mu.RLock() - defer i.p.mu.RUnlock() - if i.slice != nil && i.slice.Start != nil { - i.node, _ = i.p.findGE(i.slice.Start, false) - } else { - i.node = i.p.nodeData[nNext] - } - return i.fill(false, 
true) -} - -func (i *dbIter) Last() bool { - if i.Released() { - i.err = ErrIterReleased - return false - } - - i.forward = false - i.p.mu.RLock() - defer i.p.mu.RUnlock() - if i.slice != nil && i.slice.Limit != nil { - i.node = i.p.findLT(i.slice.Limit) - } else { - i.node = i.p.findLast() - } - return i.fill(true, false) -} - -func (i *dbIter) Seek(key []byte) bool { - if i.Released() { - i.err = ErrIterReleased - return false - } - - i.forward = true - i.p.mu.RLock() - defer i.p.mu.RUnlock() - if i.slice != nil && i.slice.Start != nil && i.p.cmp.Compare(key, i.slice.Start) < 0 { - key = i.slice.Start - } - i.node, _ = i.p.findGE(key, false) - return i.fill(false, true) -} - -func (i *dbIter) Next() bool { - if i.Released() { - i.err = ErrIterReleased - return false - } - - if i.node == 0 { - if !i.forward { - return i.First() - } - return false - } - i.forward = true - i.p.mu.RLock() - defer i.p.mu.RUnlock() - i.node = i.p.nodeData[i.node+nNext] - return i.fill(false, true) -} - -func (i *dbIter) Prev() bool { - if i.Released() { - i.err = ErrIterReleased - return false - } - - if i.node == 0 { - if i.forward { - return i.Last() - } - return false - } - i.forward = false - i.p.mu.RLock() - defer i.p.mu.RUnlock() - i.node = i.p.findLT(i.key) - return i.fill(true, false) -} - -func (i *dbIter) Key() []byte { - return i.key -} - -func (i *dbIter) Value() []byte { - return i.value -} - -func (i *dbIter) Error() error { return i.err } - -func (i *dbIter) Release() { - if !i.Released() { - i.p = nil - i.node = 0 - i.key = nil - i.value = nil - i.BasicReleaser.Release() - } -} - -const ( - nKV = iota - nKey - nVal - nHeight - nNext -) - -// DB is an in-memory key/value database. 
-type DB struct { - cmp comparer.BasicComparer - rnd *rand.Rand - - mu sync.RWMutex - kvData []byte - // Node data: - // [0] : KV offset - // [1] : Key length - // [2] : Value length - // [3] : Height - // [3..height] : Next nodes - nodeData []int - prevNode [tMaxHeight]int - maxHeight int - n int - kvSize int -} - -func (p *DB) randHeight() (h int) { - const branching = 4 - h = 1 - for h < tMaxHeight && p.rnd.Int()%branching == 0 { - h++ - } - return -} - -// Must hold RW-lock if prev == true, as it use shared prevNode slice. -func (p *DB) findGE(key []byte, prev bool) (int, bool) { - node := 0 - h := p.maxHeight - 1 - for { - next := p.nodeData[node+nNext+h] - cmp := 1 - if next != 0 { - o := p.nodeData[next] - cmp = p.cmp.Compare(p.kvData[o:o+p.nodeData[next+nKey]], key) - } - if cmp < 0 { - // Keep searching in this list - node = next - } else { - if prev { - p.prevNode[h] = node - } else if cmp == 0 { - return next, true - } - if h == 0 { - return next, cmp == 0 - } - h-- - } - } -} - -func (p *DB) findLT(key []byte) int { - node := 0 - h := p.maxHeight - 1 - for { - next := p.nodeData[node+nNext+h] - o := p.nodeData[next] - if next == 0 || p.cmp.Compare(p.kvData[o:o+p.nodeData[next+nKey]], key) >= 0 { - if h == 0 { - break - } - h-- - } else { - node = next - } - } - return node -} - -func (p *DB) findLast() int { - node := 0 - h := p.maxHeight - 1 - for { - next := p.nodeData[node+nNext+h] - if next == 0 { - if h == 0 { - break - } - h-- - } else { - node = next - } - } - return node -} - -// Put sets the value for the given key. It overwrites any previous value -// for that key; a DB is not a multi-map. -// -// It is safe to modify the contents of the arguments after Put returns. -func (p *DB) Put(key []byte, value []byte) error { - p.mu.Lock() - defer p.mu.Unlock() - - if node, exact := p.findGE(key, true); exact { - kvOffset := len(p.kvData) - p.kvData = append(p.kvData, key...) - p.kvData = append(p.kvData, value...) 
- p.nodeData[node] = kvOffset - m := p.nodeData[node+nVal] - p.nodeData[node+nVal] = len(value) - p.kvSize += len(value) - m - return nil - } - - h := p.randHeight() - if h > p.maxHeight { - for i := p.maxHeight; i < h; i++ { - p.prevNode[i] = 0 - } - p.maxHeight = h - } - - kvOffset := len(p.kvData) - p.kvData = append(p.kvData, key...) - p.kvData = append(p.kvData, value...) - // Node - node := len(p.nodeData) - p.nodeData = append(p.nodeData, kvOffset, len(key), len(value), h) - for i, n := range p.prevNode[:h] { - m := n + nNext + i - p.nodeData = append(p.nodeData, p.nodeData[m]) - p.nodeData[m] = node - } - - p.kvSize += len(key) + len(value) - p.n++ - return nil -} - -// Delete deletes the value for the given key. It returns ErrNotFound if -// the DB does not contain the key. -// -// It is safe to modify the contents of the arguments after Delete returns. -func (p *DB) Delete(key []byte) error { - p.mu.Lock() - defer p.mu.Unlock() - - node, exact := p.findGE(key, true) - if !exact { - return ErrNotFound - } - - h := p.nodeData[node+nHeight] - for i, n := range p.prevNode[:h] { - m := n + nNext + i - p.nodeData[m] = p.nodeData[p.nodeData[m]+nNext+i] - } - - p.kvSize -= p.nodeData[node+nKey] + p.nodeData[node+nVal] - p.n-- - return nil -} - -// Contains returns true if the given key are in the DB. -// -// It is safe to modify the contents of the arguments after Contains returns. -func (p *DB) Contains(key []byte) bool { - p.mu.RLock() - _, exact := p.findGE(key, false) - p.mu.RUnlock() - return exact -} - -// Get gets the value for the given key. It returns error.ErrNotFound if the -// DB does not contain the key. -// -// The caller should not modify the contents of the returned slice, but -// it is safe to modify the contents of the argument after Get returns. 
-func (p *DB) Get(key []byte) (value []byte, err error) { - p.mu.RLock() - if node, exact := p.findGE(key, false); exact { - o := p.nodeData[node] + p.nodeData[node+nKey] - value = p.kvData[o : o+p.nodeData[node+nVal]] - } else { - err = ErrNotFound - } - p.mu.RUnlock() - return -} - -// Find finds key/value pair whose key is greater than or equal to the -// given key. It returns ErrNotFound if the table doesn't contain -// such pair. -// -// The caller should not modify the contents of the returned slice, but -// it is safe to modify the contents of the argument after Find returns. -func (p *DB) Find(key []byte) (rkey, value []byte, err error) { - p.mu.RLock() - if node, _ := p.findGE(key, false); node != 0 { - n := p.nodeData[node] - m := n + p.nodeData[node+nKey] - rkey = p.kvData[n:m] - value = p.kvData[m : m+p.nodeData[node+nVal]] - } else { - err = ErrNotFound - } - p.mu.RUnlock() - return -} - -// NewIterator returns an iterator of the DB. -// The returned iterator is not safe for concurrent use, but it is safe to use -// multiple iterators concurrently, with each in a dedicated goroutine. -// It is also safe to use an iterator concurrently with modifying its -// underlying DB. However, the resultant key/value pairs are not guaranteed -// to be a consistent snapshot of the DB at a particular point in time. -// -// Slice allows slicing the iterator to only contains keys in the given -// range. A nil Range.Start is treated as a key before all keys in the -// DB. And a nil Range.Limit is treated as a key after all keys in -// the DB. -// -// WARNING: Any slice returned by interator (e.g. slice returned by calling -// Iterator.Key() or Iterator.Key() methods), its content should not be modified -// unless noted otherwise. -// -// The iterator must be released after use, by calling Release method. -// -// Also read Iterator documentation of the leveldb/iterator package. 
-func (p *DB) NewIterator(slice *util.Range) iterator.Iterator { - return &dbIter{p: p, slice: slice} -} - -// Capacity returns keys/values buffer capacity. -func (p *DB) Capacity() int { - p.mu.RLock() - defer p.mu.RUnlock() - return cap(p.kvData) -} - -// Size returns sum of keys and values length. Note that deleted -// key/value will not be accounted for, but it will still consume -// the buffer, since the buffer is append only. -func (p *DB) Size() int { - p.mu.RLock() - defer p.mu.RUnlock() - return p.kvSize -} - -// Free returns keys/values free buffer before need to grow. -func (p *DB) Free() int { - p.mu.RLock() - defer p.mu.RUnlock() - return cap(p.kvData) - len(p.kvData) -} - -// Len returns the number of entries in the DB. -func (p *DB) Len() int { - p.mu.RLock() - defer p.mu.RUnlock() - return p.n -} - -// Reset resets the DB to initial empty state. Allows reuse the buffer. -func (p *DB) Reset() { - p.mu.Lock() - p.rnd = rand.New(rand.NewSource(0xdeadbeef)) - p.maxHeight = 1 - p.n = 0 - p.kvSize = 0 - p.kvData = p.kvData[:0] - p.nodeData = p.nodeData[:nNext+tMaxHeight] - p.nodeData[nKV] = 0 - p.nodeData[nKey] = 0 - p.nodeData[nVal] = 0 - p.nodeData[nHeight] = tMaxHeight - for n := 0; n < tMaxHeight; n++ { - p.nodeData[nNext+n] = 0 - p.prevNode[n] = 0 - } - p.mu.Unlock() -} - -// New creates a new initialized in-memory key/value DB. The capacity -// is the initial key/value buffer capacity. The capacity is advisory, -// not enforced. -// -// This DB is append-only, deleting an entry would remove entry node but not -// reclaim KV buffer. -// -// The returned DB instance is safe for concurrent use. 
-func New(cmp comparer.BasicComparer, capacity int) *DB { - p := &DB{ - cmp: cmp, - rnd: rand.New(rand.NewSource(0xdeadbeef)), - maxHeight: 1, - kvData: make([]byte, 0, capacity), - nodeData: make([]int, 4+tMaxHeight), - } - p.nodeData[nHeight] = tMaxHeight - return p -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/opt/options.go b/vendor/github.com/syndtr/goleveldb/leveldb/opt/options.go deleted file mode 100644 index 528b1642..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/opt/options.go +++ /dev/null @@ -1,697 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package opt provides sets of options used by LevelDB. -package opt - -import ( - "math" - - "github.com/syndtr/goleveldb/leveldb/cache" - "github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/syndtr/goleveldb/leveldb/filter" -) - -const ( - KiB = 1024 - MiB = KiB * 1024 - GiB = MiB * 1024 -) - -var ( - DefaultBlockCacher = LRUCacher - DefaultBlockCacheCapacity = 8 * MiB - DefaultBlockRestartInterval = 16 - DefaultBlockSize = 4 * KiB - DefaultCompactionExpandLimitFactor = 25 - DefaultCompactionGPOverlapsFactor = 10 - DefaultCompactionL0Trigger = 4 - DefaultCompactionSourceLimitFactor = 1 - DefaultCompactionTableSize = 2 * MiB - DefaultCompactionTableSizeMultiplier = 1.0 - DefaultCompactionTotalSize = 10 * MiB - DefaultCompactionTotalSizeMultiplier = 10.0 - DefaultCompressionType = SnappyCompression - DefaultIteratorSamplingRate = 1 * MiB - DefaultOpenFilesCacher = LRUCacher - DefaultOpenFilesCacheCapacity = 500 - DefaultWriteBuffer = 4 * MiB - DefaultWriteL0PauseTrigger = 12 - DefaultWriteL0SlowdownTrigger = 8 -) - -// Cacher is a caching algorithm. 
-type Cacher interface { - New(capacity int) cache.Cacher -} - -type CacherFunc struct { - NewFunc func(capacity int) cache.Cacher -} - -func (f *CacherFunc) New(capacity int) cache.Cacher { - if f.NewFunc != nil { - return f.NewFunc(capacity) - } - return nil -} - -func noCacher(int) cache.Cacher { return nil } - -var ( - // LRUCacher is the LRU-cache algorithm. - LRUCacher = &CacherFunc{cache.NewLRU} - - // NoCacher is the value to disable caching algorithm. - NoCacher = &CacherFunc{} -) - -// Compression is the 'sorted table' block compression algorithm to use. -type Compression uint - -func (c Compression) String() string { - switch c { - case DefaultCompression: - return "default" - case NoCompression: - return "none" - case SnappyCompression: - return "snappy" - } - return "invalid" -} - -const ( - DefaultCompression Compression = iota - NoCompression - SnappyCompression - nCompression -) - -// Strict is the DB 'strict level'. -type Strict uint - -const ( - // If present then a corrupted or invalid chunk or block in manifest - // journal will cause an error instead of being dropped. - // This will prevent database with corrupted manifest to be opened. - StrictManifest Strict = 1 << iota - - // If present then journal chunk checksum will be verified. - StrictJournalChecksum - - // If present then a corrupted or invalid chunk or block in journal - // will cause an error instead of being dropped. - // This will prevent database with corrupted journal to be opened. - StrictJournal - - // If present then 'sorted table' block checksum will be verified. - // This has effect on both 'read operation' and compaction. - StrictBlockChecksum - - // If present then a corrupted 'sorted table' will fails compaction. - // The database will enter read-only mode. - StrictCompaction - - // If present then a corrupted 'sorted table' will halts 'read operation'. - StrictReader - - // If present then leveldb.Recover will drop corrupted 'sorted table'. 
- StrictRecovery - - // This only applicable for ReadOptions, if present then this ReadOptions - // 'strict level' will override global ones. - StrictOverride - - // StrictAll enables all strict flags. - StrictAll = StrictManifest | StrictJournalChecksum | StrictJournal | StrictBlockChecksum | StrictCompaction | StrictReader | StrictRecovery - - // DefaultStrict is the default strict flags. Specify any strict flags - // will override default strict flags as whole (i.e. not OR'ed). - DefaultStrict = StrictJournalChecksum | StrictBlockChecksum | StrictCompaction | StrictReader - - // NoStrict disables all strict flags. Override default strict flags. - NoStrict = ^StrictAll -) - -// Options holds the optional parameters for the DB at large. -type Options struct { - // AltFilters defines one or more 'alternative filters'. - // 'alternative filters' will be used during reads if a filter block - // does not match with the 'effective filter'. - // - // The default value is nil - AltFilters []filter.Filter - - // BlockCacher provides cache algorithm for LevelDB 'sorted table' block caching. - // Specify NoCacher to disable caching algorithm. - // - // The default value is LRUCacher. - BlockCacher Cacher - - // BlockCacheCapacity defines the capacity of the 'sorted table' block caching. - // Use -1 for zero, this has same effect as specifying NoCacher to BlockCacher. - // - // The default value is 8MiB. - BlockCacheCapacity int - - // BlockCacheEvictRemoved allows enable forced-eviction on cached block belonging - // to removed 'sorted table'. - // - // The default if false. - BlockCacheEvictRemoved bool - - // BlockRestartInterval is the number of keys between restart points for - // delta encoding of keys. - // - // The default value is 16. - BlockRestartInterval int - - // BlockSize is the minimum uncompressed size in bytes of each 'sorted table' - // block. - // - // The default value is 4KiB. 
- BlockSize int - - // CompactionExpandLimitFactor limits compaction size after expanded. - // This will be multiplied by table size limit at compaction target level. - // - // The default value is 25. - CompactionExpandLimitFactor int - - // CompactionGPOverlapsFactor limits overlaps in grandparent (Level + 2) that a - // single 'sorted table' generates. - // This will be multiplied by table size limit at grandparent level. - // - // The default value is 10. - CompactionGPOverlapsFactor int - - // CompactionL0Trigger defines number of 'sorted table' at level-0 that will - // trigger compaction. - // - // The default value is 4. - CompactionL0Trigger int - - // CompactionSourceLimitFactor limits compaction source size. This doesn't apply to - // level-0. - // This will be multiplied by table size limit at compaction target level. - // - // The default value is 1. - CompactionSourceLimitFactor int - - // CompactionTableSize limits size of 'sorted table' that compaction generates. - // The limits for each level will be calculated as: - // CompactionTableSize * (CompactionTableSizeMultiplier ^ Level) - // The multiplier for each level can also fine-tuned using CompactionTableSizeMultiplierPerLevel. - // - // The default value is 2MiB. - CompactionTableSize int - - // CompactionTableSizeMultiplier defines multiplier for CompactionTableSize. - // - // The default value is 1. - CompactionTableSizeMultiplier float64 - - // CompactionTableSizeMultiplierPerLevel defines per-level multiplier for - // CompactionTableSize. - // Use zero to skip a level. - // - // The default value is nil. - CompactionTableSizeMultiplierPerLevel []float64 - - // CompactionTotalSize limits total size of 'sorted table' for each level. - // The limits for each level will be calculated as: - // CompactionTotalSize * (CompactionTotalSizeMultiplier ^ Level) - // The multiplier for each level can also fine-tuned using - // CompactionTotalSizeMultiplierPerLevel. - // - // The default value is 10MiB. 
- CompactionTotalSize int - - // CompactionTotalSizeMultiplier defines multiplier for CompactionTotalSize. - // - // The default value is 10. - CompactionTotalSizeMultiplier float64 - - // CompactionTotalSizeMultiplierPerLevel defines per-level multiplier for - // CompactionTotalSize. - // Use zero to skip a level. - // - // The default value is nil. - CompactionTotalSizeMultiplierPerLevel []float64 - - // Comparer defines a total ordering over the space of []byte keys: a 'less - // than' relationship. The same comparison algorithm must be used for reads - // and writes over the lifetime of the DB. - // - // The default value uses the same ordering as bytes.Compare. - Comparer comparer.Comparer - - // Compression defines the 'sorted table' block compression to use. - // - // The default value (DefaultCompression) uses snappy compression. - Compression Compression - - // DisableBufferPool allows disable use of util.BufferPool functionality. - // - // The default value is false. - DisableBufferPool bool - - // DisableBlockCache allows disable use of cache.Cache functionality on - // 'sorted table' block. - // - // The default value is false. - DisableBlockCache bool - - // DisableCompactionBackoff allows disable compaction retry backoff. - // - // The default value is false. - DisableCompactionBackoff bool - - // DisableLargeBatchTransaction allows disabling switch-to-transaction mode - // on large batch write. If enable batch writes large than WriteBuffer will - // use transaction. - // - // The default is false. - DisableLargeBatchTransaction bool - - // ErrorIfExist defines whether an error should returned if the DB already - // exist. - // - // The default value is false. - ErrorIfExist bool - - // ErrorIfMissing defines whether an error should returned if the DB is - // missing. If false then the database will be created if missing, otherwise - // an error will be returned. - // - // The default value is false. 
- ErrorIfMissing bool - - // Filter defines an 'effective filter' to use. An 'effective filter' - // if defined will be used to generate per-table filter block. - // The filter name will be stored on disk. - // During reads LevelDB will try to find matching filter from - // 'effective filter' and 'alternative filters'. - // - // Filter can be changed after a DB has been created. It is recommended - // to put old filter to the 'alternative filters' to mitigate lack of - // filter during transition period. - // - // A filter is used to reduce disk reads when looking for a specific key. - // - // The default value is nil. - Filter filter.Filter - - // IteratorSamplingRate defines approximate gap (in bytes) between read - // sampling of an iterator. The samples will be used to determine when - // compaction should be triggered. - // - // The default is 1MiB. - IteratorSamplingRate int - - // NoSync allows completely disable fsync. - // - // The default is false. - NoSync bool - - // NoWriteMerge allows disabling write merge. - // - // The default is false. - NoWriteMerge bool - - // OpenFilesCacher provides cache algorithm for open files caching. - // Specify NoCacher to disable caching algorithm. - // - // The default value is LRUCacher. - OpenFilesCacher Cacher - - // OpenFilesCacheCapacity defines the capacity of the open files caching. - // Use -1 for zero, this has same effect as specifying NoCacher to OpenFilesCacher. - // - // The default value is 500. - OpenFilesCacheCapacity int - - // If true then opens DB in read-only mode. - // - // The default value is false. - ReadOnly bool - - // Strict defines the DB strict level. - Strict Strict - - // WriteBuffer defines maximum size of a 'memdb' before flushed to - // 'sorted table'. 'memdb' is an in-memory DB backed by an on-disk - // unsorted journal. - // - // LevelDB may held up to two 'memdb' at the same time. - // - // The default value is 4MiB. 
- WriteBuffer int - - // WriteL0StopTrigger defines number of 'sorted table' at level-0 that will - // pause write. - // - // The default value is 12. - WriteL0PauseTrigger int - - // WriteL0SlowdownTrigger defines number of 'sorted table' at level-0 that - // will trigger write slowdown. - // - // The default value is 8. - WriteL0SlowdownTrigger int -} - -func (o *Options) GetAltFilters() []filter.Filter { - if o == nil { - return nil - } - return o.AltFilters -} - -func (o *Options) GetBlockCacher() Cacher { - if o == nil || o.BlockCacher == nil { - return DefaultBlockCacher - } else if o.BlockCacher == NoCacher { - return nil - } - return o.BlockCacher -} - -func (o *Options) GetBlockCacheCapacity() int { - if o == nil || o.BlockCacheCapacity == 0 { - return DefaultBlockCacheCapacity - } else if o.BlockCacheCapacity < 0 { - return 0 - } - return o.BlockCacheCapacity -} - -func (o *Options) GetBlockCacheEvictRemoved() bool { - if o == nil { - return false - } - return o.BlockCacheEvictRemoved -} - -func (o *Options) GetBlockRestartInterval() int { - if o == nil || o.BlockRestartInterval <= 0 { - return DefaultBlockRestartInterval - } - return o.BlockRestartInterval -} - -func (o *Options) GetBlockSize() int { - if o == nil || o.BlockSize <= 0 { - return DefaultBlockSize - } - return o.BlockSize -} - -func (o *Options) GetCompactionExpandLimit(level int) int { - factor := DefaultCompactionExpandLimitFactor - if o != nil && o.CompactionExpandLimitFactor > 0 { - factor = o.CompactionExpandLimitFactor - } - return o.GetCompactionTableSize(level+1) * factor -} - -func (o *Options) GetCompactionGPOverlaps(level int) int { - factor := DefaultCompactionGPOverlapsFactor - if o != nil && o.CompactionGPOverlapsFactor > 0 { - factor = o.CompactionGPOverlapsFactor - } - return o.GetCompactionTableSize(level+2) * factor -} - -func (o *Options) GetCompactionL0Trigger() int { - if o == nil || o.CompactionL0Trigger == 0 { - return DefaultCompactionL0Trigger - } - return 
o.CompactionL0Trigger -} - -func (o *Options) GetCompactionSourceLimit(level int) int { - factor := DefaultCompactionSourceLimitFactor - if o != nil && o.CompactionSourceLimitFactor > 0 { - factor = o.CompactionSourceLimitFactor - } - return o.GetCompactionTableSize(level+1) * factor -} - -func (o *Options) GetCompactionTableSize(level int) int { - var ( - base = DefaultCompactionTableSize - mult float64 - ) - if o != nil { - if o.CompactionTableSize > 0 { - base = o.CompactionTableSize - } - if level < len(o.CompactionTableSizeMultiplierPerLevel) && o.CompactionTableSizeMultiplierPerLevel[level] > 0 { - mult = o.CompactionTableSizeMultiplierPerLevel[level] - } else if o.CompactionTableSizeMultiplier > 0 { - mult = math.Pow(o.CompactionTableSizeMultiplier, float64(level)) - } - } - if mult == 0 { - mult = math.Pow(DefaultCompactionTableSizeMultiplier, float64(level)) - } - return int(float64(base) * mult) -} - -func (o *Options) GetCompactionTotalSize(level int) int64 { - var ( - base = DefaultCompactionTotalSize - mult float64 - ) - if o != nil { - if o.CompactionTotalSize > 0 { - base = o.CompactionTotalSize - } - if level < len(o.CompactionTotalSizeMultiplierPerLevel) && o.CompactionTotalSizeMultiplierPerLevel[level] > 0 { - mult = o.CompactionTotalSizeMultiplierPerLevel[level] - } else if o.CompactionTotalSizeMultiplier > 0 { - mult = math.Pow(o.CompactionTotalSizeMultiplier, float64(level)) - } - } - if mult == 0 { - mult = math.Pow(DefaultCompactionTotalSizeMultiplier, float64(level)) - } - return int64(float64(base) * mult) -} - -func (o *Options) GetComparer() comparer.Comparer { - if o == nil || o.Comparer == nil { - return comparer.DefaultComparer - } - return o.Comparer -} - -func (o *Options) GetCompression() Compression { - if o == nil || o.Compression <= DefaultCompression || o.Compression >= nCompression { - return DefaultCompressionType - } - return o.Compression -} - -func (o *Options) GetDisableBufferPool() bool { - if o == nil { - return false - 
} - return o.DisableBufferPool -} - -func (o *Options) GetDisableBlockCache() bool { - if o == nil { - return false - } - return o.DisableBlockCache -} - -func (o *Options) GetDisableCompactionBackoff() bool { - if o == nil { - return false - } - return o.DisableCompactionBackoff -} - -func (o *Options) GetDisableLargeBatchTransaction() bool { - if o == nil { - return false - } - return o.DisableLargeBatchTransaction -} - -func (o *Options) GetErrorIfExist() bool { - if o == nil { - return false - } - return o.ErrorIfExist -} - -func (o *Options) GetErrorIfMissing() bool { - if o == nil { - return false - } - return o.ErrorIfMissing -} - -func (o *Options) GetFilter() filter.Filter { - if o == nil { - return nil - } - return o.Filter -} - -func (o *Options) GetIteratorSamplingRate() int { - if o == nil || o.IteratorSamplingRate <= 0 { - return DefaultIteratorSamplingRate - } - return o.IteratorSamplingRate -} - -func (o *Options) GetNoSync() bool { - if o == nil { - return false - } - return o.NoSync -} - -func (o *Options) GetNoWriteMerge() bool { - if o == nil { - return false - } - return o.NoWriteMerge -} - -func (o *Options) GetOpenFilesCacher() Cacher { - if o == nil || o.OpenFilesCacher == nil { - return DefaultOpenFilesCacher - } - if o.OpenFilesCacher == NoCacher { - return nil - } - return o.OpenFilesCacher -} - -func (o *Options) GetOpenFilesCacheCapacity() int { - if o == nil || o.OpenFilesCacheCapacity == 0 { - return DefaultOpenFilesCacheCapacity - } else if o.OpenFilesCacheCapacity < 0 { - return 0 - } - return o.OpenFilesCacheCapacity -} - -func (o *Options) GetReadOnly() bool { - if o == nil { - return false - } - return o.ReadOnly -} - -func (o *Options) GetStrict(strict Strict) bool { - if o == nil || o.Strict == 0 { - return DefaultStrict&strict != 0 - } - return o.Strict&strict != 0 -} - -func (o *Options) GetWriteBuffer() int { - if o == nil || o.WriteBuffer <= 0 { - return DefaultWriteBuffer - } - return o.WriteBuffer -} - -func (o *Options) 
GetWriteL0PauseTrigger() int { - if o == nil || o.WriteL0PauseTrigger == 0 { - return DefaultWriteL0PauseTrigger - } - return o.WriteL0PauseTrigger -} - -func (o *Options) GetWriteL0SlowdownTrigger() int { - if o == nil || o.WriteL0SlowdownTrigger == 0 { - return DefaultWriteL0SlowdownTrigger - } - return o.WriteL0SlowdownTrigger -} - -// ReadOptions holds the optional parameters for 'read operation'. The -// 'read operation' includes Get, Find and NewIterator. -type ReadOptions struct { - // DontFillCache defines whether block reads for this 'read operation' - // should be cached. If false then the block will be cached. This does - // not affects already cached block. - // - // The default value is false. - DontFillCache bool - - // Strict will be OR'ed with global DB 'strict level' unless StrictOverride - // is present. Currently only StrictReader that has effect here. - Strict Strict -} - -func (ro *ReadOptions) GetDontFillCache() bool { - if ro == nil { - return false - } - return ro.DontFillCache -} - -func (ro *ReadOptions) GetStrict(strict Strict) bool { - if ro == nil { - return false - } - return ro.Strict&strict != 0 -} - -// WriteOptions holds the optional parameters for 'write operation'. The -// 'write operation' includes Write, Put and Delete. -type WriteOptions struct { - // NoWriteMerge allows disabling write merge. - // - // The default is false. - NoWriteMerge bool - - // Sync is whether to sync underlying writes from the OS buffer cache - // through to actual disk, if applicable. Setting Sync can result in - // slower writes. - // - // If false, and the machine crashes, then some recent writes may be lost. - // Note that if it is just the process that crashes (and the machine does - // not) then no writes will be lost. - // - // In other words, Sync being false has the same semantics as a write - // system call. Sync being true means write followed by fsync. - // - // The default value is false. 
- Sync bool -} - -func (wo *WriteOptions) GetNoWriteMerge() bool { - if wo == nil { - return false - } - return wo.NoWriteMerge -} - -func (wo *WriteOptions) GetSync() bool { - if wo == nil { - return false - } - return wo.Sync -} - -func GetStrict(o *Options, ro *ReadOptions, strict Strict) bool { - if ro.GetStrict(StrictOverride) { - return ro.GetStrict(strict) - } else { - return o.GetStrict(strict) || ro.GetStrict(strict) - } -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/options.go b/vendor/github.com/syndtr/goleveldb/leveldb/options.go deleted file mode 100644 index b072b1ac..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/options.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "github.com/syndtr/goleveldb/leveldb/filter" - "github.com/syndtr/goleveldb/leveldb/opt" -) - -func dupOptions(o *opt.Options) *opt.Options { - newo := &opt.Options{} - if o != nil { - *newo = *o - } - if newo.Strict == 0 { - newo.Strict = opt.DefaultStrict - } - return newo -} - -func (s *session) setOptions(o *opt.Options) { - no := dupOptions(o) - // Alternative filters. - if filters := o.GetAltFilters(); len(filters) > 0 { - no.AltFilters = make([]filter.Filter, len(filters)) - for i, filter := range filters { - no.AltFilters[i] = &iFilter{filter} - } - } - // Comparer. - s.icmp = &iComparer{o.GetComparer()} - no.Comparer = s.icmp - // Filter. 
- if filter := o.GetFilter(); filter != nil { - no.Filter = &iFilter{filter} - } - - s.o = &cachedOptions{Options: no} - s.o.cache() -} - -const optCachedLevel = 7 - -type cachedOptions struct { - *opt.Options - - compactionExpandLimit []int - compactionGPOverlaps []int - compactionSourceLimit []int - compactionTableSize []int - compactionTotalSize []int64 -} - -func (co *cachedOptions) cache() { - co.compactionExpandLimit = make([]int, optCachedLevel) - co.compactionGPOverlaps = make([]int, optCachedLevel) - co.compactionSourceLimit = make([]int, optCachedLevel) - co.compactionTableSize = make([]int, optCachedLevel) - co.compactionTotalSize = make([]int64, optCachedLevel) - - for level := 0; level < optCachedLevel; level++ { - co.compactionExpandLimit[level] = co.Options.GetCompactionExpandLimit(level) - co.compactionGPOverlaps[level] = co.Options.GetCompactionGPOverlaps(level) - co.compactionSourceLimit[level] = co.Options.GetCompactionSourceLimit(level) - co.compactionTableSize[level] = co.Options.GetCompactionTableSize(level) - co.compactionTotalSize[level] = co.Options.GetCompactionTotalSize(level) - } -} - -func (co *cachedOptions) GetCompactionExpandLimit(level int) int { - if level < optCachedLevel { - return co.compactionExpandLimit[level] - } - return co.Options.GetCompactionExpandLimit(level) -} - -func (co *cachedOptions) GetCompactionGPOverlaps(level int) int { - if level < optCachedLevel { - return co.compactionGPOverlaps[level] - } - return co.Options.GetCompactionGPOverlaps(level) -} - -func (co *cachedOptions) GetCompactionSourceLimit(level int) int { - if level < optCachedLevel { - return co.compactionSourceLimit[level] - } - return co.Options.GetCompactionSourceLimit(level) -} - -func (co *cachedOptions) GetCompactionTableSize(level int) int { - if level < optCachedLevel { - return co.compactionTableSize[level] - } - return co.Options.GetCompactionTableSize(level) -} - -func (co *cachedOptions) GetCompactionTotalSize(level int) int64 { - if level 
< optCachedLevel { - return co.compactionTotalSize[level] - } - return co.Options.GetCompactionTotalSize(level) -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/session.go b/vendor/github.com/syndtr/goleveldb/leveldb/session.go deleted file mode 100644 index 3f391f93..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/session.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "fmt" - "io" - "os" - "sync" - - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/journal" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/storage" -) - -// ErrManifestCorrupted records manifest corruption. This error will be -// wrapped with errors.ErrCorrupted. -type ErrManifestCorrupted struct { - Field string - Reason string -} - -func (e *ErrManifestCorrupted) Error() string { - return fmt.Sprintf("leveldb: manifest corrupted (field '%s'): %s", e.Field, e.Reason) -} - -func newErrManifestCorrupted(fd storage.FileDesc, field, reason string) error { - return errors.NewErrCorrupted(fd, &ErrManifestCorrupted{field, reason}) -} - -// session represent a persistent database session. -type session struct { - // Need 64-bit alignment. 
- stNextFileNum int64 // current unused file number - stJournalNum int64 // current journal file number; need external synchronization - stPrevJournalNum int64 // prev journal file number; no longer used; for compatibility with older version of leveldb - stTempFileNum int64 - stSeqNum uint64 // last mem compacted seq; need external synchronization - - stor *iStorage - storLock storage.Locker - o *cachedOptions - icmp *iComparer - tops *tOps - fileRef map[int64]int - - manifest *journal.Writer - manifestWriter storage.Writer - manifestFd storage.FileDesc - - stCompPtrs []internalKey // compaction pointers; need external synchronization - stVersion *version // current version - vmu sync.Mutex -} - -// Creates new initialized session instance. -func newSession(stor storage.Storage, o *opt.Options) (s *session, err error) { - if stor == nil { - return nil, os.ErrInvalid - } - storLock, err := stor.Lock() - if err != nil { - return - } - s = &session{ - stor: newIStorage(stor), - storLock: storLock, - fileRef: make(map[int64]int), - } - s.setOptions(o) - s.tops = newTableOps(s) - s.setVersion(newVersion(s)) - s.log("log@legend F·NumFile S·FileSize N·Entry C·BadEntry B·BadBlock Ke·KeyError D·DroppedEntry L·Level Q·SeqNum T·TimeElapsed") - return -} - -// Close session. -func (s *session) close() { - s.tops.close() - if s.manifest != nil { - s.manifest.Close() - } - if s.manifestWriter != nil { - s.manifestWriter.Close() - } - s.manifest = nil - s.manifestWriter = nil - s.setVersion(&version{s: s, closing: true}) -} - -// Release session lock. -func (s *session) release() { - s.storLock.Unlock() -} - -// Create a new database session; need external synchronization. -func (s *session) create() error { - // create manifest - return s.newManifest(nil, nil) -} - -// Recover a database session; need external synchronization. 
-func (s *session) recover() (err error) { - defer func() { - if os.IsNotExist(err) { - // Don't return os.ErrNotExist if the underlying storage contains - // other files that belong to LevelDB. So the DB won't get trashed. - if fds, _ := s.stor.List(storage.TypeAll); len(fds) > 0 { - err = &errors.ErrCorrupted{Fd: storage.FileDesc{Type: storage.TypeManifest}, Err: &errors.ErrMissingFiles{}} - } - } - }() - - fd, err := s.stor.GetMeta() - if err != nil { - return - } - - reader, err := s.stor.Open(fd) - if err != nil { - return - } - defer reader.Close() - - var ( - // Options. - strict = s.o.GetStrict(opt.StrictManifest) - - jr = journal.NewReader(reader, dropper{s, fd}, strict, true) - rec = &sessionRecord{} - staging = s.stVersion.newStaging() - ) - for { - var r io.Reader - r, err = jr.Next() - if err != nil { - if err == io.EOF { - err = nil - break - } - return errors.SetFd(err, fd) - } - - err = rec.decode(r) - if err == nil { - // save compact pointers - for _, r := range rec.compPtrs { - s.setCompPtr(r.level, internalKey(r.ikey)) - } - // commit record to version staging - staging.commit(rec) - } else { - err = errors.SetFd(err, fd) - if strict || !errors.IsCorrupted(err) { - return - } - s.logf("manifest error: %v (skipped)", errors.SetFd(err, fd)) - } - rec.resetCompPtrs() - rec.resetAddedTables() - rec.resetDeletedTables() - } - - switch { - case !rec.has(recComparer): - return newErrManifestCorrupted(fd, "comparer", "missing") - case rec.comparer != s.icmp.uName(): - return newErrManifestCorrupted(fd, "comparer", fmt.Sprintf("mismatch: want '%s', got '%s'", s.icmp.uName(), rec.comparer)) - case !rec.has(recNextFileNum): - return newErrManifestCorrupted(fd, "next-file-num", "missing") - case !rec.has(recJournalNum): - return newErrManifestCorrupted(fd, "journal-file-num", "missing") - case !rec.has(recSeqNum): - return newErrManifestCorrupted(fd, "seq-num", "missing") - } - - s.manifestFd = fd - s.setVersion(staging.finish()) - 
s.setNextFileNum(rec.nextFileNum) - s.recordCommited(rec) - return nil -} - -// Commit session; need external synchronization. -func (s *session) commit(r *sessionRecord) (err error) { - v := s.version() - defer v.release() - - // spawn new version based on current version - nv := v.spawn(r) - - if s.manifest == nil { - // manifest journal writer not yet created, create one - err = s.newManifest(r, nv) - } else { - err = s.flushManifest(r) - } - - // finally, apply new version if no error rise - if err == nil { - s.setVersion(nv) - } - - return -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/session_compaction.go b/vendor/github.com/syndtr/goleveldb/leveldb/session_compaction.go deleted file mode 100644 index 089cd00b..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/session_compaction.go +++ /dev/null @@ -1,302 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "sync/atomic" - - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/memdb" - "github.com/syndtr/goleveldb/leveldb/opt" -) - -func (s *session) pickMemdbLevel(umin, umax []byte, maxLevel int) int { - v := s.version() - defer v.release() - return v.pickMemdbLevel(umin, umax, maxLevel) -} - -func (s *session) flushMemdb(rec *sessionRecord, mdb *memdb.DB, maxLevel int) (int, error) { - // Create sorted table. - iter := mdb.NewIterator(nil) - defer iter.Release() - t, n, err := s.tops.createFrom(iter) - if err != nil { - return 0, err - } - - // Pick level other than zero can cause compaction issue with large - // bulk insert and delete on strictly incrementing key-space. The - // problem is that the small deletion markers trapped at lower level, - // while key/value entries keep growing at higher level. 
Since the - // key-space is strictly incrementing it will not overlaps with - // higher level, thus maximum possible level is always picked, while - // overlapping deletion marker pushed into lower level. - // See: https://github.com/syndtr/goleveldb/issues/127. - flushLevel := s.pickMemdbLevel(t.imin.ukey(), t.imax.ukey(), maxLevel) - rec.addTableFile(flushLevel, t) - - s.logf("memdb@flush created L%d@%d N·%d S·%s %q:%q", flushLevel, t.fd.Num, n, shortenb(int(t.size)), t.imin, t.imax) - return flushLevel, nil -} - -// Pick a compaction based on current state; need external synchronization. -func (s *session) pickCompaction() *compaction { - v := s.version() - - var sourceLevel int - var t0 tFiles - if v.cScore >= 1 { - sourceLevel = v.cLevel - cptr := s.getCompPtr(sourceLevel) - tables := v.levels[sourceLevel] - for _, t := range tables { - if cptr == nil || s.icmp.Compare(t.imax, cptr) > 0 { - t0 = append(t0, t) - break - } - } - if len(t0) == 0 { - t0 = append(t0, tables[0]) - } - } else { - if p := atomic.LoadPointer(&v.cSeek); p != nil { - ts := (*tSet)(p) - sourceLevel = ts.level - t0 = append(t0, ts.table) - } else { - v.release() - return nil - } - } - - return newCompaction(s, v, sourceLevel, t0) -} - -// Create compaction from given level and range; need external synchronization. -func (s *session) getCompactionRange(sourceLevel int, umin, umax []byte, noLimit bool) *compaction { - v := s.version() - - if sourceLevel >= len(v.levels) { - v.release() - return nil - } - - t0 := v.levels[sourceLevel].getOverlaps(nil, s.icmp, umin, umax, sourceLevel == 0) - if len(t0) == 0 { - v.release() - return nil - } - - // Avoid compacting too much in one shot in case the range is large. - // But we cannot do this for level-0 since level-0 files can overlap - // and we must not pick one file and drop another older file if the - // two files overlap. 
- if !noLimit && sourceLevel > 0 { - limit := int64(v.s.o.GetCompactionSourceLimit(sourceLevel)) - total := int64(0) - for i, t := range t0 { - total += t.size - if total >= limit { - s.logf("table@compaction limiting F·%d -> F·%d", len(t0), i+1) - t0 = t0[:i+1] - break - } - } - } - - return newCompaction(s, v, sourceLevel, t0) -} - -func newCompaction(s *session, v *version, sourceLevel int, t0 tFiles) *compaction { - c := &compaction{ - s: s, - v: v, - sourceLevel: sourceLevel, - levels: [2]tFiles{t0, nil}, - maxGPOverlaps: int64(s.o.GetCompactionGPOverlaps(sourceLevel)), - tPtrs: make([]int, len(v.levels)), - } - c.expand() - c.save() - return c -} - -// compaction represent a compaction state. -type compaction struct { - s *session - v *version - - sourceLevel int - levels [2]tFiles - maxGPOverlaps int64 - - gp tFiles - gpi int - seenKey bool - gpOverlappedBytes int64 - imin, imax internalKey - tPtrs []int - released bool - - snapGPI int - snapSeenKey bool - snapGPOverlappedBytes int64 - snapTPtrs []int -} - -func (c *compaction) save() { - c.snapGPI = c.gpi - c.snapSeenKey = c.seenKey - c.snapGPOverlappedBytes = c.gpOverlappedBytes - c.snapTPtrs = append(c.snapTPtrs[:0], c.tPtrs...) -} - -func (c *compaction) restore() { - c.gpi = c.snapGPI - c.seenKey = c.snapSeenKey - c.gpOverlappedBytes = c.snapGPOverlappedBytes - c.tPtrs = append(c.tPtrs[:0], c.snapTPtrs...) -} - -func (c *compaction) release() { - if !c.released { - c.released = true - c.v.release() - } -} - -// Expand compacted tables; need external synchronization. -func (c *compaction) expand() { - limit := int64(c.s.o.GetCompactionExpandLimit(c.sourceLevel)) - vt0 := c.v.levels[c.sourceLevel] - vt1 := tFiles{} - if level := c.sourceLevel + 1; level < len(c.v.levels) { - vt1 = c.v.levels[level] - } - - t0, t1 := c.levels[0], c.levels[1] - imin, imax := t0.getRange(c.s.icmp) - // We expand t0 here just incase ukey hop across tables. 
- t0 = vt0.getOverlaps(t0, c.s.icmp, imin.ukey(), imax.ukey(), c.sourceLevel == 0) - if len(t0) != len(c.levels[0]) { - imin, imax = t0.getRange(c.s.icmp) - } - t1 = vt1.getOverlaps(t1, c.s.icmp, imin.ukey(), imax.ukey(), false) - // Get entire range covered by compaction. - amin, amax := append(t0, t1...).getRange(c.s.icmp) - - // See if we can grow the number of inputs in "sourceLevel" without - // changing the number of "sourceLevel+1" files we pick up. - if len(t1) > 0 { - exp0 := vt0.getOverlaps(nil, c.s.icmp, amin.ukey(), amax.ukey(), c.sourceLevel == 0) - if len(exp0) > len(t0) && t1.size()+exp0.size() < limit { - xmin, xmax := exp0.getRange(c.s.icmp) - exp1 := vt1.getOverlaps(nil, c.s.icmp, xmin.ukey(), xmax.ukey(), false) - if len(exp1) == len(t1) { - c.s.logf("table@compaction expanding L%d+L%d (F·%d S·%s)+(F·%d S·%s) -> (F·%d S·%s)+(F·%d S·%s)", - c.sourceLevel, c.sourceLevel+1, len(t0), shortenb(int(t0.size())), len(t1), shortenb(int(t1.size())), - len(exp0), shortenb(int(exp0.size())), len(exp1), shortenb(int(exp1.size()))) - imin, imax = xmin, xmax - t0, t1 = exp0, exp1 - amin, amax = append(t0, t1...).getRange(c.s.icmp) - } - } - } - - // Compute the set of grandparent files that overlap this compaction - // (parent == sourceLevel+1; grandparent == sourceLevel+2) - if level := c.sourceLevel + 2; level < len(c.v.levels) { - c.gp = c.v.levels[level].getOverlaps(c.gp, c.s.icmp, amin.ukey(), amax.ukey(), false) - } - - c.levels[0], c.levels[1] = t0, t1 - c.imin, c.imax = imin, imax -} - -// Check whether compaction is trivial. 
-func (c *compaction) trivial() bool { - return len(c.levels[0]) == 1 && len(c.levels[1]) == 0 && c.gp.size() <= c.maxGPOverlaps -} - -func (c *compaction) baseLevelForKey(ukey []byte) bool { - for level := c.sourceLevel + 2; level < len(c.v.levels); level++ { - tables := c.v.levels[level] - for c.tPtrs[level] < len(tables) { - t := tables[c.tPtrs[level]] - if c.s.icmp.uCompare(ukey, t.imax.ukey()) <= 0 { - // We've advanced far enough. - if c.s.icmp.uCompare(ukey, t.imin.ukey()) >= 0 { - // Key falls in this file's range, so definitely not base level. - return false - } - break - } - c.tPtrs[level]++ - } - } - return true -} - -func (c *compaction) shouldStopBefore(ikey internalKey) bool { - for ; c.gpi < len(c.gp); c.gpi++ { - gp := c.gp[c.gpi] - if c.s.icmp.Compare(ikey, gp.imax) <= 0 { - break - } - if c.seenKey { - c.gpOverlappedBytes += gp.size - } - } - c.seenKey = true - - if c.gpOverlappedBytes > c.maxGPOverlaps { - // Too much overlap for current output; start new output. - c.gpOverlappedBytes = 0 - return true - } - return false -} - -// Creates an iterator. -func (c *compaction) newIterator() iterator.Iterator { - // Creates iterator slice. - icap := len(c.levels) - if c.sourceLevel == 0 { - // Special case for level-0. - icap = len(c.levels[0]) + 1 - } - its := make([]iterator.Iterator, 0, icap) - - // Options. - ro := &opt.ReadOptions{ - DontFillCache: true, - Strict: opt.StrictOverride, - } - strict := c.s.o.GetStrict(opt.StrictCompaction) - if strict { - ro.Strict |= opt.StrictReader - } - - for i, tables := range c.levels { - if len(tables) == 0 { - continue - } - - // Level-0 is not sorted and may overlaps each other. 
- if c.sourceLevel+i == 0 { - for _, t := range tables { - its = append(its, c.s.tops.newIterator(t, nil, ro)) - } - } else { - it := iterator.NewIndexedIterator(tables.newIndexIterator(c.s.tops, c.s.icmp, nil, ro), strict) - its = append(its, it) - } - } - - return iterator.NewMergedIterator(its, c.s.icmp, strict) -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/session_record.go b/vendor/github.com/syndtr/goleveldb/leveldb/session_record.go deleted file mode 100644 index 854e1aa6..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/session_record.go +++ /dev/null @@ -1,323 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "bufio" - "encoding/binary" - "io" - "strings" - - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/storage" -) - -type byteReader interface { - io.Reader - io.ByteReader -} - -// These numbers are written to disk and should not be changed. -const ( - recComparer = 1 - recJournalNum = 2 - recNextFileNum = 3 - recSeqNum = 4 - recCompPtr = 5 - recDelTable = 6 - recAddTable = 7 - // 8 was used for large value refs - recPrevJournalNum = 9 -) - -type cpRecord struct { - level int - ikey internalKey -} - -type atRecord struct { - level int - num int64 - size int64 - imin internalKey - imax internalKey -} - -type dtRecord struct { - level int - num int64 -} - -type sessionRecord struct { - hasRec int - comparer string - journalNum int64 - prevJournalNum int64 - nextFileNum int64 - seqNum uint64 - compPtrs []cpRecord - addedTables []atRecord - deletedTables []dtRecord - - scratch [binary.MaxVarintLen64]byte - err error -} - -func (p *sessionRecord) has(rec int) bool { - return p.hasRec&(1< -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package leveldb - -import ( - "fmt" - "sync/atomic" - - "github.com/syndtr/goleveldb/leveldb/journal" - "github.com/syndtr/goleveldb/leveldb/storage" -) - -// Logging. - -type dropper struct { - s *session - fd storage.FileDesc -} - -func (d dropper) Drop(err error) { - if e, ok := err.(*journal.ErrCorrupted); ok { - d.s.logf("journal@drop %s-%d S·%s %q", d.fd.Type, d.fd.Num, shortenb(e.Size), e.Reason) - } else { - d.s.logf("journal@drop %s-%d %q", d.fd.Type, d.fd.Num, err) - } -} - -func (s *session) log(v ...interface{}) { s.stor.Log(fmt.Sprint(v...)) } -func (s *session) logf(format string, v ...interface{}) { s.stor.Log(fmt.Sprintf(format, v...)) } - -// File utils. - -func (s *session) newTemp() storage.FileDesc { - num := atomic.AddInt64(&s.stTempFileNum, 1) - 1 - return storage.FileDesc{Type: storage.TypeTemp, Num: num} -} - -func (s *session) addFileRef(fd storage.FileDesc, ref int) int { - ref += s.fileRef[fd.Num] - if ref > 0 { - s.fileRef[fd.Num] = ref - } else if ref == 0 { - delete(s.fileRef, fd.Num) - } else { - panic(fmt.Sprintf("negative ref: %v", fd)) - } - return ref -} - -// Session state. - -// Get current version. This will incr version ref, must call -// version.release (exactly once) after use. -func (s *session) version() *version { - s.vmu.Lock() - defer s.vmu.Unlock() - s.stVersion.incref() - return s.stVersion -} - -func (s *session) tLen(level int) int { - s.vmu.Lock() - defer s.vmu.Unlock() - return s.stVersion.tLen(level) -} - -// Set current version to v. -func (s *session) setVersion(v *version) { - s.vmu.Lock() - defer s.vmu.Unlock() - // Hold by session. It is important to call this first before releasing - // current version, otherwise the still used files might get released. - v.incref() - if s.stVersion != nil { - // Release current version. - s.stVersion.releaseNB() - } - s.stVersion = v -} - -// Get current unused file number. 
-func (s *session) nextFileNum() int64 { - return atomic.LoadInt64(&s.stNextFileNum) -} - -// Set current unused file number to num. -func (s *session) setNextFileNum(num int64) { - atomic.StoreInt64(&s.stNextFileNum, num) -} - -// Mark file number as used. -func (s *session) markFileNum(num int64) { - nextFileNum := num + 1 - for { - old, x := s.stNextFileNum, nextFileNum - if old > x { - x = old - } - if atomic.CompareAndSwapInt64(&s.stNextFileNum, old, x) { - break - } - } -} - -// Allocate a file number. -func (s *session) allocFileNum() int64 { - return atomic.AddInt64(&s.stNextFileNum, 1) - 1 -} - -// Reuse given file number. -func (s *session) reuseFileNum(num int64) { - for { - old, x := s.stNextFileNum, num - if old != x+1 { - x = old - } - if atomic.CompareAndSwapInt64(&s.stNextFileNum, old, x) { - break - } - } -} - -// Set compaction ptr at given level; need external synchronization. -func (s *session) setCompPtr(level int, ik internalKey) { - if level >= len(s.stCompPtrs) { - newCompPtrs := make([]internalKey, level+1) - copy(newCompPtrs, s.stCompPtrs) - s.stCompPtrs = newCompPtrs - } - s.stCompPtrs[level] = append(internalKey{}, ik...) -} - -// Get compaction ptr at given level; need external synchronization. -func (s *session) getCompPtr(level int) internalKey { - if level >= len(s.stCompPtrs) { - return nil - } - return s.stCompPtrs[level] -} - -// Manifest related utils. - -// Fill given session record obj with current states; need external -// synchronization. -func (s *session) fillRecord(r *sessionRecord, snapshot bool) { - r.setNextFileNum(s.nextFileNum()) - - if snapshot { - if !r.has(recJournalNum) { - r.setJournalNum(s.stJournalNum) - } - - if !r.has(recSeqNum) { - r.setSeqNum(s.stSeqNum) - } - - for level, ik := range s.stCompPtrs { - if ik != nil { - r.addCompPtr(level, ik) - } - } - - r.setComparer(s.icmp.uName()) - } -} - -// Mark if record has been committed, this will update session state; -// need external synchronization. 
-func (s *session) recordCommited(rec *sessionRecord) { - if rec.has(recJournalNum) { - s.stJournalNum = rec.journalNum - } - - if rec.has(recPrevJournalNum) { - s.stPrevJournalNum = rec.prevJournalNum - } - - if rec.has(recSeqNum) { - s.stSeqNum = rec.seqNum - } - - for _, r := range rec.compPtrs { - s.setCompPtr(r.level, internalKey(r.ikey)) - } -} - -// Create a new manifest file; need external synchronization. -func (s *session) newManifest(rec *sessionRecord, v *version) (err error) { - fd := storage.FileDesc{Type: storage.TypeManifest, Num: s.allocFileNum()} - writer, err := s.stor.Create(fd) - if err != nil { - return - } - jw := journal.NewWriter(writer) - - if v == nil { - v = s.version() - defer v.release() - } - if rec == nil { - rec = &sessionRecord{} - } - s.fillRecord(rec, true) - v.fillRecord(rec) - - defer func() { - if err == nil { - s.recordCommited(rec) - if s.manifest != nil { - s.manifest.Close() - } - if s.manifestWriter != nil { - s.manifestWriter.Close() - } - if !s.manifestFd.Zero() { - s.stor.Remove(s.manifestFd) - } - s.manifestFd = fd - s.manifestWriter = writer - s.manifest = jw - } else { - writer.Close() - s.stor.Remove(fd) - s.reuseFileNum(fd.Num) - } - }() - - w, err := jw.Next() - if err != nil { - return - } - err = rec.encode(w) - if err != nil { - return - } - err = jw.Flush() - if err != nil { - return - } - err = s.stor.SetMeta(fd) - return -} - -// Flush record to disk. 
-func (s *session) flushManifest(rec *sessionRecord) (err error) { - s.fillRecord(rec, false) - w, err := s.manifest.Next() - if err != nil { - return - } - err = rec.encode(w) - if err != nil { - return - } - err = s.manifest.Flush() - if err != nil { - return - } - if !s.o.GetNoSync() { - err = s.manifestWriter.Sync() - if err != nil { - return - } - } - s.recordCommited(rec) - return -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage.go deleted file mode 100644 index d45fb5df..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/storage.go +++ /dev/null @@ -1,63 +0,0 @@ -package leveldb - -import ( - "github.com/syndtr/goleveldb/leveldb/storage" - "sync/atomic" -) - -type iStorage struct { - storage.Storage - read uint64 - write uint64 -} - -func (c *iStorage) Open(fd storage.FileDesc) (storage.Reader, error) { - r, err := c.Storage.Open(fd) - return &iStorageReader{r, c}, err -} - -func (c *iStorage) Create(fd storage.FileDesc) (storage.Writer, error) { - w, err := c.Storage.Create(fd) - return &iStorageWriter{w, c}, err -} - -func (c *iStorage) reads() uint64 { - return atomic.LoadUint64(&c.read) -} - -func (c *iStorage) writes() uint64 { - return atomic.LoadUint64(&c.write) -} - -// newIStorage returns the given storage wrapped by iStorage. 
-func newIStorage(s storage.Storage) *iStorage { - return &iStorage{s, 0, 0} -} - -type iStorageReader struct { - storage.Reader - c *iStorage -} - -func (r *iStorageReader) Read(p []byte) (n int, err error) { - n, err = r.Reader.Read(p) - atomic.AddUint64(&r.c.read, uint64(n)) - return n, err -} - -func (r *iStorageReader) ReadAt(p []byte, off int64) (n int, err error) { - n, err = r.Reader.ReadAt(p, off) - atomic.AddUint64(&r.c.read, uint64(n)) - return n, err -} - -type iStorageWriter struct { - storage.Writer - c *iStorage -} - -func (w *iStorageWriter) Write(p []byte) (n int, err error) { - n, err = w.Writer.Write(p) - atomic.AddUint64(&w.c.write, uint64(n)) - return n, err -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go deleted file mode 100644 index 9ba71fd6..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go +++ /dev/null @@ -1,671 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reservefs. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package storage - -import ( - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "runtime" - "sort" - "strconv" - "strings" - "sync" - "time" -) - -var ( - errFileOpen = errors.New("leveldb/storage: file still open") - errReadOnly = errors.New("leveldb/storage: storage is read-only") -) - -type fileLock interface { - release() error -} - -type fileStorageLock struct { - fs *fileStorage -} - -func (lock *fileStorageLock) Unlock() { - if lock.fs != nil { - lock.fs.mu.Lock() - defer lock.fs.mu.Unlock() - if lock.fs.slock == lock { - lock.fs.slock = nil - } - } -} - -type int64Slice []int64 - -func (p int64Slice) Len() int { return len(p) } -func (p int64Slice) Less(i, j int) bool { return p[i] < p[j] } -func (p int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func writeFileSynced(filename string, data []byte, perm os.FileMode) error { - f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) - if err != nil { - return err - } - n, err := f.Write(data) - if err == nil && n < len(data) { - err = io.ErrShortWrite - } - if err1 := f.Sync(); err == nil { - err = err1 - } - if err1 := f.Close(); err == nil { - err = err1 - } - return err -} - -const logSizeThreshold = 1024 * 1024 // 1 MiB - -// fileStorage is a file-system backed storage. -type fileStorage struct { - path string - readOnly bool - - mu sync.Mutex - flock fileLock - slock *fileStorageLock - logw *os.File - logSize int64 - buf []byte - // Opened file counter; if open < 0 means closed. - open int - day int -} - -// OpenFile returns a new filesystem-backed storage implementation with the given -// path. This also acquire a file lock, so any subsequent attempt to open the -// same path will fail. -// -// The storage must be closed after use, by calling Close method. 
-func OpenFile(path string, readOnly bool) (Storage, error) { - if fi, err := os.Stat(path); err == nil { - if !fi.IsDir() { - return nil, fmt.Errorf("leveldb/storage: open %s: not a directory", path) - } - } else if os.IsNotExist(err) && !readOnly { - if err := os.MkdirAll(path, 0755); err != nil { - return nil, err - } - } else { - return nil, err - } - - flock, err := newFileLock(filepath.Join(path, "LOCK"), readOnly) - if err != nil { - return nil, err - } - - defer func() { - if err != nil { - flock.release() - } - }() - - var ( - logw *os.File - logSize int64 - ) - if !readOnly { - logw, err = os.OpenFile(filepath.Join(path, "LOG"), os.O_WRONLY|os.O_CREATE, 0644) - if err != nil { - return nil, err - } - logSize, err = logw.Seek(0, os.SEEK_END) - if err != nil { - logw.Close() - return nil, err - } - } - - fs := &fileStorage{ - path: path, - readOnly: readOnly, - flock: flock, - logw: logw, - logSize: logSize, - } - runtime.SetFinalizer(fs, (*fileStorage).Close) - return fs, nil -} - -func (fs *fileStorage) Lock() (Locker, error) { - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.open < 0 { - return nil, ErrClosed - } - if fs.readOnly { - return &fileStorageLock{}, nil - } - if fs.slock != nil { - return nil, ErrLocked - } - fs.slock = &fileStorageLock{fs: fs} - return fs.slock, nil -} - -func itoa(buf []byte, i int, wid int) []byte { - u := uint(i) - if u == 0 && wid <= 1 { - return append(buf, '0') - } - - // Assemble decimal in reverse order. - var b [32]byte - bp := len(b) - for ; u > 0 || wid > 0; u /= 10 { - bp-- - wid-- - b[bp] = byte(u%10) + '0' - } - return append(buf, b[bp:]...) -} - -func (fs *fileStorage) printDay(t time.Time) { - if fs.day == t.Day() { - return - } - fs.day = t.Day() - fs.logw.Write([]byte("=============== " + t.Format("Jan 2, 2006 (MST)") + " ===============\n")) -} - -func (fs *fileStorage) doLog(t time.Time, str string) { - if fs.logSize > logSizeThreshold { - // Rotate log file. 
- fs.logw.Close() - fs.logw = nil - fs.logSize = 0 - rename(filepath.Join(fs.path, "LOG"), filepath.Join(fs.path, "LOG.old")) - } - if fs.logw == nil { - var err error - fs.logw, err = os.OpenFile(filepath.Join(fs.path, "LOG"), os.O_WRONLY|os.O_CREATE, 0644) - if err != nil { - return - } - // Force printDay on new log file. - fs.day = 0 - } - fs.printDay(t) - hour, min, sec := t.Clock() - msec := t.Nanosecond() / 1e3 - // time - fs.buf = itoa(fs.buf[:0], hour, 2) - fs.buf = append(fs.buf, ':') - fs.buf = itoa(fs.buf, min, 2) - fs.buf = append(fs.buf, ':') - fs.buf = itoa(fs.buf, sec, 2) - fs.buf = append(fs.buf, '.') - fs.buf = itoa(fs.buf, msec, 6) - fs.buf = append(fs.buf, ' ') - // write - fs.buf = append(fs.buf, []byte(str)...) - fs.buf = append(fs.buf, '\n') - n, _ := fs.logw.Write(fs.buf) - fs.logSize += int64(n) -} - -func (fs *fileStorage) Log(str string) { - if !fs.readOnly { - t := time.Now() - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.open < 0 { - return - } - fs.doLog(t, str) - } -} - -func (fs *fileStorage) log(str string) { - if !fs.readOnly { - fs.doLog(time.Now(), str) - } -} - -func (fs *fileStorage) setMeta(fd FileDesc) error { - content := fsGenName(fd) + "\n" - // Check and backup old CURRENT file. - currentPath := filepath.Join(fs.path, "CURRENT") - if _, err := os.Stat(currentPath); err == nil { - b, err := ioutil.ReadFile(currentPath) - if err != nil { - fs.log(fmt.Sprintf("backup CURRENT: %v", err)) - return err - } - if string(b) == content { - // Content not changed, do nothing. - return nil - } - if err := writeFileSynced(currentPath+".bak", b, 0644); err != nil { - fs.log(fmt.Sprintf("backup CURRENT: %v", err)) - return err - } - } else if !os.IsNotExist(err) { - return err - } - path := fmt.Sprintf("%s.%d", filepath.Join(fs.path, "CURRENT"), fd.Num) - if err := writeFileSynced(path, []byte(content), 0644); err != nil { - fs.log(fmt.Sprintf("create CURRENT.%d: %v", fd.Num, err)) - return err - } - // Replace CURRENT file. 
- if err := rename(path, currentPath); err != nil { - fs.log(fmt.Sprintf("rename CURRENT.%d: %v", fd.Num, err)) - return err - } - // Sync root directory. - if err := syncDir(fs.path); err != nil { - fs.log(fmt.Sprintf("syncDir: %v", err)) - return err - } - return nil -} - -func (fs *fileStorage) SetMeta(fd FileDesc) error { - if !FileDescOk(fd) { - return ErrInvalidFile - } - if fs.readOnly { - return errReadOnly - } - - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.open < 0 { - return ErrClosed - } - return fs.setMeta(fd) -} - -func (fs *fileStorage) GetMeta() (FileDesc, error) { - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.open < 0 { - return FileDesc{}, ErrClosed - } - dir, err := os.Open(fs.path) - if err != nil { - return FileDesc{}, err - } - names, err := dir.Readdirnames(0) - // Close the dir first before checking for Readdirnames error. - if ce := dir.Close(); ce != nil { - fs.log(fmt.Sprintf("close dir: %v", ce)) - } - if err != nil { - return FileDesc{}, err - } - // Try this in order: - // - CURRENT.[0-9]+ ('pending rename' file, descending order) - // - CURRENT - // - CURRENT.bak - // - // Skip corrupted file or file that point to a missing target file. 
- type currentFile struct { - name string - fd FileDesc - } - tryCurrent := func(name string) (*currentFile, error) { - b, err := ioutil.ReadFile(filepath.Join(fs.path, name)) - if err != nil { - if os.IsNotExist(err) { - err = os.ErrNotExist - } - return nil, err - } - var fd FileDesc - if len(b) < 1 || b[len(b)-1] != '\n' || !fsParseNamePtr(string(b[:len(b)-1]), &fd) { - fs.log(fmt.Sprintf("%s: corrupted content: %q", name, b)) - err := &ErrCorrupted{ - Err: errors.New("leveldb/storage: corrupted or incomplete CURRENT file"), - } - return nil, err - } - if _, err := os.Stat(filepath.Join(fs.path, fsGenName(fd))); err != nil { - if os.IsNotExist(err) { - fs.log(fmt.Sprintf("%s: missing target file: %s", name, fd)) - err = os.ErrNotExist - } - return nil, err - } - return ¤tFile{name: name, fd: fd}, nil - } - tryCurrents := func(names []string) (*currentFile, error) { - var ( - cur *currentFile - // Last corruption error. - lastCerr error - ) - for _, name := range names { - var err error - cur, err = tryCurrent(name) - if err == nil { - break - } else if err == os.ErrNotExist { - // Fallback to the next file. - } else if isCorrupted(err) { - lastCerr = err - // Fallback to the next file. - } else { - // In case the error is due to permission, etc. - return nil, err - } - } - if cur == nil { - err := os.ErrNotExist - if lastCerr != nil { - err = lastCerr - } - return nil, err - } - return cur, nil - } - - // Try 'pending rename' files. 
- var nums []int64 - for _, name := range names { - if strings.HasPrefix(name, "CURRENT.") && name != "CURRENT.bak" { - i, err := strconv.ParseInt(name[8:], 10, 64) - if err == nil { - nums = append(nums, i) - } - } - } - var ( - pendCur *currentFile - pendErr = os.ErrNotExist - pendNames []string - ) - if len(nums) > 0 { - sort.Sort(sort.Reverse(int64Slice(nums))) - pendNames = make([]string, len(nums)) - for i, num := range nums { - pendNames[i] = fmt.Sprintf("CURRENT.%d", num) - } - pendCur, pendErr = tryCurrents(pendNames) - if pendErr != nil && pendErr != os.ErrNotExist && !isCorrupted(pendErr) { - return FileDesc{}, pendErr - } - } - - // Try CURRENT and CURRENT.bak. - curCur, curErr := tryCurrents([]string{"CURRENT", "CURRENT.bak"}) - if curErr != nil && curErr != os.ErrNotExist && !isCorrupted(curErr) { - return FileDesc{}, curErr - } - - // pendCur takes precedence, but guards against obsolete pendCur. - if pendCur != nil && (curCur == nil || pendCur.fd.Num > curCur.fd.Num) { - curCur = pendCur - } - - if curCur != nil { - // Restore CURRENT file to proper state. - if !fs.readOnly && (curCur.name != "CURRENT" || len(pendNames) != 0) { - // Ignore setMeta errors, however don't delete obsolete files if we - // catch error. - if err := fs.setMeta(curCur.fd); err == nil { - // Remove 'pending rename' files. - for _, name := range pendNames { - if err := os.Remove(filepath.Join(fs.path, name)); err != nil { - fs.log(fmt.Sprintf("remove %s: %v", name, err)) - } - } - } - } - return curCur.fd, nil - } - - // Nothing found. - if isCorrupted(pendErr) { - return FileDesc{}, pendErr - } - return FileDesc{}, curErr -} - -func (fs *fileStorage) List(ft FileType) (fds []FileDesc, err error) { - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.open < 0 { - return nil, ErrClosed - } - dir, err := os.Open(fs.path) - if err != nil { - return - } - names, err := dir.Readdirnames(0) - // Close the dir first before checking for Readdirnames error. 
- if cerr := dir.Close(); cerr != nil { - fs.log(fmt.Sprintf("close dir: %v", cerr)) - } - if err == nil { - for _, name := range names { - if fd, ok := fsParseName(name); ok && fd.Type&ft != 0 { - fds = append(fds, fd) - } - } - } - return -} - -func (fs *fileStorage) Open(fd FileDesc) (Reader, error) { - if !FileDescOk(fd) { - return nil, ErrInvalidFile - } - - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.open < 0 { - return nil, ErrClosed - } - of, err := os.OpenFile(filepath.Join(fs.path, fsGenName(fd)), os.O_RDONLY, 0) - if err != nil { - if fsHasOldName(fd) && os.IsNotExist(err) { - of, err = os.OpenFile(filepath.Join(fs.path, fsGenOldName(fd)), os.O_RDONLY, 0) - if err == nil { - goto ok - } - } - return nil, err - } -ok: - fs.open++ - return &fileWrap{File: of, fs: fs, fd: fd}, nil -} - -func (fs *fileStorage) Create(fd FileDesc) (Writer, error) { - if !FileDescOk(fd) { - return nil, ErrInvalidFile - } - if fs.readOnly { - return nil, errReadOnly - } - - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.open < 0 { - return nil, ErrClosed - } - of, err := os.OpenFile(filepath.Join(fs.path, fsGenName(fd)), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) - if err != nil { - return nil, err - } - fs.open++ - return &fileWrap{File: of, fs: fs, fd: fd}, nil -} - -func (fs *fileStorage) Remove(fd FileDesc) error { - if !FileDescOk(fd) { - return ErrInvalidFile - } - if fs.readOnly { - return errReadOnly - } - - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.open < 0 { - return ErrClosed - } - err := os.Remove(filepath.Join(fs.path, fsGenName(fd))) - if err != nil { - if fsHasOldName(fd) && os.IsNotExist(err) { - if e1 := os.Remove(filepath.Join(fs.path, fsGenOldName(fd))); !os.IsNotExist(e1) { - fs.log(fmt.Sprintf("remove %s: %v (old name)", fd, err)) - err = e1 - } - } else { - fs.log(fmt.Sprintf("remove %s: %v", fd, err)) - } - } - return err -} - -func (fs *fileStorage) Rename(oldfd, newfd FileDesc) error { - if !FileDescOk(oldfd) || !FileDescOk(newfd) { - return 
ErrInvalidFile - } - if oldfd == newfd { - return nil - } - if fs.readOnly { - return errReadOnly - } - - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.open < 0 { - return ErrClosed - } - return rename(filepath.Join(fs.path, fsGenName(oldfd)), filepath.Join(fs.path, fsGenName(newfd))) -} - -func (fs *fileStorage) Close() error { - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.open < 0 { - return ErrClosed - } - // Clear the finalizer. - runtime.SetFinalizer(fs, nil) - - if fs.open > 0 { - fs.log(fmt.Sprintf("close: warning, %d files still open", fs.open)) - } - fs.open = -1 - if fs.logw != nil { - fs.logw.Close() - } - return fs.flock.release() -} - -type fileWrap struct { - *os.File - fs *fileStorage - fd FileDesc - closed bool -} - -func (fw *fileWrap) Sync() error { - if err := fw.File.Sync(); err != nil { - return err - } - if fw.fd.Type == TypeManifest { - // Also sync parent directory if file type is manifest. - // See: https://code.google.com/p/leveldb/issues/detail?id=190. - if err := syncDir(fw.fs.path); err != nil { - fw.fs.log(fmt.Sprintf("syncDir: %v", err)) - return err - } - } - return nil -} - -func (fw *fileWrap) Close() error { - fw.fs.mu.Lock() - defer fw.fs.mu.Unlock() - if fw.closed { - return ErrClosed - } - fw.closed = true - fw.fs.open-- - err := fw.File.Close() - if err != nil { - fw.fs.log(fmt.Sprintf("close %s: %v", fw.fd, err)) - } - return err -} - -func fsGenName(fd FileDesc) string { - switch fd.Type { - case TypeManifest: - return fmt.Sprintf("MANIFEST-%06d", fd.Num) - case TypeJournal: - return fmt.Sprintf("%06d.log", fd.Num) - case TypeTable: - return fmt.Sprintf("%06d.ldb", fd.Num) - case TypeTemp: - return fmt.Sprintf("%06d.tmp", fd.Num) - default: - panic("invalid file type") - } -} - -func fsHasOldName(fd FileDesc) bool { - return fd.Type == TypeTable -} - -func fsGenOldName(fd FileDesc) string { - switch fd.Type { - case TypeTable: - return fmt.Sprintf("%06d.sst", fd.Num) - } - return fsGenName(fd) -} - -func fsParseName(name 
string) (fd FileDesc, ok bool) { - var tail string - _, err := fmt.Sscanf(name, "%d.%s", &fd.Num, &tail) - if err == nil { - switch tail { - case "log": - fd.Type = TypeJournal - case "ldb", "sst": - fd.Type = TypeTable - case "tmp": - fd.Type = TypeTemp - default: - return - } - return fd, true - } - n, _ := fmt.Sscanf(name, "MANIFEST-%d%s", &fd.Num, &tail) - if n == 1 { - fd.Type = TypeManifest - return fd, true - } - return -} - -func fsParseNamePtr(name string, fd *FileDesc) bool { - _fd, ok := fsParseName(name) - if fd != nil { - *fd = _fd - } - return ok -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_nacl.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_nacl.go deleted file mode 100644 index 5545aeef..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_nacl.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// +build nacl - -package storage - -import ( - "os" - "syscall" -) - -func newFileLock(path string, readOnly bool) (fl fileLock, err error) { - return nil, syscall.ENOTSUP -} - -func setFileLock(f *os.File, readOnly, lock bool) error { - return syscall.ENOTSUP -} - -func rename(oldpath, newpath string) error { - return syscall.ENOTSUP -} - -func isErrInvalid(err error) bool { - return false -} - -func syncDir(name string) error { - return syscall.ENOTSUP -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go deleted file mode 100644 index b8297980..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. 
-// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package storage - -import ( - "os" -) - -type plan9FileLock struct { - f *os.File -} - -func (fl *plan9FileLock) release() error { - return fl.f.Close() -} - -func newFileLock(path string, readOnly bool) (fl fileLock, err error) { - var ( - flag int - perm os.FileMode - ) - if readOnly { - flag = os.O_RDONLY - } else { - flag = os.O_RDWR - perm = os.ModeExclusive - } - f, err := os.OpenFile(path, flag, perm) - if os.IsNotExist(err) { - f, err = os.OpenFile(path, flag|os.O_CREATE, perm|0644) - } - if err != nil { - return - } - fl = &plan9FileLock{f: f} - return -} - -func rename(oldpath, newpath string) error { - if _, err := os.Stat(newpath); err == nil { - if err := os.Remove(newpath); err != nil { - return err - } - } - - return os.Rename(oldpath, newpath) -} - -func syncDir(name string) error { - f, err := os.Open(name) - if err != nil { - return err - } - defer f.Close() - if err := f.Sync(); err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go deleted file mode 100644 index 79901ee4..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -// +build solaris - -package storage - -import ( - "os" - "syscall" -) - -type unixFileLock struct { - f *os.File -} - -func (fl *unixFileLock) release() error { - if err := setFileLock(fl.f, false, false); err != nil { - return err - } - return fl.f.Close() -} - -func newFileLock(path string, readOnly bool) (fl fileLock, err error) { - var flag int - if readOnly { - flag = os.O_RDONLY - } else { - flag = os.O_RDWR - } - f, err := os.OpenFile(path, flag, 0) - if os.IsNotExist(err) { - f, err = os.OpenFile(path, flag|os.O_CREATE, 0644) - } - if err != nil { - return - } - err = setFileLock(f, readOnly, true) - if err != nil { - f.Close() - return - } - fl = &unixFileLock{f: f} - return -} - -func setFileLock(f *os.File, readOnly, lock bool) error { - flock := syscall.Flock_t{ - Type: syscall.F_UNLCK, - Start: 0, - Len: 0, - Whence: 1, - } - if lock { - if readOnly { - flock.Type = syscall.F_RDLCK - } else { - flock.Type = syscall.F_WRLCK - } - } - return syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &flock) -} - -func rename(oldpath, newpath string) error { - return os.Rename(oldpath, newpath) -} - -func syncDir(name string) error { - f, err := os.Open(name) - if err != nil { - return err - } - defer f.Close() - if err := f.Sync(); err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go deleted file mode 100644 index d75f66a9..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -// +build darwin dragonfly freebsd linux netbsd openbsd - -package storage - -import ( - "os" - "syscall" -) - -type unixFileLock struct { - f *os.File -} - -func (fl *unixFileLock) release() error { - if err := setFileLock(fl.f, false, false); err != nil { - return err - } - return fl.f.Close() -} - -func newFileLock(path string, readOnly bool) (fl fileLock, err error) { - var flag int - if readOnly { - flag = os.O_RDONLY - } else { - flag = os.O_RDWR - } - f, err := os.OpenFile(path, flag, 0) - if os.IsNotExist(err) { - f, err = os.OpenFile(path, flag|os.O_CREATE, 0644) - } - if err != nil { - return - } - err = setFileLock(f, readOnly, true) - if err != nil { - f.Close() - return - } - fl = &unixFileLock{f: f} - return -} - -func setFileLock(f *os.File, readOnly, lock bool) error { - how := syscall.LOCK_UN - if lock { - if readOnly { - how = syscall.LOCK_SH - } else { - how = syscall.LOCK_EX - } - } - return syscall.Flock(int(f.Fd()), how|syscall.LOCK_NB) -} - -func rename(oldpath, newpath string) error { - return os.Rename(oldpath, newpath) -} - -func isErrInvalid(err error) bool { - if err == os.ErrInvalid { - return true - } - // Go < 1.8 - if syserr, ok := err.(*os.SyscallError); ok && syserr.Err == syscall.EINVAL { - return true - } - // Go >= 1.8 returns *os.PathError instead - if patherr, ok := err.(*os.PathError); ok && patherr.Err == syscall.EINVAL { - return true - } - return false -} - -func syncDir(name string) error { - // As per fsync manpage, Linux seems to expect fsync on directory, however - // some system don't support this, so we will ignore syscall.EINVAL. - // - // From fsync(2): - // Calling fsync() does not necessarily ensure that the entry in the - // directory containing the file has also reached disk. For that an - // explicit fsync() on a file descriptor for the directory is also needed. 
- f, err := os.Open(name) - if err != nil { - return err - } - defer f.Close() - if err := f.Sync(); err != nil && !isErrInvalid(err) { - return err - } - return nil -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go deleted file mode 100644 index 899335fd..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright (c) 2013, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package storage - -import ( - "syscall" - "unsafe" -) - -var ( - modkernel32 = syscall.NewLazyDLL("kernel32.dll") - - procMoveFileExW = modkernel32.NewProc("MoveFileExW") -) - -const ( - _MOVEFILE_REPLACE_EXISTING = 1 -) - -type windowsFileLock struct { - fd syscall.Handle -} - -func (fl *windowsFileLock) release() error { - return syscall.Close(fl.fd) -} - -func newFileLock(path string, readOnly bool) (fl fileLock, err error) { - pathp, err := syscall.UTF16PtrFromString(path) - if err != nil { - return - } - var access, shareMode uint32 - if readOnly { - access = syscall.GENERIC_READ - shareMode = syscall.FILE_SHARE_READ - } else { - access = syscall.GENERIC_READ | syscall.GENERIC_WRITE - } - fd, err := syscall.CreateFile(pathp, access, shareMode, nil, syscall.OPEN_EXISTING, syscall.FILE_ATTRIBUTE_NORMAL, 0) - if err == syscall.ERROR_FILE_NOT_FOUND { - fd, err = syscall.CreateFile(pathp, access, shareMode, nil, syscall.OPEN_ALWAYS, syscall.FILE_ATTRIBUTE_NORMAL, 0) - } - if err != nil { - return - } - fl = &windowsFileLock{fd: fd} - return -} - -func moveFileEx(from *uint16, to *uint16, flags uint32) error { - r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags)) - if r1 == 0 { - if e1 != 0 { - return error(e1) - } - 
return syscall.EINVAL - } - return nil -} - -func rename(oldpath, newpath string) error { - from, err := syscall.UTF16PtrFromString(oldpath) - if err != nil { - return err - } - to, err := syscall.UTF16PtrFromString(newpath) - if err != nil { - return err - } - return moveFileEx(from, to, _MOVEFILE_REPLACE_EXISTING) -} - -func syncDir(name string) error { return nil } diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go deleted file mode 100644 index 838f1bee..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go +++ /dev/null @@ -1,222 +0,0 @@ -// Copyright (c) 2013, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package storage - -import ( - "bytes" - "os" - "sync" -) - -const typeShift = 4 - -// Verify at compile-time that typeShift is large enough to cover all FileType -// values by confirming that 0 == 0. -var _ [0]struct{} = [TypeAll >> typeShift]struct{}{} - -type memStorageLock struct { - ms *memStorage -} - -func (lock *memStorageLock) Unlock() { - ms := lock.ms - ms.mu.Lock() - defer ms.mu.Unlock() - if ms.slock == lock { - ms.slock = nil - } - return -} - -// memStorage is a memory-backed storage. -type memStorage struct { - mu sync.Mutex - slock *memStorageLock - files map[uint64]*memFile - meta FileDesc -} - -// NewMemStorage returns a new memory-backed storage implementation. 
-func NewMemStorage() Storage { - return &memStorage{ - files: make(map[uint64]*memFile), - } -} - -func (ms *memStorage) Lock() (Locker, error) { - ms.mu.Lock() - defer ms.mu.Unlock() - if ms.slock != nil { - return nil, ErrLocked - } - ms.slock = &memStorageLock{ms: ms} - return ms.slock, nil -} - -func (*memStorage) Log(str string) {} - -func (ms *memStorage) SetMeta(fd FileDesc) error { - if !FileDescOk(fd) { - return ErrInvalidFile - } - - ms.mu.Lock() - ms.meta = fd - ms.mu.Unlock() - return nil -} - -func (ms *memStorage) GetMeta() (FileDesc, error) { - ms.mu.Lock() - defer ms.mu.Unlock() - if ms.meta.Zero() { - return FileDesc{}, os.ErrNotExist - } - return ms.meta, nil -} - -func (ms *memStorage) List(ft FileType) ([]FileDesc, error) { - ms.mu.Lock() - var fds []FileDesc - for x := range ms.files { - fd := unpackFile(x) - if fd.Type&ft != 0 { - fds = append(fds, fd) - } - } - ms.mu.Unlock() - return fds, nil -} - -func (ms *memStorage) Open(fd FileDesc) (Reader, error) { - if !FileDescOk(fd) { - return nil, ErrInvalidFile - } - - ms.mu.Lock() - defer ms.mu.Unlock() - if m, exist := ms.files[packFile(fd)]; exist { - if m.open { - return nil, errFileOpen - } - m.open = true - return &memReader{Reader: bytes.NewReader(m.Bytes()), ms: ms, m: m}, nil - } - return nil, os.ErrNotExist -} - -func (ms *memStorage) Create(fd FileDesc) (Writer, error) { - if !FileDescOk(fd) { - return nil, ErrInvalidFile - } - - x := packFile(fd) - ms.mu.Lock() - defer ms.mu.Unlock() - m, exist := ms.files[x] - if exist { - if m.open { - return nil, errFileOpen - } - m.Reset() - } else { - m = &memFile{} - ms.files[x] = m - } - m.open = true - return &memWriter{memFile: m, ms: ms}, nil -} - -func (ms *memStorage) Remove(fd FileDesc) error { - if !FileDescOk(fd) { - return ErrInvalidFile - } - - x := packFile(fd) - ms.mu.Lock() - defer ms.mu.Unlock() - if _, exist := ms.files[x]; exist { - delete(ms.files, x) - return nil - } - return os.ErrNotExist -} - -func (ms *memStorage) 
Rename(oldfd, newfd FileDesc) error { - if !FileDescOk(oldfd) || !FileDescOk(newfd) { - return ErrInvalidFile - } - if oldfd == newfd { - return nil - } - - oldx := packFile(oldfd) - newx := packFile(newfd) - ms.mu.Lock() - defer ms.mu.Unlock() - oldm, exist := ms.files[oldx] - if !exist { - return os.ErrNotExist - } - newm, exist := ms.files[newx] - if (exist && newm.open) || oldm.open { - return errFileOpen - } - delete(ms.files, oldx) - ms.files[newx] = oldm - return nil -} - -func (*memStorage) Close() error { return nil } - -type memFile struct { - bytes.Buffer - open bool -} - -type memReader struct { - *bytes.Reader - ms *memStorage - m *memFile - closed bool -} - -func (mr *memReader) Close() error { - mr.ms.mu.Lock() - defer mr.ms.mu.Unlock() - if mr.closed { - return ErrClosed - } - mr.m.open = false - return nil -} - -type memWriter struct { - *memFile - ms *memStorage - closed bool -} - -func (*memWriter) Sync() error { return nil } - -func (mw *memWriter) Close() error { - mw.ms.mu.Lock() - defer mw.ms.mu.Unlock() - if mw.closed { - return ErrClosed - } - mw.memFile.open = false - return nil -} - -func packFile(fd FileDesc) uint64 { - return uint64(fd.Num)<> typeShift)} -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/storage.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/storage.go deleted file mode 100644 index 4e4a7242..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/storage/storage.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package storage provides storage abstraction for LevelDB. -package storage - -import ( - "errors" - "fmt" - "io" -) - -// FileType represent a file type. -type FileType int - -// File types. 
-const ( - TypeManifest FileType = 1 << iota - TypeJournal - TypeTable - TypeTemp - - TypeAll = TypeManifest | TypeJournal | TypeTable | TypeTemp -) - -func (t FileType) String() string { - switch t { - case TypeManifest: - return "manifest" - case TypeJournal: - return "journal" - case TypeTable: - return "table" - case TypeTemp: - return "temp" - } - return fmt.Sprintf("", t) -} - -// Common error. -var ( - ErrInvalidFile = errors.New("leveldb/storage: invalid file for argument") - ErrLocked = errors.New("leveldb/storage: already locked") - ErrClosed = errors.New("leveldb/storage: closed") -) - -// ErrCorrupted is the type that wraps errors that indicate corruption of -// a file. Package storage has its own type instead of using -// errors.ErrCorrupted to prevent circular import. -type ErrCorrupted struct { - Fd FileDesc - Err error -} - -func isCorrupted(err error) bool { - switch err.(type) { - case *ErrCorrupted: - return true - } - return false -} - -func (e *ErrCorrupted) Error() string { - if !e.Fd.Zero() { - return fmt.Sprintf("%v [file=%v]", e.Err, e.Fd) - } - return e.Err.Error() -} - -// Syncer is the interface that wraps basic Sync method. -type Syncer interface { - // Sync commits the current contents of the file to stable storage. - Sync() error -} - -// Reader is the interface that groups the basic Read, Seek, ReadAt and Close -// methods. -type Reader interface { - io.ReadSeeker - io.ReaderAt - io.Closer -} - -// Writer is the interface that groups the basic Write, Sync and Close -// methods. -type Writer interface { - io.WriteCloser - Syncer -} - -// Locker is the interface that wraps Unlock method. -type Locker interface { - Unlock() -} - -// FileDesc is a 'file descriptor'. 
-type FileDesc struct { - Type FileType - Num int64 -} - -func (fd FileDesc) String() string { - switch fd.Type { - case TypeManifest: - return fmt.Sprintf("MANIFEST-%06d", fd.Num) - case TypeJournal: - return fmt.Sprintf("%06d.log", fd.Num) - case TypeTable: - return fmt.Sprintf("%06d.ldb", fd.Num) - case TypeTemp: - return fmt.Sprintf("%06d.tmp", fd.Num) - default: - return fmt.Sprintf("%#x-%d", fd.Type, fd.Num) - } -} - -// Zero returns true if fd == (FileDesc{}). -func (fd FileDesc) Zero() bool { - return fd == (FileDesc{}) -} - -// FileDescOk returns true if fd is a valid 'file descriptor'. -func FileDescOk(fd FileDesc) bool { - switch fd.Type { - case TypeManifest: - case TypeJournal: - case TypeTable: - case TypeTemp: - default: - return false - } - return fd.Num >= 0 -} - -// Storage is the storage. A storage instance must be safe for concurrent use. -type Storage interface { - // Lock locks the storage. Any subsequent attempt to call Lock will fail - // until the last lock released. - // Caller should call Unlock method after use. - Lock() (Locker, error) - - // Log logs a string. This is used for logging. - // An implementation may write to a file, stdout or simply do nothing. - Log(str string) - - // SetMeta store 'file descriptor' that can later be acquired using GetMeta - // method. The 'file descriptor' should point to a valid file. - // SetMeta should be implemented in such way that changes should happen - // atomically. - SetMeta(fd FileDesc) error - - // GetMeta returns 'file descriptor' stored in meta. The 'file descriptor' - // can be updated using SetMeta method. - // Returns os.ErrNotExist if meta doesn't store any 'file descriptor', or - // 'file descriptor' point to nonexistent file. - GetMeta() (FileDesc, error) - - // List returns file descriptors that match the given file types. - // The file types may be OR'ed together. - List(ft FileType) ([]FileDesc, error) - - // Open opens file with the given 'file descriptor' read-only. 
- // Returns os.ErrNotExist error if the file does not exist. - // Returns ErrClosed if the underlying storage is closed. - Open(fd FileDesc) (Reader, error) - - // Create creates file with the given 'file descriptor', truncate if already - // exist and opens write-only. - // Returns ErrClosed if the underlying storage is closed. - Create(fd FileDesc) (Writer, error) - - // Remove removes file with the given 'file descriptor'. - // Returns ErrClosed if the underlying storage is closed. - Remove(fd FileDesc) error - - // Rename renames file from oldfd to newfd. - // Returns ErrClosed if the underlying storage is closed. - Rename(oldfd, newfd FileDesc) error - - // Close closes the storage. - // It is valid to call Close multiple times. Other methods should not be - // called after the storage has been closed. - Close() error -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/table.go b/vendor/github.com/syndtr/goleveldb/leveldb/table.go deleted file mode 100644 index 1fac60d0..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/table.go +++ /dev/null @@ -1,531 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "fmt" - "sort" - "sync/atomic" - - "github.com/syndtr/goleveldb/leveldb/cache" - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/storage" - "github.com/syndtr/goleveldb/leveldb/table" - "github.com/syndtr/goleveldb/leveldb/util" -) - -// tFile holds basic information about a table. -type tFile struct { - fd storage.FileDesc - seekLeft int32 - size int64 - imin, imax internalKey -} - -// Returns true if given key is after largest key of this table. 
-func (t *tFile) after(icmp *iComparer, ukey []byte) bool { - return ukey != nil && icmp.uCompare(ukey, t.imax.ukey()) > 0 -} - -// Returns true if given key is before smallest key of this table. -func (t *tFile) before(icmp *iComparer, ukey []byte) bool { - return ukey != nil && icmp.uCompare(ukey, t.imin.ukey()) < 0 -} - -// Returns true if given key range overlaps with this table key range. -func (t *tFile) overlaps(icmp *iComparer, umin, umax []byte) bool { - return !t.after(icmp, umin) && !t.before(icmp, umax) -} - -// Cosumes one seek and return current seeks left. -func (t *tFile) consumeSeek() int32 { - return atomic.AddInt32(&t.seekLeft, -1) -} - -// Creates new tFile. -func newTableFile(fd storage.FileDesc, size int64, imin, imax internalKey) *tFile { - f := &tFile{ - fd: fd, - size: size, - imin: imin, - imax: imax, - } - - // We arrange to automatically compact this file after - // a certain number of seeks. Let's assume: - // (1) One seek costs 10ms - // (2) Writing or reading 1MB costs 10ms (100MB/s) - // (3) A compaction of 1MB does 25MB of IO: - // 1MB read from this level - // 10-12MB read from next level (boundaries may be misaligned) - // 10-12MB written to next level - // This implies that 25 seeks cost the same as the compaction - // of 1MB of data. I.e., one seek costs approximately the - // same as the compaction of 40KB of data. We are a little - // conservative and allow approximately one seek for every 16KB - // of data before triggering a compaction. - f.seekLeft = int32(size / 16384) - if f.seekLeft < 100 { - f.seekLeft = 100 - } - - return f -} - -func tableFileFromRecord(r atRecord) *tFile { - return newTableFile(storage.FileDesc{Type: storage.TypeTable, Num: r.num}, r.size, r.imin, r.imax) -} - -// tFiles hold multiple tFile. 
-type tFiles []*tFile - -func (tf tFiles) Len() int { return len(tf) } -func (tf tFiles) Swap(i, j int) { tf[i], tf[j] = tf[j], tf[i] } - -func (tf tFiles) nums() string { - x := "[ " - for i, f := range tf { - if i != 0 { - x += ", " - } - x += fmt.Sprint(f.fd.Num) - } - x += " ]" - return x -} - -// Returns true if i smallest key is less than j. -// This used for sort by key in ascending order. -func (tf tFiles) lessByKey(icmp *iComparer, i, j int) bool { - a, b := tf[i], tf[j] - n := icmp.Compare(a.imin, b.imin) - if n == 0 { - return a.fd.Num < b.fd.Num - } - return n < 0 -} - -// Returns true if i file number is greater than j. -// This used for sort by file number in descending order. -func (tf tFiles) lessByNum(i, j int) bool { - return tf[i].fd.Num > tf[j].fd.Num -} - -// Sorts tables by key in ascending order. -func (tf tFiles) sortByKey(icmp *iComparer) { - sort.Sort(&tFilesSortByKey{tFiles: tf, icmp: icmp}) -} - -// Sorts tables by file number in descending order. -func (tf tFiles) sortByNum() { - sort.Sort(&tFilesSortByNum{tFiles: tf}) -} - -// Returns sum of all tables size. -func (tf tFiles) size() (sum int64) { - for _, t := range tf { - sum += t.size - } - return sum -} - -// Searches smallest index of tables whose its smallest -// key is after or equal with given key. -func (tf tFiles) searchMin(icmp *iComparer, ikey internalKey) int { - return sort.Search(len(tf), func(i int) bool { - return icmp.Compare(tf[i].imin, ikey) >= 0 - }) -} - -// Searches smallest index of tables whose its largest -// key is after or equal with given key. -func (tf tFiles) searchMax(icmp *iComparer, ikey internalKey) int { - return sort.Search(len(tf), func(i int) bool { - return icmp.Compare(tf[i].imax, ikey) >= 0 - }) -} - -// Returns true if given key range overlaps with one or more -// tables key range. If unsorted is true then binary search will not be used. 
-func (tf tFiles) overlaps(icmp *iComparer, umin, umax []byte, unsorted bool) bool { - if unsorted { - // Check against all files. - for _, t := range tf { - if t.overlaps(icmp, umin, umax) { - return true - } - } - return false - } - - i := 0 - if len(umin) > 0 { - // Find the earliest possible internal key for min. - i = tf.searchMax(icmp, makeInternalKey(nil, umin, keyMaxSeq, keyTypeSeek)) - } - if i >= len(tf) { - // Beginning of range is after all files, so no overlap. - return false - } - return !tf[i].before(icmp, umax) -} - -// Returns tables whose its key range overlaps with given key range. -// Range will be expanded if ukey found hop across tables. -// If overlapped is true then the search will be restarted if umax -// expanded. -// The dst content will be overwritten. -func (tf tFiles) getOverlaps(dst tFiles, icmp *iComparer, umin, umax []byte, overlapped bool) tFiles { - dst = dst[:0] - for i := 0; i < len(tf); { - t := tf[i] - if t.overlaps(icmp, umin, umax) { - if umin != nil && icmp.uCompare(t.imin.ukey(), umin) < 0 { - umin = t.imin.ukey() - dst = dst[:0] - i = 0 - continue - } else if umax != nil && icmp.uCompare(t.imax.ukey(), umax) > 0 { - umax = t.imax.ukey() - // Restart search if it is overlapped. - if overlapped { - dst = dst[:0] - i = 0 - continue - } - } - - dst = append(dst, t) - } - i++ - } - - return dst -} - -// Returns tables key range. -func (tf tFiles) getRange(icmp *iComparer) (imin, imax internalKey) { - for i, t := range tf { - if i == 0 { - imin, imax = t.imin, t.imax - continue - } - if icmp.Compare(t.imin, imin) < 0 { - imin = t.imin - } - if icmp.Compare(t.imax, imax) > 0 { - imax = t.imax - } - } - - return -} - -// Creates iterator index from tables. 
-func (tf tFiles) newIndexIterator(tops *tOps, icmp *iComparer, slice *util.Range, ro *opt.ReadOptions) iterator.IteratorIndexer { - if slice != nil { - var start, limit int - if slice.Start != nil { - start = tf.searchMax(icmp, internalKey(slice.Start)) - } - if slice.Limit != nil { - limit = tf.searchMin(icmp, internalKey(slice.Limit)) - } else { - limit = tf.Len() - } - tf = tf[start:limit] - } - return iterator.NewArrayIndexer(&tFilesArrayIndexer{ - tFiles: tf, - tops: tops, - icmp: icmp, - slice: slice, - ro: ro, - }) -} - -// Tables iterator index. -type tFilesArrayIndexer struct { - tFiles - tops *tOps - icmp *iComparer - slice *util.Range - ro *opt.ReadOptions -} - -func (a *tFilesArrayIndexer) Search(key []byte) int { - return a.searchMax(a.icmp, internalKey(key)) -} - -func (a *tFilesArrayIndexer) Get(i int) iterator.Iterator { - if i == 0 || i == a.Len()-1 { - return a.tops.newIterator(a.tFiles[i], a.slice, a.ro) - } - return a.tops.newIterator(a.tFiles[i], nil, a.ro) -} - -// Helper type for sortByKey. -type tFilesSortByKey struct { - tFiles - icmp *iComparer -} - -func (x *tFilesSortByKey) Less(i, j int) bool { - return x.lessByKey(x.icmp, i, j) -} - -// Helper type for sortByNum. -type tFilesSortByNum struct { - tFiles -} - -func (x *tFilesSortByNum) Less(i, j int) bool { - return x.lessByNum(i, j) -} - -// Table operations. -type tOps struct { - s *session - noSync bool - evictRemoved bool - cache *cache.Cache - bcache *cache.Cache - bpool *util.BufferPool -} - -// Creates an empty table and returns table writer. -func (t *tOps) create() (*tWriter, error) { - fd := storage.FileDesc{Type: storage.TypeTable, Num: t.s.allocFileNum()} - fw, err := t.s.stor.Create(fd) - if err != nil { - return nil, err - } - return &tWriter{ - t: t, - fd: fd, - w: fw, - tw: table.NewWriter(fw, t.s.o.Options), - }, nil -} - -// Builds table from src iterator. 
-func (t *tOps) createFrom(src iterator.Iterator) (f *tFile, n int, err error) { - w, err := t.create() - if err != nil { - return - } - - defer func() { - if err != nil { - w.drop() - } - }() - - for src.Next() { - err = w.append(src.Key(), src.Value()) - if err != nil { - return - } - } - err = src.Error() - if err != nil { - return - } - - n = w.tw.EntriesLen() - f, err = w.finish() - return -} - -// Opens table. It returns a cache handle, which should -// be released after use. -func (t *tOps) open(f *tFile) (ch *cache.Handle, err error) { - ch = t.cache.Get(0, uint64(f.fd.Num), func() (size int, value cache.Value) { - var r storage.Reader - r, err = t.s.stor.Open(f.fd) - if err != nil { - return 0, nil - } - - var bcache *cache.NamespaceGetter - if t.bcache != nil { - bcache = &cache.NamespaceGetter{Cache: t.bcache, NS: uint64(f.fd.Num)} - } - - var tr *table.Reader - tr, err = table.NewReader(r, f.size, f.fd, bcache, t.bpool, t.s.o.Options) - if err != nil { - r.Close() - return 0, nil - } - return 1, tr - - }) - if ch == nil && err == nil { - err = ErrClosed - } - return -} - -// Finds key/value pair whose key is greater than or equal to the -// given key. -func (t *tOps) find(f *tFile, key []byte, ro *opt.ReadOptions) (rkey, rvalue []byte, err error) { - ch, err := t.open(f) - if err != nil { - return nil, nil, err - } - defer ch.Release() - return ch.Value().(*table.Reader).Find(key, true, ro) -} - -// Finds key that is greater than or equal to the given key. -func (t *tOps) findKey(f *tFile, key []byte, ro *opt.ReadOptions) (rkey []byte, err error) { - ch, err := t.open(f) - if err != nil { - return nil, err - } - defer ch.Release() - return ch.Value().(*table.Reader).FindKey(key, true, ro) -} - -// Returns approximate offset of the given key. 
-func (t *tOps) offsetOf(f *tFile, key []byte) (offset int64, err error) { - ch, err := t.open(f) - if err != nil { - return - } - defer ch.Release() - return ch.Value().(*table.Reader).OffsetOf(key) -} - -// Creates an iterator from the given table. -func (t *tOps) newIterator(f *tFile, slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { - ch, err := t.open(f) - if err != nil { - return iterator.NewEmptyIterator(err) - } - iter := ch.Value().(*table.Reader).NewIterator(slice, ro) - iter.SetReleaser(ch) - return iter -} - -// Removes table from persistent storage. It waits until -// no one use the the table. -func (t *tOps) remove(f *tFile) { - t.cache.Delete(0, uint64(f.fd.Num), func() { - if err := t.s.stor.Remove(f.fd); err != nil { - t.s.logf("table@remove removing @%d %q", f.fd.Num, err) - } else { - t.s.logf("table@remove removed @%d", f.fd.Num) - } - if t.evictRemoved && t.bcache != nil { - t.bcache.EvictNS(uint64(f.fd.Num)) - } - }) -} - -// Closes the table ops instance. It will close all tables, -// regadless still used or not. -func (t *tOps) close() { - t.bpool.Close() - t.cache.Close() - if t.bcache != nil { - t.bcache.CloseWeak() - } -} - -// Creates new initialized table ops instance. -func newTableOps(s *session) *tOps { - var ( - cacher cache.Cacher - bcache *cache.Cache - bpool *util.BufferPool - ) - if s.o.GetOpenFilesCacheCapacity() > 0 { - cacher = cache.NewLRU(s.o.GetOpenFilesCacheCapacity()) - } - if !s.o.GetDisableBlockCache() { - var bcacher cache.Cacher - if s.o.GetBlockCacheCapacity() > 0 { - bcacher = s.o.GetBlockCacher().New(s.o.GetBlockCacheCapacity()) - } - bcache = cache.NewCache(bcacher) - } - if !s.o.GetDisableBufferPool() { - bpool = util.NewBufferPool(s.o.GetBlockSize() + 5) - } - return &tOps{ - s: s, - noSync: s.o.GetNoSync(), - evictRemoved: s.o.GetBlockCacheEvictRemoved(), - cache: cache.NewCache(cacher), - bcache: bcache, - bpool: bpool, - } -} - -// tWriter wraps the table writer. 
It keep track of file descriptor -// and added key range. -type tWriter struct { - t *tOps - - fd storage.FileDesc - w storage.Writer - tw *table.Writer - - first, last []byte -} - -// Append key/value pair to the table. -func (w *tWriter) append(key, value []byte) error { - if w.first == nil { - w.first = append([]byte{}, key...) - } - w.last = append(w.last[:0], key...) - return w.tw.Append(key, value) -} - -// Returns true if the table is empty. -func (w *tWriter) empty() bool { - return w.first == nil -} - -// Closes the storage.Writer. -func (w *tWriter) close() { - if w.w != nil { - w.w.Close() - w.w = nil - } -} - -// Finalizes the table and returns table file. -func (w *tWriter) finish() (f *tFile, err error) { - defer w.close() - err = w.tw.Close() - if err != nil { - return - } - if !w.t.noSync { - err = w.w.Sync() - if err != nil { - return - } - } - f = newTableFile(w.fd, int64(w.tw.BytesLen()), internalKey(w.first), internalKey(w.last)) - return -} - -// Drops the table. -func (w *tWriter) drop() { - w.close() - w.t.s.stor.Remove(w.fd) - w.t.s.reuseFileNum(w.fd.Num) - w.tw = nil - w.first = nil - w.last = nil -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/table/reader.go b/vendor/github.com/syndtr/goleveldb/leveldb/table/reader.go deleted file mode 100644 index 496feb6f..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/table/reader.go +++ /dev/null @@ -1,1139 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package table - -import ( - "encoding/binary" - "fmt" - "io" - "sort" - "strings" - "sync" - - "github.com/golang/snappy" - - "github.com/syndtr/goleveldb/leveldb/cache" - "github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/filter" - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/storage" - "github.com/syndtr/goleveldb/leveldb/util" -) - -// Reader errors. -var ( - ErrNotFound = errors.ErrNotFound - ErrReaderReleased = errors.New("leveldb/table: reader released") - ErrIterReleased = errors.New("leveldb/table: iterator released") -) - -// ErrCorrupted describes error due to corruption. This error will be wrapped -// with errors.ErrCorrupted. -type ErrCorrupted struct { - Pos int64 - Size int64 - Kind string - Reason string -} - -func (e *ErrCorrupted) Error() string { - return fmt.Sprintf("leveldb/table: corruption on %s (pos=%d): %s", e.Kind, e.Pos, e.Reason) -} - -func max(x, y int) int { - if x > y { - return x - } - return y -} - -type block struct { - bpool *util.BufferPool - bh blockHandle - data []byte - restartsLen int - restartsOffset int -} - -func (b *block) seek(cmp comparer.Comparer, rstart, rlimit int, key []byte) (index, offset int, err error) { - index = sort.Search(b.restartsLen-rstart-(b.restartsLen-rlimit), func(i int) bool { - offset := int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*(rstart+i):])) - offset++ // shared always zero, since this is a restart point - v1, n1 := binary.Uvarint(b.data[offset:]) // key length - _, n2 := binary.Uvarint(b.data[offset+n1:]) // value length - m := offset + n1 + n2 - return cmp.Compare(b.data[m:m+int(v1)], key) > 0 - }) + rstart - 1 - if index < rstart { - // The smallest key is greater-than key sought. 
- index = rstart - } - offset = int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*index:])) - return -} - -func (b *block) restartIndex(rstart, rlimit, offset int) int { - return sort.Search(b.restartsLen-rstart-(b.restartsLen-rlimit), func(i int) bool { - return int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*(rstart+i):])) > offset - }) + rstart - 1 -} - -func (b *block) restartOffset(index int) int { - return int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*index:])) -} - -func (b *block) entry(offset int) (key, value []byte, nShared, n int, err error) { - if offset >= b.restartsOffset { - if offset != b.restartsOffset { - err = &ErrCorrupted{Reason: "entries offset not aligned"} - } - return - } - v0, n0 := binary.Uvarint(b.data[offset:]) // Shared prefix length - v1, n1 := binary.Uvarint(b.data[offset+n0:]) // Key length - v2, n2 := binary.Uvarint(b.data[offset+n0+n1:]) // Value length - m := n0 + n1 + n2 - n = m + int(v1) + int(v2) - if n0 <= 0 || n1 <= 0 || n2 <= 0 || offset+n > b.restartsOffset { - err = &ErrCorrupted{Reason: "entries corrupted"} - return - } - key = b.data[offset+m : offset+m+int(v1)] - value = b.data[offset+m+int(v1) : offset+n] - nShared = int(v0) - return -} - -func (b *block) Release() { - b.bpool.Put(b.data) - b.bpool = nil - b.data = nil -} - -type dir int - -const ( - dirReleased dir = iota - 1 - dirSOI - dirEOI - dirBackward - dirForward -) - -type blockIter struct { - tr *Reader - block *block - blockReleaser util.Releaser - releaser util.Releaser - key, value []byte - offset int - // Previous offset, only filled by Next. - prevOffset int - prevNode []int - prevKeys []byte - restartIndex int - // Iterator direction. - dir dir - // Restart index slice range. - riStart int - riLimit int - // Offset slice range. - offsetStart int - offsetRealStart int - offsetLimit int - // Error. 
- err error -} - -func (i *blockIter) sErr(err error) { - i.err = err - i.key = nil - i.value = nil - i.prevNode = nil - i.prevKeys = nil -} - -func (i *blockIter) reset() { - if i.dir == dirBackward { - i.prevNode = i.prevNode[:0] - i.prevKeys = i.prevKeys[:0] - } - i.restartIndex = i.riStart - i.offset = i.offsetStart - i.dir = dirSOI - i.key = i.key[:0] - i.value = nil -} - -func (i *blockIter) isFirst() bool { - switch i.dir { - case dirForward: - return i.prevOffset == i.offsetRealStart - case dirBackward: - return len(i.prevNode) == 1 && i.restartIndex == i.riStart - } - return false -} - -func (i *blockIter) isLast() bool { - switch i.dir { - case dirForward, dirBackward: - return i.offset == i.offsetLimit - } - return false -} - -func (i *blockIter) First() bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - if i.dir == dirBackward { - i.prevNode = i.prevNode[:0] - i.prevKeys = i.prevKeys[:0] - } - i.dir = dirSOI - return i.Next() -} - -func (i *blockIter) Last() bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - if i.dir == dirBackward { - i.prevNode = i.prevNode[:0] - i.prevKeys = i.prevKeys[:0] - } - i.dir = dirEOI - return i.Prev() -} - -func (i *blockIter) Seek(key []byte) bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - ri, offset, err := i.block.seek(i.tr.cmp, i.riStart, i.riLimit, key) - if err != nil { - i.sErr(err) - return false - } - i.restartIndex = ri - i.offset = max(i.offsetStart, offset) - if i.dir == dirSOI || i.dir == dirEOI { - i.dir = dirForward - } - for i.Next() { - if i.tr.cmp.Compare(i.key, key) >= 0 { - return true - } - } - return false -} - -func (i *blockIter) Next() bool { - if i.dir == dirEOI || i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - 
- if i.dir == dirSOI { - i.restartIndex = i.riStart - i.offset = i.offsetStart - } else if i.dir == dirBackward { - i.prevNode = i.prevNode[:0] - i.prevKeys = i.prevKeys[:0] - } - for i.offset < i.offsetRealStart { - key, value, nShared, n, err := i.block.entry(i.offset) - if err != nil { - i.sErr(i.tr.fixErrCorruptedBH(i.block.bh, err)) - return false - } - if n == 0 { - i.dir = dirEOI - return false - } - i.key = append(i.key[:nShared], key...) - i.value = value - i.offset += n - } - if i.offset >= i.offsetLimit { - i.dir = dirEOI - if i.offset != i.offsetLimit { - i.sErr(i.tr.newErrCorruptedBH(i.block.bh, "entries offset not aligned")) - } - return false - } - key, value, nShared, n, err := i.block.entry(i.offset) - if err != nil { - i.sErr(i.tr.fixErrCorruptedBH(i.block.bh, err)) - return false - } - if n == 0 { - i.dir = dirEOI - return false - } - i.key = append(i.key[:nShared], key...) - i.value = value - i.prevOffset = i.offset - i.offset += n - i.dir = dirForward - return true -} - -func (i *blockIter) Prev() bool { - if i.dir == dirSOI || i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - var ri int - if i.dir == dirForward { - // Change direction. - i.offset = i.prevOffset - if i.offset == i.offsetRealStart { - i.dir = dirSOI - return false - } - ri = i.block.restartIndex(i.restartIndex, i.riLimit, i.offset) - i.dir = dirBackward - } else if i.dir == dirEOI { - // At the end of iterator. - i.restartIndex = i.riLimit - i.offset = i.offsetLimit - if i.offset == i.offsetRealStart { - i.dir = dirSOI - return false - } - ri = i.riLimit - 1 - i.dir = dirBackward - } else if len(i.prevNode) == 1 { - // This is the end of a restart range. - i.offset = i.prevNode[0] - i.prevNode = i.prevNode[:0] - if i.restartIndex == i.riStart { - i.dir = dirSOI - return false - } - i.restartIndex-- - ri = i.restartIndex - } else { - // In the middle of restart range, get from cache. 
- n := len(i.prevNode) - 3 - node := i.prevNode[n:] - i.prevNode = i.prevNode[:n] - // Get the key. - ko := node[0] - i.key = append(i.key[:0], i.prevKeys[ko:]...) - i.prevKeys = i.prevKeys[:ko] - // Get the value. - vo := node[1] - vl := vo + node[2] - i.value = i.block.data[vo:vl] - i.offset = vl - return true - } - // Build entries cache. - i.key = i.key[:0] - i.value = nil - offset := i.block.restartOffset(ri) - if offset == i.offset { - ri-- - if ri < 0 { - i.dir = dirSOI - return false - } - offset = i.block.restartOffset(ri) - } - i.prevNode = append(i.prevNode, offset) - for { - key, value, nShared, n, err := i.block.entry(offset) - if err != nil { - i.sErr(i.tr.fixErrCorruptedBH(i.block.bh, err)) - return false - } - if offset >= i.offsetRealStart { - if i.value != nil { - // Appends 3 variables: - // 1. Previous keys offset - // 2. Value offset in the data block - // 3. Value length - i.prevNode = append(i.prevNode, len(i.prevKeys), offset-len(i.value), len(i.value)) - i.prevKeys = append(i.prevKeys, i.key...) - } - i.value = value - } - i.key = append(i.key[:nShared], key...) - offset += n - // Stop if target offset reached. 
- if offset >= i.offset { - if offset != i.offset { - i.sErr(i.tr.newErrCorruptedBH(i.block.bh, "entries offset not aligned")) - return false - } - - break - } - } - i.restartIndex = ri - i.offset = offset - return true -} - -func (i *blockIter) Key() []byte { - if i.err != nil || i.dir <= dirEOI { - return nil - } - return i.key -} - -func (i *blockIter) Value() []byte { - if i.err != nil || i.dir <= dirEOI { - return nil - } - return i.value -} - -func (i *blockIter) Release() { - if i.dir != dirReleased { - i.tr = nil - i.block = nil - i.prevNode = nil - i.prevKeys = nil - i.key = nil - i.value = nil - i.dir = dirReleased - if i.blockReleaser != nil { - i.blockReleaser.Release() - i.blockReleaser = nil - } - if i.releaser != nil { - i.releaser.Release() - i.releaser = nil - } - } -} - -func (i *blockIter) SetReleaser(releaser util.Releaser) { - if i.dir == dirReleased { - panic(util.ErrReleased) - } - if i.releaser != nil && releaser != nil { - panic(util.ErrHasReleaser) - } - i.releaser = releaser -} - -func (i *blockIter) Valid() bool { - return i.err == nil && (i.dir == dirBackward || i.dir == dirForward) -} - -func (i *blockIter) Error() error { - return i.err -} - -type filterBlock struct { - bpool *util.BufferPool - data []byte - oOffset int - baseLg uint - filtersNum int -} - -func (b *filterBlock) contains(filter filter.Filter, offset uint64, key []byte) bool { - i := int(offset >> b.baseLg) - if i < b.filtersNum { - o := b.data[b.oOffset+i*4:] - n := int(binary.LittleEndian.Uint32(o)) - m := int(binary.LittleEndian.Uint32(o[4:])) - if n < m && m <= b.oOffset { - return filter.Contains(b.data[n:m], key) - } else if n == m { - return false - } - } - return true -} - -func (b *filterBlock) Release() { - b.bpool.Put(b.data) - b.bpool = nil - b.data = nil -} - -type indexIter struct { - *blockIter - tr *Reader - slice *util.Range - // Options - fillCache bool -} - -func (i *indexIter) Get() iterator.Iterator { - value := i.Value() - if value == nil { - 
return nil - } - dataBH, n := decodeBlockHandle(value) - if n == 0 { - return iterator.NewEmptyIterator(i.tr.newErrCorruptedBH(i.tr.indexBH, "bad data block handle")) - } - - var slice *util.Range - if i.slice != nil && (i.blockIter.isFirst() || i.blockIter.isLast()) { - slice = i.slice - } - return i.tr.getDataIterErr(dataBH, slice, i.tr.verifyChecksum, i.fillCache) -} - -// Reader is a table reader. -type Reader struct { - mu sync.RWMutex - fd storage.FileDesc - reader io.ReaderAt - cache *cache.NamespaceGetter - err error - bpool *util.BufferPool - // Options - o *opt.Options - cmp comparer.Comparer - filter filter.Filter - verifyChecksum bool - - dataEnd int64 - metaBH, indexBH, filterBH blockHandle - indexBlock *block - filterBlock *filterBlock -} - -func (r *Reader) blockKind(bh blockHandle) string { - switch bh.offset { - case r.metaBH.offset: - return "meta-block" - case r.indexBH.offset: - return "index-block" - case r.filterBH.offset: - if r.filterBH.length > 0 { - return "filter-block" - } - } - return "data-block" -} - -func (r *Reader) newErrCorrupted(pos, size int64, kind, reason string) error { - return &errors.ErrCorrupted{Fd: r.fd, Err: &ErrCorrupted{Pos: pos, Size: size, Kind: kind, Reason: reason}} -} - -func (r *Reader) newErrCorruptedBH(bh blockHandle, reason string) error { - return r.newErrCorrupted(int64(bh.offset), int64(bh.length), r.blockKind(bh), reason) -} - -func (r *Reader) fixErrCorruptedBH(bh blockHandle, err error) error { - if cerr, ok := err.(*ErrCorrupted); ok { - cerr.Pos = int64(bh.offset) - cerr.Size = int64(bh.length) - cerr.Kind = r.blockKind(bh) - return &errors.ErrCorrupted{Fd: r.fd, Err: cerr} - } - return err -} - -func (r *Reader) readRawBlock(bh blockHandle, verifyChecksum bool) ([]byte, error) { - data := r.bpool.Get(int(bh.length + blockTrailerLen)) - if _, err := r.reader.ReadAt(data, int64(bh.offset)); err != nil && err != io.EOF { - return nil, err - } - - if verifyChecksum { - n := bh.length + 1 - checksum0 := 
binary.LittleEndian.Uint32(data[n:]) - checksum1 := util.NewCRC(data[:n]).Value() - if checksum0 != checksum1 { - r.bpool.Put(data) - return nil, r.newErrCorruptedBH(bh, fmt.Sprintf("checksum mismatch, want=%#x got=%#x", checksum0, checksum1)) - } - } - - switch data[bh.length] { - case blockTypeNoCompression: - data = data[:bh.length] - case blockTypeSnappyCompression: - decLen, err := snappy.DecodedLen(data[:bh.length]) - if err != nil { - r.bpool.Put(data) - return nil, r.newErrCorruptedBH(bh, err.Error()) - } - decData := r.bpool.Get(decLen) - decData, err = snappy.Decode(decData, data[:bh.length]) - r.bpool.Put(data) - if err != nil { - r.bpool.Put(decData) - return nil, r.newErrCorruptedBH(bh, err.Error()) - } - data = decData - default: - r.bpool.Put(data) - return nil, r.newErrCorruptedBH(bh, fmt.Sprintf("unknown compression type %#x", data[bh.length])) - } - return data, nil -} - -func (r *Reader) readBlock(bh blockHandle, verifyChecksum bool) (*block, error) { - data, err := r.readRawBlock(bh, verifyChecksum) - if err != nil { - return nil, err - } - restartsLen := int(binary.LittleEndian.Uint32(data[len(data)-4:])) - b := &block{ - bpool: r.bpool, - bh: bh, - data: data, - restartsLen: restartsLen, - restartsOffset: len(data) - (restartsLen+1)*4, - } - return b, nil -} - -func (r *Reader) readBlockCached(bh blockHandle, verifyChecksum, fillCache bool) (*block, util.Releaser, error) { - if r.cache != nil { - var ( - err error - ch *cache.Handle - ) - if fillCache { - ch = r.cache.Get(bh.offset, func() (size int, value cache.Value) { - var b *block - b, err = r.readBlock(bh, verifyChecksum) - if err != nil { - return 0, nil - } - return cap(b.data), b - }) - } else { - ch = r.cache.Get(bh.offset, nil) - } - if ch != nil { - b, ok := ch.Value().(*block) - if !ok { - ch.Release() - return nil, nil, errors.New("leveldb/table: inconsistent block type") - } - return b, ch, err - } else if err != nil { - return nil, nil, err - } - } - - b, err := r.readBlock(bh, 
verifyChecksum) - return b, b, err -} - -func (r *Reader) readFilterBlock(bh blockHandle) (*filterBlock, error) { - data, err := r.readRawBlock(bh, true) - if err != nil { - return nil, err - } - n := len(data) - if n < 5 { - return nil, r.newErrCorruptedBH(bh, "too short") - } - m := n - 5 - oOffset := int(binary.LittleEndian.Uint32(data[m:])) - if oOffset > m { - return nil, r.newErrCorruptedBH(bh, "invalid data-offsets offset") - } - b := &filterBlock{ - bpool: r.bpool, - data: data, - oOffset: oOffset, - baseLg: uint(data[n-1]), - filtersNum: (m - oOffset) / 4, - } - return b, nil -} - -func (r *Reader) readFilterBlockCached(bh blockHandle, fillCache bool) (*filterBlock, util.Releaser, error) { - if r.cache != nil { - var ( - err error - ch *cache.Handle - ) - if fillCache { - ch = r.cache.Get(bh.offset, func() (size int, value cache.Value) { - var b *filterBlock - b, err = r.readFilterBlock(bh) - if err != nil { - return 0, nil - } - return cap(b.data), b - }) - } else { - ch = r.cache.Get(bh.offset, nil) - } - if ch != nil { - b, ok := ch.Value().(*filterBlock) - if !ok { - ch.Release() - return nil, nil, errors.New("leveldb/table: inconsistent block type") - } - return b, ch, err - } else if err != nil { - return nil, nil, err - } - } - - b, err := r.readFilterBlock(bh) - return b, b, err -} - -func (r *Reader) getIndexBlock(fillCache bool) (b *block, rel util.Releaser, err error) { - if r.indexBlock == nil { - return r.readBlockCached(r.indexBH, true, fillCache) - } - return r.indexBlock, util.NoopReleaser{}, nil -} - -func (r *Reader) getFilterBlock(fillCache bool) (*filterBlock, util.Releaser, error) { - if r.filterBlock == nil { - return r.readFilterBlockCached(r.filterBH, fillCache) - } - return r.filterBlock, util.NoopReleaser{}, nil -} - -func (r *Reader) newBlockIter(b *block, bReleaser util.Releaser, slice *util.Range, inclLimit bool) *blockIter { - bi := &blockIter{ - tr: r, - block: b, - blockReleaser: bReleaser, - // Valid key should never be 
nil. - key: make([]byte, 0), - dir: dirSOI, - riStart: 0, - riLimit: b.restartsLen, - offsetStart: 0, - offsetRealStart: 0, - offsetLimit: b.restartsOffset, - } - if slice != nil { - if slice.Start != nil { - if bi.Seek(slice.Start) { - bi.riStart = b.restartIndex(bi.restartIndex, b.restartsLen, bi.prevOffset) - bi.offsetStart = b.restartOffset(bi.riStart) - bi.offsetRealStart = bi.prevOffset - } else { - bi.riStart = b.restartsLen - bi.offsetStart = b.restartsOffset - bi.offsetRealStart = b.restartsOffset - } - } - if slice.Limit != nil { - if bi.Seek(slice.Limit) && (!inclLimit || bi.Next()) { - bi.offsetLimit = bi.prevOffset - bi.riLimit = bi.restartIndex + 1 - } - } - bi.reset() - if bi.offsetStart > bi.offsetLimit { - bi.sErr(errors.New("leveldb/table: invalid slice range")) - } - } - return bi -} - -func (r *Reader) getDataIter(dataBH blockHandle, slice *util.Range, verifyChecksum, fillCache bool) iterator.Iterator { - b, rel, err := r.readBlockCached(dataBH, verifyChecksum, fillCache) - if err != nil { - return iterator.NewEmptyIterator(err) - } - return r.newBlockIter(b, rel, slice, false) -} - -func (r *Reader) getDataIterErr(dataBH blockHandle, slice *util.Range, verifyChecksum, fillCache bool) iterator.Iterator { - r.mu.RLock() - defer r.mu.RUnlock() - - if r.err != nil { - return iterator.NewEmptyIterator(r.err) - } - - return r.getDataIter(dataBH, slice, verifyChecksum, fillCache) -} - -// NewIterator creates an iterator from the table. -// -// Slice allows slicing the iterator to only contains keys in the given -// range. A nil Range.Start is treated as a key before all keys in the -// table. And a nil Range.Limit is treated as a key after all keys in -// the table. -// -// WARNING: Any slice returned by interator (e.g. slice returned by calling -// Iterator.Key() or Iterator.Key() methods), its content should not be modified -// unless noted otherwise. -// -// The returned iterator is not safe for concurrent use and should be released -// after use. 
-// -// Also read Iterator documentation of the leveldb/iterator package. -func (r *Reader) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { - r.mu.RLock() - defer r.mu.RUnlock() - - if r.err != nil { - return iterator.NewEmptyIterator(r.err) - } - - fillCache := !ro.GetDontFillCache() - indexBlock, rel, err := r.getIndexBlock(fillCache) - if err != nil { - return iterator.NewEmptyIterator(err) - } - index := &indexIter{ - blockIter: r.newBlockIter(indexBlock, rel, slice, true), - tr: r, - slice: slice, - fillCache: !ro.GetDontFillCache(), - } - return iterator.NewIndexedIterator(index, opt.GetStrict(r.o, ro, opt.StrictReader)) -} - -func (r *Reader) find(key []byte, filtered bool, ro *opt.ReadOptions, noValue bool) (rkey, value []byte, err error) { - r.mu.RLock() - defer r.mu.RUnlock() - - if r.err != nil { - err = r.err - return - } - - indexBlock, rel, err := r.getIndexBlock(true) - if err != nil { - return - } - defer rel.Release() - - index := r.newBlockIter(indexBlock, nil, nil, true) - defer index.Release() - - if !index.Seek(key) { - if err = index.Error(); err == nil { - err = ErrNotFound - } - return - } - - dataBH, n := decodeBlockHandle(index.Value()) - if n == 0 { - r.err = r.newErrCorruptedBH(r.indexBH, "bad data block handle") - return nil, nil, r.err - } - - // The filter should only used for exact match. - if filtered && r.filter != nil { - filterBlock, frel, ferr := r.getFilterBlock(true) - if ferr == nil { - if !filterBlock.contains(r.filter, dataBH.offset, key) { - frel.Release() - return nil, nil, ErrNotFound - } - frel.Release() - } else if !errors.IsCorrupted(ferr) { - return nil, nil, ferr - } - } - - data := r.getDataIter(dataBH, nil, r.verifyChecksum, !ro.GetDontFillCache()) - if !data.Seek(key) { - data.Release() - if err = data.Error(); err != nil { - return - } - - // The nearest greater-than key is the first key of the next block. 
- if !index.Next() { - if err = index.Error(); err == nil { - err = ErrNotFound - } - return - } - - dataBH, n = decodeBlockHandle(index.Value()) - if n == 0 { - r.err = r.newErrCorruptedBH(r.indexBH, "bad data block handle") - return nil, nil, r.err - } - - data = r.getDataIter(dataBH, nil, r.verifyChecksum, !ro.GetDontFillCache()) - if !data.Next() { - data.Release() - if err = data.Error(); err == nil { - err = ErrNotFound - } - return - } - } - - // Key doesn't use block buffer, no need to copy the buffer. - rkey = data.Key() - if !noValue { - if r.bpool == nil { - value = data.Value() - } else { - // Value does use block buffer, and since the buffer will be - // recycled, it need to be copied. - value = append([]byte{}, data.Value()...) - } - } - data.Release() - return -} - -// Find finds key/value pair whose key is greater than or equal to the -// given key. It returns ErrNotFound if the table doesn't contain -// such pair. -// If filtered is true then the nearest 'block' will be checked against -// 'filter data' (if present) and will immediately return ErrNotFound if -// 'filter data' indicates that such pair doesn't exist. -// -// The caller may modify the contents of the returned slice as it is its -// own copy. -// It is safe to modify the contents of the argument after Find returns. -func (r *Reader) Find(key []byte, filtered bool, ro *opt.ReadOptions) (rkey, value []byte, err error) { - return r.find(key, filtered, ro, false) -} - -// FindKey finds key that is greater than or equal to the given key. -// It returns ErrNotFound if the table doesn't contain such key. -// If filtered is true then the nearest 'block' will be checked against -// 'filter data' (if present) and will immediately return ErrNotFound if -// 'filter data' indicates that such key doesn't exist. -// -// The caller may modify the contents of the returned slice as it is its -// own copy. -// It is safe to modify the contents of the argument after Find returns. 
-func (r *Reader) FindKey(key []byte, filtered bool, ro *opt.ReadOptions) (rkey []byte, err error) { - rkey, _, err = r.find(key, filtered, ro, true) - return -} - -// Get gets the value for the given key. It returns errors.ErrNotFound -// if the table does not contain the key. -// -// The caller may modify the contents of the returned slice as it is its -// own copy. -// It is safe to modify the contents of the argument after Find returns. -func (r *Reader) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) { - r.mu.RLock() - defer r.mu.RUnlock() - - if r.err != nil { - err = r.err - return - } - - rkey, value, err := r.find(key, false, ro, false) - if err == nil && r.cmp.Compare(rkey, key) != 0 { - value = nil - err = ErrNotFound - } - return -} - -// OffsetOf returns approximate offset for the given key. -// -// It is safe to modify the contents of the argument after Get returns. -func (r *Reader) OffsetOf(key []byte) (offset int64, err error) { - r.mu.RLock() - defer r.mu.RUnlock() - - if r.err != nil { - err = r.err - return - } - - indexBlock, rel, err := r.readBlockCached(r.indexBH, true, true) - if err != nil { - return - } - defer rel.Release() - - index := r.newBlockIter(indexBlock, nil, nil, true) - defer index.Release() - if index.Seek(key) { - dataBH, n := decodeBlockHandle(index.Value()) - if n == 0 { - r.err = r.newErrCorruptedBH(r.indexBH, "bad data block handle") - return - } - offset = int64(dataBH.offset) - return - } - err = index.Error() - if err == nil { - offset = r.dataEnd - } - return -} - -// Release implements util.Releaser. -// It also close the file if it is an io.Closer. 
-func (r *Reader) Release() { - r.mu.Lock() - defer r.mu.Unlock() - - if closer, ok := r.reader.(io.Closer); ok { - closer.Close() - } - if r.indexBlock != nil { - r.indexBlock.Release() - r.indexBlock = nil - } - if r.filterBlock != nil { - r.filterBlock.Release() - r.filterBlock = nil - } - r.reader = nil - r.cache = nil - r.bpool = nil - r.err = ErrReaderReleased -} - -// NewReader creates a new initialized table reader for the file. -// The fi, cache and bpool is optional and can be nil. -// -// The returned table reader instance is safe for concurrent use. -func NewReader(f io.ReaderAt, size int64, fd storage.FileDesc, cache *cache.NamespaceGetter, bpool *util.BufferPool, o *opt.Options) (*Reader, error) { - if f == nil { - return nil, errors.New("leveldb/table: nil file") - } - - r := &Reader{ - fd: fd, - reader: f, - cache: cache, - bpool: bpool, - o: o, - cmp: o.GetComparer(), - verifyChecksum: o.GetStrict(opt.StrictBlockChecksum), - } - - if size < footerLen { - r.err = r.newErrCorrupted(0, size, "table", "too small") - return r, nil - } - - footerPos := size - footerLen - var footer [footerLen]byte - if _, err := r.reader.ReadAt(footer[:], footerPos); err != nil && err != io.EOF { - return nil, err - } - if string(footer[footerLen-len(magic):footerLen]) != magic { - r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad magic number") - return r, nil - } - - var n int - // Decode the metaindex block handle. - r.metaBH, n = decodeBlockHandle(footer[:]) - if n == 0 { - r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad metaindex block handle") - return r, nil - } - - // Decode the index block handle. - r.indexBH, n = decodeBlockHandle(footer[n:]) - if n == 0 { - r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad index block handle") - return r, nil - } - - // Read metaindex block. 
- metaBlock, err := r.readBlock(r.metaBH, true) - if err != nil { - if errors.IsCorrupted(err) { - r.err = err - return r, nil - } - return nil, err - } - - // Set data end. - r.dataEnd = int64(r.metaBH.offset) - - // Read metaindex. - metaIter := r.newBlockIter(metaBlock, nil, nil, true) - for metaIter.Next() { - key := string(metaIter.Key()) - if !strings.HasPrefix(key, "filter.") { - continue - } - fn := key[7:] - if f0 := o.GetFilter(); f0 != nil && f0.Name() == fn { - r.filter = f0 - } else { - for _, f0 := range o.GetAltFilters() { - if f0.Name() == fn { - r.filter = f0 - break - } - } - } - if r.filter != nil { - filterBH, n := decodeBlockHandle(metaIter.Value()) - if n == 0 { - continue - } - r.filterBH = filterBH - // Update data end. - r.dataEnd = int64(filterBH.offset) - break - } - } - metaIter.Release() - metaBlock.Release() - - // Cache index and filter block locally, since we don't have global cache. - if cache == nil { - r.indexBlock, err = r.readBlock(r.indexBH, true) - if err != nil { - if errors.IsCorrupted(err) { - r.err = err - return r, nil - } - return nil, err - } - if r.filter != nil { - r.filterBlock, err = r.readFilterBlock(r.filterBH) - if err != nil { - if !errors.IsCorrupted(err) { - return nil, err - } - - // Don't use filter then. - r.filter = nil - } - } - } - - return r, nil -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/table/table.go b/vendor/github.com/syndtr/goleveldb/leveldb/table/table.go deleted file mode 100644 index beacdc1f..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/table/table.go +++ /dev/null @@ -1,177 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package table allows read and write sorted key/value. 
-package table - -import ( - "encoding/binary" -) - -/* -Table: - -Table is consist of one or more data blocks, an optional filter block -a metaindex block, an index block and a table footer. Metaindex block -is a special block used to keep parameters of the table, such as filter -block name and its block handle. Index block is a special block used to -keep record of data blocks offset and length, index block use one as -restart interval. The key used by index block are the last key of preceding -block, shorter separator of adjacent blocks or shorter successor of the -last key of the last block. Filter block is an optional block contains -sequence of filter data generated by a filter generator. - -Table data structure: - + optional - / - +--------------+--------------+--------------+------+-------+-----------------+-------------+--------+ - | data block 1 | ... | data block n | filter block | metaindex block | index block | footer | - +--------------+--------------+--------------+--------------+-----------------+-------------+--------+ - - Each block followed by a 5-bytes trailer contains compression type and checksum. - -Table block trailer: - - +---------------------------+-------------------+ - | compression type (1-byte) | checksum (4-byte) | - +---------------------------+-------------------+ - - The checksum is a CRC-32 computed using Castagnoli's polynomial. Compression - type also included in the checksum. - -Table footer: - - +------------------- 40-bytes -------------------+ - / \ - +------------------------+--------------------+------+-----------------+ - | metaindex block handle / index block handle / ---- | magic (8-bytes) | - +------------------------+--------------------+------+-----------------+ - - The magic are first 64-bit of SHA-1 sum of "http://code.google.com/p/leveldb/". - -NOTE: All fixed-length integer are little-endian. -*/ - -/* -Block: - -Block is consist of one or more key/value entries and a block trailer. 
-Block entry shares key prefix with its preceding key until a restart -point reached. A block should contains at least one restart point. -First restart point are always zero. - -Block data structure: - - + restart point + restart point (depends on restart interval) - / / - +---------------+---------------+---------------+---------------+---------+ - | block entry 1 | block entry 2 | ... | block entry n | trailer | - +---------------+---------------+---------------+---------------+---------+ - -Key/value entry: - - +---- key len ----+ - / \ - +-------+---------+-----------+---------+--------------------+--------------+----------------+ - | shared (varint) | not shared (varint) | value len (varint) | key (varlen) | value (varlen) | - +-----------------+---------------------+--------------------+--------------+----------------+ - - Block entry shares key prefix with its preceding key: - Conditions: - restart_interval=2 - entry one : key=deck,value=v1 - entry two : key=dock,value=v2 - entry three: key=duck,value=v3 - The entries will be encoded as follow: - - + restart point (offset=0) + restart point (offset=16) - / / - +-----+-----+-----+----------+--------+-----+-----+-----+---------+--------+-----+-----+-----+----------+--------+ - | 0 | 4 | 2 | "deck" | "v1" | 1 | 3 | 2 | "ock" | "v2" | 0 | 4 | 2 | "duck" | "v3" | - +-----+-----+-----+----------+--------+-----+-----+-----+---------+--------+-----+-----+-----+----------+--------+ - \ / \ / \ / - +----------- entry one -----------+ +----------- entry two ----------+ +---------- entry three ----------+ - - The block trailer will contains two restart points: - - +------------+-----------+--------+ - | 0 | 16 | 2 | - +------------+-----------+---+----+ - \ / \ - +-- restart points --+ + restart points length - -Block trailer: - - +-- 4-bytes --+ - / \ - +-----------------+-----------------+-----------------+------------------------------+ - | restart point 1 | .... 
| restart point n | restart points len (4-bytes) | - +-----------------+-----------------+-----------------+------------------------------+ - - -NOTE: All fixed-length integer are little-endian. -*/ - -/* -Filter block: - -Filter block consist of one or more filter data and a filter block trailer. -The trailer contains filter data offsets, a trailer offset and a 1-byte base Lg. - -Filter block data structure: - - + offset 1 + offset 2 + offset n + trailer offset - / / / / - +---------------+---------------+---------------+---------+ - | filter data 1 | ... | filter data n | trailer | - +---------------+---------------+---------------+---------+ - -Filter block trailer: - - +- 4-bytes -+ - / \ - +---------------+---------------+---------------+-------------------------------+------------------+ - | data 1 offset | .... | data n offset | data-offsets offset (4-bytes) | base Lg (1-byte) | - +-------------- +---------------+---------------+-------------------------------+------------------+ - - -NOTE: All fixed-length integer are little-endian. -*/ - -const ( - blockTrailerLen = 5 - footerLen = 48 - - magic = "\x57\xfb\x80\x8b\x24\x75\x47\xdb" - - // The block type gives the per-block compression format. - // These constants are part of the file format and should not be changed. 
- blockTypeNoCompression = 0 - blockTypeSnappyCompression = 1 - - // Generate new filter every 2KB of data - filterBaseLg = 11 - filterBase = 1 << filterBaseLg -) - -type blockHandle struct { - offset, length uint64 -} - -func decodeBlockHandle(src []byte) (blockHandle, int) { - offset, n := binary.Uvarint(src) - length, m := binary.Uvarint(src[n:]) - if n == 0 || m == 0 { - return blockHandle{}, 0 - } - return blockHandle{offset, length}, n + m -} - -func encodeBlockHandle(dst []byte, b blockHandle) int { - n := binary.PutUvarint(dst, b.offset) - m := binary.PutUvarint(dst[n:], b.length) - return n + m -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/table/writer.go b/vendor/github.com/syndtr/goleveldb/leveldb/table/writer.go deleted file mode 100644 index b96b271d..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/table/writer.go +++ /dev/null @@ -1,375 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package table - -import ( - "encoding/binary" - "errors" - "fmt" - "io" - - "github.com/golang/snappy" - - "github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/syndtr/goleveldb/leveldb/filter" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/util" -) - -func sharedPrefixLen(a, b []byte) int { - i, n := 0, len(a) - if n > len(b) { - n = len(b) - } - for i < n && a[i] == b[i] { - i++ - } - return i -} - -type blockWriter struct { - restartInterval int - buf util.Buffer - nEntries int - prevKey []byte - restarts []uint32 - scratch []byte -} - -func (w *blockWriter) append(key, value []byte) { - nShared := 0 - if w.nEntries%w.restartInterval == 0 { - w.restarts = append(w.restarts, uint32(w.buf.Len())) - } else { - nShared = sharedPrefixLen(w.prevKey, key) - } - n := binary.PutUvarint(w.scratch[0:], uint64(nShared)) - n += binary.PutUvarint(w.scratch[n:], uint64(len(key)-nShared)) - n += binary.PutUvarint(w.scratch[n:], uint64(len(value))) - w.buf.Write(w.scratch[:n]) - w.buf.Write(key[nShared:]) - w.buf.Write(value) - w.prevKey = append(w.prevKey[:0], key...) - w.nEntries++ -} - -func (w *blockWriter) finish() { - // Write restarts entry. - if w.nEntries == 0 { - // Must have at least one restart entry. 
- w.restarts = append(w.restarts, 0) - } - w.restarts = append(w.restarts, uint32(len(w.restarts))) - for _, x := range w.restarts { - buf4 := w.buf.Alloc(4) - binary.LittleEndian.PutUint32(buf4, x) - } -} - -func (w *blockWriter) reset() { - w.buf.Reset() - w.nEntries = 0 - w.restarts = w.restarts[:0] -} - -func (w *blockWriter) bytesLen() int { - restartsLen := len(w.restarts) - if restartsLen == 0 { - restartsLen = 1 - } - return w.buf.Len() + 4*restartsLen + 4 -} - -type filterWriter struct { - generator filter.FilterGenerator - buf util.Buffer - nKeys int - offsets []uint32 -} - -func (w *filterWriter) add(key []byte) { - if w.generator == nil { - return - } - w.generator.Add(key) - w.nKeys++ -} - -func (w *filterWriter) flush(offset uint64) { - if w.generator == nil { - return - } - for x := int(offset / filterBase); x > len(w.offsets); { - w.generate() - } -} - -func (w *filterWriter) finish() { - if w.generator == nil { - return - } - // Generate last keys. - - if w.nKeys > 0 { - w.generate() - } - w.offsets = append(w.offsets, uint32(w.buf.Len())) - for _, x := range w.offsets { - buf4 := w.buf.Alloc(4) - binary.LittleEndian.PutUint32(buf4, x) - } - w.buf.WriteByte(filterBaseLg) -} - -func (w *filterWriter) generate() { - // Record offset. - w.offsets = append(w.offsets, uint32(w.buf.Len())) - // Generate filters. - if w.nKeys > 0 { - w.generator.Generate(&w.buf) - w.nKeys = 0 - } -} - -// Writer is a table writer. -type Writer struct { - writer io.Writer - err error - // Options - cmp comparer.Comparer - filter filter.Filter - compression opt.Compression - blockSize int - - dataBlock blockWriter - indexBlock blockWriter - filterBlock filterWriter - pendingBH blockHandle - offset uint64 - nEntries int - // Scratch allocated enough for 5 uvarint. Block writer should not use - // first 20-bytes since it will be used to encode block handle, which - // then passed to the block writer itself. 
- scratch [50]byte - comparerScratch []byte - compressionScratch []byte -} - -func (w *Writer) writeBlock(buf *util.Buffer, compression opt.Compression) (bh blockHandle, err error) { - // Compress the buffer if necessary. - var b []byte - if compression == opt.SnappyCompression { - // Allocate scratch enough for compression and block trailer. - if n := snappy.MaxEncodedLen(buf.Len()) + blockTrailerLen; len(w.compressionScratch) < n { - w.compressionScratch = make([]byte, n) - } - compressed := snappy.Encode(w.compressionScratch, buf.Bytes()) - n := len(compressed) - b = compressed[:n+blockTrailerLen] - b[n] = blockTypeSnappyCompression - } else { - tmp := buf.Alloc(blockTrailerLen) - tmp[0] = blockTypeNoCompression - b = buf.Bytes() - } - - // Calculate the checksum. - n := len(b) - 4 - checksum := util.NewCRC(b[:n]).Value() - binary.LittleEndian.PutUint32(b[n:], checksum) - - // Write the buffer to the file. - _, err = w.writer.Write(b) - if err != nil { - return - } - bh = blockHandle{w.offset, uint64(len(b) - blockTrailerLen)} - w.offset += uint64(len(b)) - return -} - -func (w *Writer) flushPendingBH(key []byte) { - if w.pendingBH.length == 0 { - return - } - var separator []byte - if len(key) == 0 { - separator = w.cmp.Successor(w.comparerScratch[:0], w.dataBlock.prevKey) - } else { - separator = w.cmp.Separator(w.comparerScratch[:0], w.dataBlock.prevKey, key) - } - if separator == nil { - separator = w.dataBlock.prevKey - } else { - w.comparerScratch = separator - } - n := encodeBlockHandle(w.scratch[:20], w.pendingBH) - // Append the block handle to the index block. - w.indexBlock.append(separator, w.scratch[:n]) - // Reset prev key of the data block. - w.dataBlock.prevKey = w.dataBlock.prevKey[:0] - // Clear pending block handle. 
- w.pendingBH = blockHandle{} -} - -func (w *Writer) finishBlock() error { - w.dataBlock.finish() - bh, err := w.writeBlock(&w.dataBlock.buf, w.compression) - if err != nil { - return err - } - w.pendingBH = bh - // Reset the data block. - w.dataBlock.reset() - // Flush the filter block. - w.filterBlock.flush(w.offset) - return nil -} - -// Append appends key/value pair to the table. The keys passed must -// be in increasing order. -// -// It is safe to modify the contents of the arguments after Append returns. -func (w *Writer) Append(key, value []byte) error { - if w.err != nil { - return w.err - } - if w.nEntries > 0 && w.cmp.Compare(w.dataBlock.prevKey, key) >= 0 { - w.err = fmt.Errorf("leveldb/table: Writer: keys are not in increasing order: %q, %q", w.dataBlock.prevKey, key) - return w.err - } - - w.flushPendingBH(key) - // Append key/value pair to the data block. - w.dataBlock.append(key, value) - // Add key to the filter block. - w.filterBlock.add(key) - - // Finish the data block if block size target reached. - if w.dataBlock.bytesLen() >= w.blockSize { - if err := w.finishBlock(); err != nil { - w.err = err - return w.err - } - } - w.nEntries++ - return nil -} - -// BlocksLen returns number of blocks written so far. -func (w *Writer) BlocksLen() int { - n := w.indexBlock.nEntries - if w.pendingBH.length > 0 { - // Includes the pending block. - n++ - } - return n -} - -// EntriesLen returns number of entries added so far. -func (w *Writer) EntriesLen() int { - return w.nEntries -} - -// BytesLen returns number of bytes written so far. -func (w *Writer) BytesLen() int { - return int(w.offset) -} - -// Close will finalize the table. Calling Append is not possible -// after Close, but calling BlocksLen, EntriesLen and BytesLen -// is still possible. -func (w *Writer) Close() error { - if w.err != nil { - return w.err - } - - // Write the last data block. Or empty data block if there - // aren't any data blocks at all. 
- if w.dataBlock.nEntries > 0 || w.nEntries == 0 { - if err := w.finishBlock(); err != nil { - w.err = err - return w.err - } - } - w.flushPendingBH(nil) - - // Write the filter block. - var filterBH blockHandle - w.filterBlock.finish() - if buf := &w.filterBlock.buf; buf.Len() > 0 { - filterBH, w.err = w.writeBlock(buf, opt.NoCompression) - if w.err != nil { - return w.err - } - } - - // Write the metaindex block. - if filterBH.length > 0 { - key := []byte("filter." + w.filter.Name()) - n := encodeBlockHandle(w.scratch[:20], filterBH) - w.dataBlock.append(key, w.scratch[:n]) - } - w.dataBlock.finish() - metaindexBH, err := w.writeBlock(&w.dataBlock.buf, w.compression) - if err != nil { - w.err = err - return w.err - } - - // Write the index block. - w.indexBlock.finish() - indexBH, err := w.writeBlock(&w.indexBlock.buf, w.compression) - if err != nil { - w.err = err - return w.err - } - - // Write the table footer. - footer := w.scratch[:footerLen] - for i := range footer { - footer[i] = 0 - } - n := encodeBlockHandle(footer, metaindexBH) - encodeBlockHandle(footer[n:], indexBH) - copy(footer[footerLen-len(magic):], magic) - if _, err := w.writer.Write(footer); err != nil { - w.err = err - return w.err - } - w.offset += footerLen - - w.err = errors.New("leveldb/table: writer is closed") - return nil -} - -// NewWriter creates a new initialized table writer for the file. -// -// Table writer is not safe for concurrent use. -func NewWriter(f io.Writer, o *opt.Options) *Writer { - w := &Writer{ - writer: f, - cmp: o.GetComparer(), - filter: o.GetFilter(), - compression: o.GetCompression(), - blockSize: o.GetBlockSize(), - comparerScratch: make([]byte, 0), - } - // data block - w.dataBlock.restartInterval = o.GetBlockRestartInterval() - // The first 20-bytes are used for encoding block handle. 
- w.dataBlock.scratch = w.scratch[20:] - // index block - w.indexBlock.restartInterval = 1 - w.indexBlock.scratch = w.scratch[20:] - // filter block - if w.filter != nil { - w.filterBlock.generator = w.filter.NewGenerator() - w.filterBlock.flush(0) - } - return w -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util.go b/vendor/github.com/syndtr/goleveldb/leveldb/util.go deleted file mode 100644 index 0e2b519e..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/util.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "fmt" - "sort" - - "github.com/syndtr/goleveldb/leveldb/storage" -) - -func shorten(str string) string { - if len(str) <= 8 { - return str - } - return str[:3] + ".." + str[len(str)-3:] -} - -var bunits = [...]string{"", "Ki", "Mi", "Gi", "Ti"} - -func shortenb(bytes int) string { - i := 0 - for ; bytes > 1024 && i < 4; i++ { - bytes /= 1024 - } - return fmt.Sprintf("%d%sB", bytes, bunits[i]) -} - -func sshortenb(bytes int) string { - if bytes == 0 { - return "~" - } - sign := "+" - if bytes < 0 { - sign = "-" - bytes *= -1 - } - i := 0 - for ; bytes > 1024 && i < 4; i++ { - bytes /= 1024 - } - return fmt.Sprintf("%s%d%sB", sign, bytes, bunits[i]) -} - -func sint(x int) string { - if x == 0 { - return "~" - } - sign := "+" - if x < 0 { - sign = "-" - x *= -1 - } - return fmt.Sprintf("%s%d", sign, x) -} - -func minInt(a, b int) int { - if a < b { - return a - } - return b -} - -func maxInt(a, b int) int { - if a > b { - return a - } - return b -} - -type fdSorter []storage.FileDesc - -func (p fdSorter) Len() int { - return len(p) -} - -func (p fdSorter) Less(i, j int) bool { - return p[i].Num < p[j].Num -} - -func (p fdSorter) Swap(i, j int) { - p[i], p[j] = p[j], p[i] -} - -func sortFds(fds []storage.FileDesc) { - sort.Sort(fdSorter(fds)) -} - 
-func ensureBuffer(b []byte, n int) []byte { - if cap(b) < n { - return make([]byte, n) - } - return b[:n] -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer.go b/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer.go deleted file mode 100644 index 21de2425..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer.go +++ /dev/null @@ -1,293 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package util - -// This a copy of Go std bytes.Buffer with some modification -// and some features stripped. - -import ( - "bytes" - "io" -) - -// A Buffer is a variable-sized buffer of bytes with Read and Write methods. -// The zero value for Buffer is an empty buffer ready to use. -type Buffer struct { - buf []byte // contents are the bytes buf[off : len(buf)] - off int // read at &buf[off], write at &buf[len(buf)] - bootstrap [64]byte // memory to hold first slice; helps small buffers (Printf) avoid allocation. -} - -// Bytes returns a slice of the contents of the unread portion of the buffer; -// len(b.Bytes()) == b.Len(). If the caller changes the contents of the -// returned slice, the contents of the buffer will change provided there -// are no intervening method calls on the Buffer. -func (b *Buffer) Bytes() []byte { return b.buf[b.off:] } - -// String returns the contents of the unread portion of the buffer -// as a string. If the Buffer is a nil pointer, it returns "". -func (b *Buffer) String() string { - if b == nil { - // Special case, useful in debugging. - return "" - } - return string(b.buf[b.off:]) -} - -// Len returns the number of bytes of the unread portion of the buffer; -// b.Len() == len(b.Bytes()). -func (b *Buffer) Len() int { return len(b.buf) - b.off } - -// Truncate discards all but the first n unread bytes from the buffer. -// It panics if n is negative or greater than the length of the buffer. 
-func (b *Buffer) Truncate(n int) { - switch { - case n < 0 || n > b.Len(): - panic("leveldb/util.Buffer: truncation out of range") - case n == 0: - // Reuse buffer space. - b.off = 0 - } - b.buf = b.buf[0 : b.off+n] -} - -// Reset resets the buffer so it has no content. -// b.Reset() is the same as b.Truncate(0). -func (b *Buffer) Reset() { b.Truncate(0) } - -// grow grows the buffer to guarantee space for n more bytes. -// It returns the index where bytes should be written. -// If the buffer can't grow it will panic with bytes.ErrTooLarge. -func (b *Buffer) grow(n int) int { - m := b.Len() - // If buffer is empty, reset to recover space. - if m == 0 && b.off != 0 { - b.Truncate(0) - } - if len(b.buf)+n > cap(b.buf) { - var buf []byte - if b.buf == nil && n <= len(b.bootstrap) { - buf = b.bootstrap[0:] - } else if m+n <= cap(b.buf)/2 { - // We can slide things down instead of allocating a new - // slice. We only need m+n <= cap(b.buf) to slide, but - // we instead let capacity get twice as large so we - // don't spend all our time copying. - copy(b.buf[:], b.buf[b.off:]) - buf = b.buf[:m] - } else { - // not enough space anywhere - buf = makeSlice(2*cap(b.buf) + n) - copy(buf, b.buf[b.off:]) - } - b.buf = buf - b.off = 0 - } - b.buf = b.buf[0 : b.off+m+n] - return b.off + m -} - -// Alloc allocs n bytes of slice from the buffer, growing the buffer as -// needed. If n is negative, Alloc will panic. -// If the buffer can't grow it will panic with bytes.ErrTooLarge. -func (b *Buffer) Alloc(n int) []byte { - if n < 0 { - panic("leveldb/util.Buffer.Alloc: negative count") - } - m := b.grow(n) - return b.buf[m:] -} - -// Grow grows the buffer's capacity, if necessary, to guarantee space for -// another n bytes. After Grow(n), at least n bytes can be written to the -// buffer without another allocation. -// If n is negative, Grow will panic. -// If the buffer can't grow it will panic with bytes.ErrTooLarge. 
-func (b *Buffer) Grow(n int) { - if n < 0 { - panic("leveldb/util.Buffer.Grow: negative count") - } - m := b.grow(n) - b.buf = b.buf[0:m] -} - -// Write appends the contents of p to the buffer, growing the buffer as -// needed. The return value n is the length of p; err is always nil. If the -// buffer becomes too large, Write will panic with bytes.ErrTooLarge. -func (b *Buffer) Write(p []byte) (n int, err error) { - m := b.grow(len(p)) - return copy(b.buf[m:], p), nil -} - -// MinRead is the minimum slice size passed to a Read call by -// Buffer.ReadFrom. As long as the Buffer has at least MinRead bytes beyond -// what is required to hold the contents of r, ReadFrom will not grow the -// underlying buffer. -const MinRead = 512 - -// ReadFrom reads data from r until EOF and appends it to the buffer, growing -// the buffer as needed. The return value n is the number of bytes read. Any -// error except io.EOF encountered during the read is also returned. If the -// buffer becomes too large, ReadFrom will panic with bytes.ErrTooLarge. -func (b *Buffer) ReadFrom(r io.Reader) (n int64, err error) { - // If buffer is empty, reset to recover space. - if b.off >= len(b.buf) { - b.Truncate(0) - } - for { - if free := cap(b.buf) - len(b.buf); free < MinRead { - // not enough space at end - newBuf := b.buf - if b.off+free < MinRead { - // not enough space using beginning of buffer; - // double buffer capacity - newBuf = makeSlice(2*cap(b.buf) + MinRead) - } - copy(newBuf, b.buf[b.off:]) - b.buf = newBuf[:len(b.buf)-b.off] - b.off = 0 - } - m, e := r.Read(b.buf[len(b.buf):cap(b.buf)]) - b.buf = b.buf[0 : len(b.buf)+m] - n += int64(m) - if e == io.EOF { - break - } - if e != nil { - return n, e - } - } - return n, nil // err is EOF, so return nil explicitly -} - -// makeSlice allocates a slice of size n. If the allocation fails, it panics -// with bytes.ErrTooLarge. -func makeSlice(n int) []byte { - // If the make fails, give a known error. 
- defer func() { - if recover() != nil { - panic(bytes.ErrTooLarge) - } - }() - return make([]byte, n) -} - -// WriteTo writes data to w until the buffer is drained or an error occurs. -// The return value n is the number of bytes written; it always fits into an -// int, but it is int64 to match the io.WriterTo interface. Any error -// encountered during the write is also returned. -func (b *Buffer) WriteTo(w io.Writer) (n int64, err error) { - if b.off < len(b.buf) { - nBytes := b.Len() - m, e := w.Write(b.buf[b.off:]) - if m > nBytes { - panic("leveldb/util.Buffer.WriteTo: invalid Write count") - } - b.off += m - n = int64(m) - if e != nil { - return n, e - } - // all bytes should have been written, by definition of - // Write method in io.Writer - if m != nBytes { - return n, io.ErrShortWrite - } - } - // Buffer is now empty; reset. - b.Truncate(0) - return -} - -// WriteByte appends the byte c to the buffer, growing the buffer as needed. -// The returned error is always nil, but is included to match bufio.Writer's -// WriteByte. If the buffer becomes too large, WriteByte will panic with -// bytes.ErrTooLarge. -func (b *Buffer) WriteByte(c byte) error { - m := b.grow(1) - b.buf[m] = c - return nil -} - -// Read reads the next len(p) bytes from the buffer or until the buffer -// is drained. The return value n is the number of bytes read. If the -// buffer has no data to return, err is io.EOF (unless len(p) is zero); -// otherwise it is nil. -func (b *Buffer) Read(p []byte) (n int, err error) { - if b.off >= len(b.buf) { - // Buffer is empty, reset to recover space. - b.Truncate(0) - if len(p) == 0 { - return - } - return 0, io.EOF - } - n = copy(p, b.buf[b.off:]) - b.off += n - return -} - -// Next returns a slice containing the next n bytes from the buffer, -// advancing the buffer as if the bytes had been returned by Read. -// If there are fewer than n bytes in the buffer, Next returns the entire buffer. 
-// The slice is only valid until the next call to a read or write method. -func (b *Buffer) Next(n int) []byte { - m := b.Len() - if n > m { - n = m - } - data := b.buf[b.off : b.off+n] - b.off += n - return data -} - -// ReadByte reads and returns the next byte from the buffer. -// If no byte is available, it returns error io.EOF. -func (b *Buffer) ReadByte() (c byte, err error) { - if b.off >= len(b.buf) { - // Buffer is empty, reset to recover space. - b.Truncate(0) - return 0, io.EOF - } - c = b.buf[b.off] - b.off++ - return c, nil -} - -// ReadBytes reads until the first occurrence of delim in the input, -// returning a slice containing the data up to and including the delimiter. -// If ReadBytes encounters an error before finding a delimiter, -// it returns the data read before the error and the error itself (often io.EOF). -// ReadBytes returns err != nil if and only if the returned data does not end in -// delim. -func (b *Buffer) ReadBytes(delim byte) (line []byte, err error) { - slice, err := b.readSlice(delim) - // return a copy of slice. The buffer's backing array may - // be overwritten by later calls. - line = append(line, slice...) - return -} - -// readSlice is like ReadBytes but returns a reference to internal buffer data. -func (b *Buffer) readSlice(delim byte) (line []byte, err error) { - i := bytes.IndexByte(b.buf[b.off:], delim) - end := b.off + i + 1 - if i < 0 { - end = len(b.buf) - err = io.EOF - } - line = b.buf[b.off:end] - b.off = end - return line, err -} - -// NewBuffer creates and initializes a new Buffer using buf as its initial -// contents. It is intended to prepare a Buffer to read existing data. It -// can also be used to size the internal buffer for writing. To do that, -// buf should have the desired capacity but a length of zero. -// -// In most cases, new(Buffer) (or just declaring a Buffer variable) is -// sufficient to initialize a Buffer. 
-func NewBuffer(buf []byte) *Buffer { return &Buffer{buf: buf} } diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go b/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go deleted file mode 100644 index 2f3db974..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go +++ /dev/null @@ -1,239 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package util - -import ( - "fmt" - "sync" - "sync/atomic" - "time" -) - -type buffer struct { - b []byte - miss int -} - -// BufferPool is a 'buffer pool'. -type BufferPool struct { - pool [6]chan []byte - size [5]uint32 - sizeMiss [5]uint32 - sizeHalf [5]uint32 - baseline [4]int - baseline0 int - - mu sync.RWMutex - closed bool - closeC chan struct{} - - get uint32 - put uint32 - half uint32 - less uint32 - equal uint32 - greater uint32 - miss uint32 -} - -func (p *BufferPool) poolNum(n int) int { - if n <= p.baseline0 && n > p.baseline0/2 { - return 0 - } - for i, x := range p.baseline { - if n <= x { - return i + 1 - } - } - return len(p.baseline) + 1 -} - -// Get returns buffer with length of n. -func (p *BufferPool) Get(n int) []byte { - if p == nil { - return make([]byte, n) - } - - p.mu.RLock() - defer p.mu.RUnlock() - - if p.closed { - return make([]byte, n) - } - - atomic.AddUint32(&p.get, 1) - - poolNum := p.poolNum(n) - pool := p.pool[poolNum] - if poolNum == 0 { - // Fast path. 
- select { - case b := <-pool: - switch { - case cap(b) > n: - if cap(b)-n >= n { - atomic.AddUint32(&p.half, 1) - select { - case pool <- b: - default: - } - return make([]byte, n) - } else { - atomic.AddUint32(&p.less, 1) - return b[:n] - } - case cap(b) == n: - atomic.AddUint32(&p.equal, 1) - return b[:n] - default: - atomic.AddUint32(&p.greater, 1) - } - default: - atomic.AddUint32(&p.miss, 1) - } - - return make([]byte, n, p.baseline0) - } else { - sizePtr := &p.size[poolNum-1] - - select { - case b := <-pool: - switch { - case cap(b) > n: - if cap(b)-n >= n { - atomic.AddUint32(&p.half, 1) - sizeHalfPtr := &p.sizeHalf[poolNum-1] - if atomic.AddUint32(sizeHalfPtr, 1) == 20 { - atomic.StoreUint32(sizePtr, uint32(cap(b)/2)) - atomic.StoreUint32(sizeHalfPtr, 0) - } else { - select { - case pool <- b: - default: - } - } - return make([]byte, n) - } else { - atomic.AddUint32(&p.less, 1) - return b[:n] - } - case cap(b) == n: - atomic.AddUint32(&p.equal, 1) - return b[:n] - default: - atomic.AddUint32(&p.greater, 1) - if uint32(cap(b)) >= atomic.LoadUint32(sizePtr) { - select { - case pool <- b: - default: - } - } - } - default: - atomic.AddUint32(&p.miss, 1) - } - - if size := atomic.LoadUint32(sizePtr); uint32(n) > size { - if size == 0 { - atomic.CompareAndSwapUint32(sizePtr, 0, uint32(n)) - } else { - sizeMissPtr := &p.sizeMiss[poolNum-1] - if atomic.AddUint32(sizeMissPtr, 1) == 20 { - atomic.StoreUint32(sizePtr, uint32(n)) - atomic.StoreUint32(sizeMissPtr, 0) - } - } - return make([]byte, n) - } else { - return make([]byte, n, size) - } - } -} - -// Put adds given buffer to the pool. 
-func (p *BufferPool) Put(b []byte) { - if p == nil { - return - } - - p.mu.RLock() - defer p.mu.RUnlock() - - if p.closed { - return - } - - atomic.AddUint32(&p.put, 1) - - pool := p.pool[p.poolNum(cap(b))] - select { - case pool <- b: - default: - } - -} - -func (p *BufferPool) Close() { - if p == nil { - return - } - - p.mu.Lock() - if !p.closed { - p.closed = true - p.closeC <- struct{}{} - } - p.mu.Unlock() -} - -func (p *BufferPool) String() string { - if p == nil { - return "" - } - - return fmt.Sprintf("BufferPool{B·%d Z·%v Zm·%v Zh·%v G·%d P·%d H·%d <·%d =·%d >·%d M·%d}", - p.baseline0, p.size, p.sizeMiss, p.sizeHalf, p.get, p.put, p.half, p.less, p.equal, p.greater, p.miss) -} - -func (p *BufferPool) drain() { - ticker := time.NewTicker(2 * time.Second) - defer ticker.Stop() - for { - select { - case <-ticker.C: - for _, ch := range p.pool { - select { - case <-ch: - default: - } - } - case <-p.closeC: - close(p.closeC) - for _, ch := range p.pool { - close(ch) - } - return - } - } -} - -// NewBufferPool creates a new initialized 'buffer pool'. -func NewBufferPool(baseline int) *BufferPool { - if baseline <= 0 { - panic("baseline can't be <= 0") - } - p := &BufferPool{ - baseline0: baseline, - baseline: [...]int{baseline / 4, baseline / 2, baseline * 2, baseline * 4}, - closeC: make(chan struct{}, 1), - } - for i, cap := range []int{2, 2, 4, 4, 2, 1} { - p.pool[i] = make(chan []byte, cap) - } - go p.drain() - return p -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util/crc32.go b/vendor/github.com/syndtr/goleveldb/leveldb/util/crc32.go deleted file mode 100644 index 631c9d61..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/util/crc32.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2011 The LevelDB-Go Authors. All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package util - -import ( - "hash/crc32" -) - -var table = crc32.MakeTable(crc32.Castagnoli) - -// CRC is a CRC-32 checksum computed using Castagnoli's polynomial. -type CRC uint32 - -// NewCRC creates a new crc based on the given bytes. -func NewCRC(b []byte) CRC { - return CRC(0).Update(b) -} - -// Update updates the crc with the given bytes. -func (c CRC) Update(b []byte) CRC { - return CRC(crc32.Update(uint32(c), table, b)) -} - -// Value returns a masked crc. -func (c CRC) Value() uint32 { - return uint32(c>>15|c<<17) + 0xa282ead8 -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util/hash.go b/vendor/github.com/syndtr/goleveldb/leveldb/util/hash.go deleted file mode 100644 index 7f3fa4e2..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/util/hash.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package util - -import ( - "encoding/binary" -) - -// Hash return hash of the given data. 
-func Hash(data []byte, seed uint32) uint32 { - // Similar to murmur hash - const ( - m = uint32(0xc6a4a793) - r = uint32(24) - ) - var ( - h = seed ^ (uint32(len(data)) * m) - i int - ) - - for n := len(data) - len(data)%4; i < n; i += 4 { - h += binary.LittleEndian.Uint32(data[i:]) - h *= m - h ^= (h >> 16) - } - - switch len(data) - i { - default: - panic("not reached") - case 3: - h += uint32(data[i+2]) << 16 - fallthrough - case 2: - h += uint32(data[i+1]) << 8 - fallthrough - case 1: - h += uint32(data[i]) - h *= m - h ^= (h >> r) - case 0: - } - - return h -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util/range.go b/vendor/github.com/syndtr/goleveldb/leveldb/util/range.go deleted file mode 100644 index 85159583..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/util/range.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package util - -// Range is a key range. -type Range struct { - // Start of the key range, include in the range. - Start []byte - - // Limit of the key range, not include in the range. - Limit []byte -} - -// BytesPrefix returns key range that satisfy the given prefix. -// This only applicable for the standard 'bytes comparer'. -func BytesPrefix(prefix []byte) *Range { - var limit []byte - for i := len(prefix) - 1; i >= 0; i-- { - c := prefix[i] - if c < 0xff { - limit = make([]byte, i+1) - copy(limit, prefix) - limit[i] = c + 1 - break - } - } - return &Range{prefix, limit} -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util/util.go b/vendor/github.com/syndtr/goleveldb/leveldb/util/util.go deleted file mode 100644 index 80614afc..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/util/util.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright (c) 2013, Suryandaru Triandana -// All rights reserved. 
-// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package util provides utilities used throughout leveldb. -package util - -import ( - "errors" -) - -var ( - ErrReleased = errors.New("leveldb: resource already relesed") - ErrHasReleaser = errors.New("leveldb: releaser already defined") -) - -// Releaser is the interface that wraps the basic Release method. -type Releaser interface { - // Release releases associated resources. Release should always success - // and can be called multiple times without causing error. - Release() -} - -// ReleaseSetter is the interface that wraps the basic SetReleaser method. -type ReleaseSetter interface { - // SetReleaser associates the given releaser to the resources. The - // releaser will be called once coresponding resources released. - // Calling SetReleaser with nil will clear the releaser. - // - // This will panic if a releaser already present or coresponding - // resource is already released. Releaser should be cleared first - // before assigned a new one. - SetReleaser(releaser Releaser) -} - -// BasicReleaser provides basic implementation of Releaser and ReleaseSetter. -type BasicReleaser struct { - releaser Releaser - released bool -} - -// Released returns whether Release method already called. -func (r *BasicReleaser) Released() bool { - return r.released -} - -// Release implements Releaser.Release. -func (r *BasicReleaser) Release() { - if !r.released { - if r.releaser != nil { - r.releaser.Release() - r.releaser = nil - } - r.released = true - } -} - -// SetReleaser implements ReleaseSetter.SetReleaser. 
-func (r *BasicReleaser) SetReleaser(releaser Releaser) { - if r.released { - panic(ErrReleased) - } - if r.releaser != nil && releaser != nil { - panic(ErrHasReleaser) - } - r.releaser = releaser -} - -type NoopReleaser struct{} - -func (NoopReleaser) Release() {} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/version.go b/vendor/github.com/syndtr/goleveldb/leveldb/version.go deleted file mode 100644 index 73f272af..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/version.go +++ /dev/null @@ -1,528 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "fmt" - "sync/atomic" - "unsafe" - - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/util" -) - -type tSet struct { - level int - table *tFile -} - -type version struct { - s *session - - levels []tFiles - - // Level that should be compacted next and its compaction score. - // Score < 1 means compaction is not strictly needed. These fields - // are initialized by computeCompaction() - cLevel int - cScore float64 - - cSeek unsafe.Pointer - - closing bool - ref int - released bool -} - -func newVersion(s *session) *version { - return &version{s: s} -} - -func (v *version) incref() { - if v.released { - panic("already released") - } - - v.ref++ - if v.ref == 1 { - // Incr file ref. 
- for _, tt := range v.levels { - for _, t := range tt { - v.s.addFileRef(t.fd, 1) - } - } - } -} - -func (v *version) releaseNB() { - v.ref-- - if v.ref > 0 { - return - } else if v.ref < 0 { - panic("negative version ref") - } - - for _, tt := range v.levels { - for _, t := range tt { - if v.s.addFileRef(t.fd, -1) == 0 { - v.s.tops.remove(t) - } - } - } - - v.released = true -} - -func (v *version) release() { - v.s.vmu.Lock() - v.releaseNB() - v.s.vmu.Unlock() -} - -func (v *version) walkOverlapping(aux tFiles, ikey internalKey, f func(level int, t *tFile) bool, lf func(level int) bool) { - ukey := ikey.ukey() - - // Aux level. - if aux != nil { - for _, t := range aux { - if t.overlaps(v.s.icmp, ukey, ukey) { - if !f(-1, t) { - return - } - } - } - - if lf != nil && !lf(-1) { - return - } - } - - // Walk tables level-by-level. - for level, tables := range v.levels { - if len(tables) == 0 { - continue - } - - if level == 0 { - // Level-0 files may overlap each other. Find all files that - // overlap ukey. - for _, t := range tables { - if t.overlaps(v.s.icmp, ukey, ukey) { - if !f(level, t) { - return - } - } - } - } else { - if i := tables.searchMax(v.s.icmp, ikey); i < len(tables) { - t := tables[i] - if v.s.icmp.uCompare(ukey, t.imin.ukey()) >= 0 { - if !f(level, t) { - return - } - } - } - } - - if lf != nil && !lf(level) { - return - } - } -} - -func (v *version) get(aux tFiles, ikey internalKey, ro *opt.ReadOptions, noValue bool) (value []byte, tcomp bool, err error) { - if v.closing { - return nil, false, ErrClosed - } - - ukey := ikey.ukey() - - var ( - tset *tSet - tseek bool - - // Level-0. - zfound bool - zseq uint64 - zkt keyType - zval []byte - ) - - err = ErrNotFound - - // Since entries never hop across level, finding key/value - // in smaller level make later levels irrelevant. 
- v.walkOverlapping(aux, ikey, func(level int, t *tFile) bool { - if level >= 0 && !tseek { - if tset == nil { - tset = &tSet{level, t} - } else { - tseek = true - } - } - - var ( - fikey, fval []byte - ferr error - ) - if noValue { - fikey, ferr = v.s.tops.findKey(t, ikey, ro) - } else { - fikey, fval, ferr = v.s.tops.find(t, ikey, ro) - } - - switch ferr { - case nil: - case ErrNotFound: - return true - default: - err = ferr - return false - } - - if fukey, fseq, fkt, fkerr := parseInternalKey(fikey); fkerr == nil { - if v.s.icmp.uCompare(ukey, fukey) == 0 { - // Level <= 0 may overlaps each-other. - if level <= 0 { - if fseq >= zseq { - zfound = true - zseq = fseq - zkt = fkt - zval = fval - } - } else { - switch fkt { - case keyTypeVal: - value = fval - err = nil - case keyTypeDel: - default: - panic("leveldb: invalid internalKey type") - } - return false - } - } - } else { - err = fkerr - return false - } - - return true - }, func(level int) bool { - if zfound { - switch zkt { - case keyTypeVal: - value = zval - err = nil - case keyTypeDel: - default: - panic("leveldb: invalid internalKey type") - } - return false - } - - return true - }) - - if tseek && tset.table.consumeSeek() <= 0 { - tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset)) - } - - return -} - -func (v *version) sampleSeek(ikey internalKey) (tcomp bool) { - var tset *tSet - - v.walkOverlapping(nil, ikey, func(level int, t *tFile) bool { - if tset == nil { - tset = &tSet{level, t} - return true - } - if tset.table.consumeSeek() <= 0 { - tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset)) - } - return false - }, nil) - - return -} - -func (v *version) getIterators(slice *util.Range, ro *opt.ReadOptions) (its []iterator.Iterator) { - strict := opt.GetStrict(v.s.o.Options, ro, opt.StrictReader) - for level, tables := range v.levels { - if level == 0 { - // Merge all level zero files together since they may overlap. 
- for _, t := range tables { - its = append(its, v.s.tops.newIterator(t, slice, ro)) - } - } else if len(tables) != 0 { - its = append(its, iterator.NewIndexedIterator(tables.newIndexIterator(v.s.tops, v.s.icmp, slice, ro), strict)) - } - } - return -} - -func (v *version) newStaging() *versionStaging { - return &versionStaging{base: v} -} - -// Spawn a new version based on this version. -func (v *version) spawn(r *sessionRecord) *version { - staging := v.newStaging() - staging.commit(r) - return staging.finish() -} - -func (v *version) fillRecord(r *sessionRecord) { - for level, tables := range v.levels { - for _, t := range tables { - r.addTableFile(level, t) - } - } -} - -func (v *version) tLen(level int) int { - if level < len(v.levels) { - return len(v.levels[level]) - } - return 0 -} - -func (v *version) offsetOf(ikey internalKey) (n int64, err error) { - for level, tables := range v.levels { - for _, t := range tables { - if v.s.icmp.Compare(t.imax, ikey) <= 0 { - // Entire file is before "ikey", so just add the file size - n += t.size - } else if v.s.icmp.Compare(t.imin, ikey) > 0 { - // Entire file is after "ikey", so ignore - if level > 0 { - // Files other than level 0 are sorted by meta->min, so - // no further files in this level will contain data for - // "ikey". - break - } - } else { - // "ikey" falls in the range for this table. Add the - // approximate offset of "ikey" within the table. 
- if m, err := v.s.tops.offsetOf(t, ikey); err == nil { - n += m - } else { - return 0, err - } - } - } - } - - return -} - -func (v *version) pickMemdbLevel(umin, umax []byte, maxLevel int) (level int) { - if maxLevel > 0 { - if len(v.levels) == 0 { - return maxLevel - } - if !v.levels[0].overlaps(v.s.icmp, umin, umax, true) { - var overlaps tFiles - for ; level < maxLevel; level++ { - if pLevel := level + 1; pLevel >= len(v.levels) { - return maxLevel - } else if v.levels[pLevel].overlaps(v.s.icmp, umin, umax, false) { - break - } - if gpLevel := level + 2; gpLevel < len(v.levels) { - overlaps = v.levels[gpLevel].getOverlaps(overlaps, v.s.icmp, umin, umax, false) - if overlaps.size() > int64(v.s.o.GetCompactionGPOverlaps(level)) { - break - } - } - } - } - } - return -} - -func (v *version) computeCompaction() { - // Precomputed best level for next compaction - bestLevel := int(-1) - bestScore := float64(-1) - - statFiles := make([]int, len(v.levels)) - statSizes := make([]string, len(v.levels)) - statScore := make([]string, len(v.levels)) - statTotSize := int64(0) - - for level, tables := range v.levels { - var score float64 - size := tables.size() - if level == 0 { - // We treat level-0 specially by bounding the number of files - // instead of number of bytes for two reasons: - // - // (1) With larger write-buffer sizes, it is nice not to do too - // many level-0 compaction. - // - // (2) The files in level-0 are merged on every read and - // therefore we wish to avoid too many files when the individual - // file size is small (perhaps because of a small write-buffer - // setting, or very high compression ratios, or lots of - // overwrites/deletions). 
- score = float64(len(tables)) / float64(v.s.o.GetCompactionL0Trigger()) - } else { - score = float64(size) / float64(v.s.o.GetCompactionTotalSize(level)) - } - - if score > bestScore { - bestLevel = level - bestScore = score - } - - statFiles[level] = len(tables) - statSizes[level] = shortenb(int(size)) - statScore[level] = fmt.Sprintf("%.2f", score) - statTotSize += size - } - - v.cLevel = bestLevel - v.cScore = bestScore - - v.s.logf("version@stat F·%v S·%s%v Sc·%v", statFiles, shortenb(int(statTotSize)), statSizes, statScore) -} - -func (v *version) needCompaction() bool { - return v.cScore >= 1 || atomic.LoadPointer(&v.cSeek) != nil -} - -type tablesScratch struct { - added map[int64]atRecord - deleted map[int64]struct{} -} - -type versionStaging struct { - base *version - levels []tablesScratch -} - -func (p *versionStaging) getScratch(level int) *tablesScratch { - if level >= len(p.levels) { - newLevels := make([]tablesScratch, level+1) - copy(newLevels, p.levels) - p.levels = newLevels - } - return &(p.levels[level]) -} - -func (p *versionStaging) commit(r *sessionRecord) { - // Deleted tables. - for _, r := range r.deletedTables { - scratch := p.getScratch(r.level) - if r.level < len(p.base.levels) && len(p.base.levels[r.level]) > 0 { - if scratch.deleted == nil { - scratch.deleted = make(map[int64]struct{}) - } - scratch.deleted[r.num] = struct{}{} - } - if scratch.added != nil { - delete(scratch.added, r.num) - } - } - - // New tables. - for _, r := range r.addedTables { - scratch := p.getScratch(r.level) - if scratch.added == nil { - scratch.added = make(map[int64]atRecord) - } - scratch.added[r.num] = r - if scratch.deleted != nil { - delete(scratch.deleted, r.num) - } - } -} - -func (p *versionStaging) finish() *version { - // Build new version. 
- nv := newVersion(p.base.s) - numLevel := len(p.levels) - if len(p.base.levels) > numLevel { - numLevel = len(p.base.levels) - } - nv.levels = make([]tFiles, numLevel) - for level := 0; level < numLevel; level++ { - var baseTabels tFiles - if level < len(p.base.levels) { - baseTabels = p.base.levels[level] - } - - if level < len(p.levels) { - scratch := p.levels[level] - - var nt tFiles - // Prealloc list if possible. - if n := len(baseTabels) + len(scratch.added) - len(scratch.deleted); n > 0 { - nt = make(tFiles, 0, n) - } - - // Base tables. - for _, t := range baseTabels { - if _, ok := scratch.deleted[t.fd.Num]; ok { - continue - } - if _, ok := scratch.added[t.fd.Num]; ok { - continue - } - nt = append(nt, t) - } - - // New tables. - for _, r := range scratch.added { - nt = append(nt, tableFileFromRecord(r)) - } - - if len(nt) != 0 { - // Sort tables. - if level == 0 { - nt.sortByNum() - } else { - nt.sortByKey(p.base.s.icmp) - } - - nv.levels[level] = nt - } - } else { - nv.levels[level] = baseTabels - } - } - - // Trim levels. - n := len(nv.levels) - for ; n > 0 && nv.levels[n-1] == nil; n-- { - } - nv.levels = nv.levels[:n] - - // Compute compaction score for new version. 
- nv.computeCompaction() - - return nv -} - -type versionReleaser struct { - v *version - once bool -} - -func (vr *versionReleaser) Release() { - v := vr.v - v.s.vmu.Lock() - if !vr.once { - v.releaseNB() - vr.once = true - } - v.s.vmu.Unlock() -} diff --git a/vendor/go.opentelemetry.io/otel/.codespellignore b/vendor/go.opentelemetry.io/otel/.codespellignore deleted file mode 100644 index ae6a3bcf..00000000 --- a/vendor/go.opentelemetry.io/otel/.codespellignore +++ /dev/null @@ -1,5 +0,0 @@ -ot -fo -te -collison -consequentially diff --git a/vendor/go.opentelemetry.io/otel/.codespellrc b/vendor/go.opentelemetry.io/otel/.codespellrc deleted file mode 100644 index 4afbb1fb..00000000 --- a/vendor/go.opentelemetry.io/otel/.codespellrc +++ /dev/null @@ -1,10 +0,0 @@ -# https://github.com/codespell-project/codespell -[codespell] -builtin = clear,rare,informal -check-filenames = -check-hidden = -ignore-words = .codespellignore -interactive = 1 -skip = .git,go.mod,go.sum,semconv,venv,.tools -uri-ignore-words-list = * -write = diff --git a/vendor/go.opentelemetry.io/otel/.gitattributes b/vendor/go.opentelemetry.io/otel/.gitattributes deleted file mode 100644 index 314766e9..00000000 --- a/vendor/go.opentelemetry.io/otel/.gitattributes +++ /dev/null @@ -1,3 +0,0 @@ -* text=auto eol=lf -*.{cmd,[cC][mM][dD]} text eol=crlf -*.{bat,[bB][aA][tT]} text eol=crlf diff --git a/vendor/go.opentelemetry.io/otel/.gitignore b/vendor/go.opentelemetry.io/otel/.gitignore deleted file mode 100644 index aa699376..00000000 --- a/vendor/go.opentelemetry.io/otel/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -.DS_Store -Thumbs.db - -.tools/ -venv/ -.idea/ -.vscode/ -*.iml -*.so -coverage.* -go.work -go.work.sum - -gen/ - -/example/fib/fib -/example/fib/traces.txt -/example/jaeger/jaeger -/example/namedtracer/namedtracer -/example/opencensus/opencensus -/example/passthrough/passthrough -/example/prometheus/prometheus -/example/zipkin/zipkin -/example/otel-collector/otel-collector diff --git 
a/vendor/go.opentelemetry.io/otel/.gitmodules b/vendor/go.opentelemetry.io/otel/.gitmodules deleted file mode 100644 index 38a1f569..00000000 --- a/vendor/go.opentelemetry.io/otel/.gitmodules +++ /dev/null @@ -1,3 +0,0 @@ -[submodule "opentelemetry-proto"] - path = exporters/otlp/internal/opentelemetry-proto - url = https://github.com/open-telemetry/opentelemetry-proto diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml deleted file mode 100644 index dbb6670b..00000000 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ /dev/null @@ -1,246 +0,0 @@ -# See https://github.com/golangci/golangci-lint#config-file -run: - issues-exit-code: 1 #Default - tests: true #Default - -linters: - # Disable everything by default so upgrades to not include new "default - # enabled" linters. - disable-all: true - # Specifically enable linters we want to use. - enable: - - depguard - - errcheck - - godot - - gofmt - - goimports - - gosimple - - govet - - ineffassign - - misspell - - revive - - staticcheck - - typecheck - - unused - -issues: - # Maximum issues count per one linter. - # Set to 0 to disable. - # Default: 50 - # Setting to unlimited so the linter only is run once to debug all issues. - max-issues-per-linter: 0 - # Maximum count of issues with the same text. - # Set to 0 to disable. - # Default: 3 - # Setting to unlimited so the linter only is run once to debug all issues. - max-same-issues: 0 - # Excluding configuration per-path, per-linter, per-text and per-source. - exclude-rules: - # TODO: Having appropriate comments for exported objects helps development, - # even for objects in internal packages. Appropriate comments for all - # exported objects should be added and this exclusion removed. - - path: '.*internal/.*' - text: "exported (method|function|type|const) (.+) should have comment or be unexported" - linters: - - revive - # Yes, they are, but it's okay in a test. 
- - path: _test\.go - text: "exported func.*returns unexported type.*which can be annoying to use" - linters: - - revive - # Example test functions should be treated like main. - - path: example.*_test\.go - text: "calls to (.+) only in main[(][)] or init[(][)] functions" - linters: - - revive - include: - # revive exported should have comment or be unexported. - - EXC0012 - # revive package comment should be of the form ... - - EXC0013 - -linters-settings: - depguard: - # Check the list against standard lib. - # Default: false - include-go-root: true - # A list of packages for the list type specified. - # Default: [] - packages: - - "crypto/md5" - - "crypto/sha1" - - "crypto/**/pkix" - ignore-file-rules: - - "**/*_test.go" - additional-guards: - # Do not allow testing packages in non-test files. - - list-type: denylist - include-go-root: true - packages: - - testing - - github.com/stretchr/testify - ignore-file-rules: - - "**/*_test.go" - - "**/*test/*.go" - - "**/internal/matchers/*.go" - godot: - exclude: - # Exclude links. - - '^ *\[[^]]+\]:' - # Exclude sentence fragments for lists. - - '^[ ]*[-•]' - # Exclude sentences prefixing a list. - - ':$' - goimports: - local-prefixes: go.opentelemetry.io - misspell: - locale: US - ignore-words: - - cancelled - revive: - # Sets the default failure confidence. - # This means that linting errors with less than 0.8 confidence will be ignored. 
- # Default: 0.8 - confidence: 0.01 - rules: - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#blank-imports - - name: blank-imports - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#bool-literal-in-expr - - name: bool-literal-in-expr - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#constant-logical-expr - - name: constant-logical-expr - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-as-argument - # TODO (#3372) re-enable linter when it is compatible. https://github.com/golangci/golangci-lint/issues/3280 - - name: context-as-argument - disabled: true - arguments: - allowTypesBefore: "*testing.T" - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-keys-type - - name: context-keys-type - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#deep-exit - - name: deep-exit - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#defer - - name: defer - disabled: false - arguments: - - ["call-chain", "loop"] - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#dot-imports - - name: dot-imports - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#duplicated-imports - - name: duplicated-imports - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#early-return - - name: early-return - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-block - - name: empty-block - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-lines - - name: empty-lines - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-naming - - name: error-naming - disabled: false - # 
https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-return - - name: error-return - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-strings - - name: error-strings - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#errorf - - name: errorf - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#exported - - name: exported - disabled: false - arguments: - - "sayRepetitiveInsteadOfStutters" - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#flag-parameter - - name: flag-parameter - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#identical-branches - - name: identical-branches - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#if-return - - name: if-return - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#increment-decrement - - name: increment-decrement - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#indent-error-flow - - name: indent-error-flow - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#import-shadowing - - name: import-shadowing - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#package-comments - - name: package-comments - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range - - name: range - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-in-closure - - name: range-val-in-closure - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-address - - name: range-val-address - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#redefines-builtin-id - - name: redefines-builtin-id - disabled: false - # 
https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#string-format - - name: string-format - disabled: false - arguments: - - - panic - - '/^[^\n]*$/' - - must not contain line breaks - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#struct-tag - - name: struct-tag - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#superfluous-else - - name: superfluous-else - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#time-equal - - name: time-equal - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-naming - - name: var-naming - disabled: false - arguments: - - ["ID"] # AllowList - - ["Otel", "Aws", "Gcp"] # DenyList - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-declaration - - name: var-declaration - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unconditional-recursion - - name: unconditional-recursion - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unexported-return - - name: unexported-return - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unhandled-error - - name: unhandled-error - disabled: false - arguments: - - "fmt.Fprint" - - "fmt.Fprintf" - - "fmt.Fprintln" - - "fmt.Print" - - "fmt.Printf" - - "fmt.Println" - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unnecessary-stmt - - name: unnecessary-stmt - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#useless-break - - name: useless-break - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#waitgroup-by-value - - name: waitgroup-by-value - disabled: false diff --git a/vendor/go.opentelemetry.io/otel/.lycheeignore b/vendor/go.opentelemetry.io/otel/.lycheeignore deleted file mode 100644 index 40d62fa2..00000000 --- 
a/vendor/go.opentelemetry.io/otel/.lycheeignore +++ /dev/null @@ -1,6 +0,0 @@ -http://localhost -http://jaeger-collector -https://github.com/open-telemetry/opentelemetry-go/milestone/ -https://github.com/open-telemetry/opentelemetry-go/projects -file:///home/runner/work/opentelemetry-go/opentelemetry-go/libraries -file:///home/runner/work/opentelemetry-go/opentelemetry-go/manual diff --git a/vendor/go.opentelemetry.io/otel/.markdownlint.yaml b/vendor/go.opentelemetry.io/otel/.markdownlint.yaml deleted file mode 100644 index 3202496c..00000000 --- a/vendor/go.opentelemetry.io/otel/.markdownlint.yaml +++ /dev/null @@ -1,29 +0,0 @@ -# Default state for all rules -default: true - -# ul-style -MD004: false - -# hard-tabs -MD010: false - -# line-length -MD013: false - -# no-duplicate-header -MD024: - siblings_only: true - -#single-title -MD025: false - -# ol-prefix -MD029: - style: ordered - -# no-inline-html -MD033: false - -# fenced-code-language -MD040: false - diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md deleted file mode 100644 index d9f145f8..00000000 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ /dev/null @@ -1,2567 +0,0 @@ -# Changelog - -All notable changes to this project will be documented in this file. - -The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). - -This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). - -## [Unreleased] - -## [1.16.0/0.39.0] 2023-05-18 - -This release contains the first stable release of the OpenTelemetry Go [metric API]. -Our project stability guarantees now apply to the `go.opentelemetry.io/otel/metric` package. -See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. - -### Added - -- The `go.opentelemetry.io/otel/semconv/v1.19.0` package. - The package contains semantic conventions from the `v1.19.0` version of the OpenTelemetry specification. 
(#3848) -- The `go.opentelemetry.io/otel/semconv/v1.20.0` package. - The package contains semantic conventions from the `v1.20.0` version of the OpenTelemetry specification. (#4078) - -### Changed - -- Use `strings.Cut()` instead of `string.SplitN()` for better readability and memory use. (#4049) - -### Removed - -- The deprecated `go.opentelemetry.io/otel/metric/instrument` package is removed. - Use `go.opentelemetry.io/otel/metric` instead. (#4055) - -### Fixed - -- Fix build for BSD based systems in `go.opentelemetry.io/otel/sdk/resource`. (#4077) - -## [1.16.0-rc.1/0.39.0-rc.1] 2023-05-03 - -This is a release candidate for the v1.16.0/v0.39.0 release. -That release is expected to include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API. -See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. - -### Added - -- Support global `MeterProvider` in `go.opentelemetry.io/otel`. (#4039) - - Use `Meter` for a `metric.Meter` from the global `metric.MeterProvider`. - - Use `GetMeterProivder` for a global `metric.MeterProvider`. - - Use `SetMeterProivder` to set the global `metric.MeterProvider`. - -### Changed - -- Move the `go.opentelemetry.io/otel/metric` module to the `stable-v1` module set. - This stages the metric API to be released as a stable module. (#4038) - -### Removed - -- The `go.opentelemetry.io/otel/metric/global` package is removed. - Use `go.opentelemetry.io/otel` instead. (#4039) - -## [1.15.1/0.38.1] 2023-05-02 - -### Fixed - -- Remove unused imports from `sdk/resource/host_id_bsd.go` which caused build failures. (#4040, #4041) - -## [1.15.0/0.38.0] 2023-04-27 - -### Added - -- The `go.opentelemetry.io/otel/metric/embedded` package. (#3916) -- The `Version` function to `go.opentelemetry.io/otel/sdk` to return the SDK version. 
(#3949) -- Add a `WithNamespace` option to `go.opentelemetry.io/otel/exporters/prometheus` to allow users to prefix metrics with a namespace. (#3970) -- The following configuration types were added to `go.opentelemetry.io/otel/metric/instrument` to be used in the configuration of measurement methods. (#3971) - - The `AddConfig` used to hold configuration for addition measurements - - `NewAddConfig` used to create a new `AddConfig` - - `AddOption` used to configure an `AddConfig` - - The `RecordConfig` used to hold configuration for recorded measurements - - `NewRecordConfig` used to create a new `RecordConfig` - - `RecordOption` used to configure a `RecordConfig` - - The `ObserveConfig` used to hold configuration for observed measurements - - `NewObserveConfig` used to create a new `ObserveConfig` - - `ObserveOption` used to configure an `ObserveConfig` -- `WithAttributeSet` and `WithAttributes` are added to `go.opentelemetry.io/otel/metric/instrument`. - They return an option used during a measurement that defines the attribute Set associated with the measurement. (#3971) -- The `Version` function to `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` to return the OTLP metrics client version. (#3956) -- The `Version` function to `go.opentelemetry.io/otel/exporters/otlp/otlptrace` to return the OTLP trace client version. (#3956) - -### Changed - -- The `Extrema` in `go.opentelemetry.io/otel/sdk/metric/metricdata` is redefined with a generic argument of `[N int64 | float64]`. (#3870) -- Update all exported interfaces from `go.opentelemetry.io/otel/metric` to embed their corresponding interface from `go.opentelemetry.io/otel/metric/embedded`. - This adds an implementation requirement to set the interface default behavior for unimplemented methods. (#3916) -- Move No-Op implementation from `go.opentelemetry.io/otel/metric` into its own package `go.opentelemetry.io/otel/metric/noop`. 
(#3941) - - `metric.NewNoopMeterProvider` is replaced with `noop.NewMeterProvider` -- Add all the methods from `"go.opentelemetry.io/otel/trace".SpanContext` to `bridgeSpanContext` by embedding `otel.SpanContext` in `bridgeSpanContext`. (#3966) -- Wrap `UploadMetrics` error in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/` to improve error message when encountering generic grpc errors. (#3974) -- The measurement methods for all instruments in `go.opentelemetry.io/otel/metric/instrument` accept an option instead of the variadic `"go.opentelemetry.io/otel/attribute".KeyValue`. (#3971) - - The `Int64Counter.Add` method now accepts `...AddOption` - - The `Float64Counter.Add` method now accepts `...AddOption` - - The `Int64UpDownCounter.Add` method now accepts `...AddOption` - - The `Float64UpDownCounter.Add` method now accepts `...AddOption` - - The `Int64Histogram.Record` method now accepts `...RecordOption` - - The `Float64Histogram.Record` method now accepts `...RecordOption` - - The `Int64Observer.Observe` method now accepts `...ObserveOption` - - The `Float64Observer.Observe` method now accepts `...ObserveOption` -- The `Observer` methods in `go.opentelemetry.io/otel/metric` accept an option instead of the variadic `"go.opentelemetry.io/otel/attribute".KeyValue`. (#3971) - - The `Observer.ObserveInt64` method now accepts `...ObserveOption` - - The `Observer.ObserveFloat64` method now accepts `...ObserveOption` -- Move global metric back to `go.opentelemetry.io/otel/metric/global` from `go.opentelemetry.io/otel`. (#3986) - -### Fixed - -- `TracerProvider` allows calling `Tracer()` while it's shutting down. - It used to deadlock. (#3924) -- Use the SDK version for the Telemetry SDK resource detector in `go.opentelemetry.io/otel/sdk/resource`. (#3949) -- Fix a data race in `SpanProcessor` returned by `NewSimpleSpanProcessor` in `go.opentelemetry.io/otel/sdk/trace`. (#3951) -- Automatically figure out the default aggregation with `aggregation.Default`. 
(#3967) - -### Deprecated - -- The `go.opentelemetry.io/otel/metric/instrument` package is deprecated. - Use the equivalent types added to `go.opentelemetry.io/otel/metric` instead. (#4018) - -## [1.15.0-rc.2/0.38.0-rc.2] 2023-03-23 - -This is a release candidate for the v1.15.0/v0.38.0 release. -That release will include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API. -See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. - -### Added - -- The `WithHostID` option to `go.opentelemetry.io/otel/sdk/resource`. (#3812) -- The `WithoutTimestamps` option to `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` to sets all timestamps to zero. (#3828) -- The new `Exemplar` type is added to `go.opentelemetry.io/otel/sdk/metric/metricdata`. - Both the `DataPoint` and `HistogramDataPoint` types from that package have a new field of `Exemplars` containing the sampled exemplars for their timeseries. (#3849) -- Configuration for each metric instrument in `go.opentelemetry.io/otel/sdk/metric/instrument`. (#3895) -- The internal logging introduces a warning level verbosity equal to `V(1)`. (#3900) -- Added a log message warning about usage of `SimpleSpanProcessor` in production environments. (#3854) - -### Changed - -- Optimize memory allocation when creation a new `Set` using `NewSet` or `NewSetWithFiltered` in `go.opentelemetry.io/otel/attribute`. (#3832) -- Optimize memory allocation when creation new metric instruments in `go.opentelemetry.io/otel/sdk/metric`. (#3832) -- Avoid creating new objects on all calls to `WithDeferredSetup` and `SkipContextSetup` in OpenTracing bridge. (#3833) -- The `New` and `Detect` functions from `go.opentelemetry.io/otel/sdk/resource` return errors that wrap underlying errors instead of just containing the underlying error strings. 
(#3844) -- Both the `Histogram` and `HistogramDataPoint` are redefined with a generic argument of `[N int64 | float64]` in `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#3849) -- The metric `Export` interface from `go.opentelemetry.io/otel/sdk/metric` accepts a `*ResourceMetrics` instead of `ResourceMetrics`. (#3853) -- Rename `Asynchronous` to `Observable` in `go.opentelemetry.io/otel/metric/instrument`. (#3892) -- Rename `Int64ObserverOption` to `Int64ObservableOption` in `go.opentelemetry.io/otel/metric/instrument`. (#3895) -- Rename `Float64ObserverOption` to `Float64ObservableOption` in `go.opentelemetry.io/otel/metric/instrument`. (#3895) -- The internal logging changes the verbosity level of info to `V(4)`, the verbosity level of debug to `V(8)`. (#3900) - -### Fixed - -- `TracerProvider` consistently doesn't allow to register a `SpanProcessor` after shutdown. (#3845) - -### Removed - -- The deprecated `go.opentelemetry.io/otel/metric/global` package is removed. (#3829) -- The unneeded `Synchronous` interface in `go.opentelemetry.io/otel/metric/instrument` was removed. (#3892) -- The `Float64ObserverConfig` and `NewFloat64ObserverConfig` in `go.opentelemetry.io/otel/sdk/metric/instrument`. - Use the added `float64` instrument configuration instead. (#3895) -- The `Int64ObserverConfig` and `NewInt64ObserverConfig` in `go.opentelemetry.io/otel/sdk/metric/instrument`. - Use the added `int64` instrument configuration instead. (#3895) -- The `NewNoopMeter` function in `go.opentelemetry.io/otel/metric`, use `NewMeterProvider().Meter("")` instead. (#3893) - -## [1.15.0-rc.1/0.38.0-rc.1] 2023-03-01 - -This is a release candidate for the v1.15.0/v0.38.0 release. -That release will include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API. -See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. - -This release drops the compatibility guarantee of [Go 1.18]. 
- -### Added - -- Support global `MeterProvider` in `go.opentelemetry.io/otel`. (#3818) - - Use `Meter` for a `metric.Meter` from the global `metric.MeterProvider`. - - Use `GetMeterProivder` for a global `metric.MeterProvider`. - - Use `SetMeterProivder` to set the global `metric.MeterProvider`. - -### Changed - -- Dropped compatibility testing for [Go 1.18]. - The project no longer guarantees support for this version of Go. (#3813) - -### Fixed - -- Handle empty environment variable as it they were not set. (#3764) -- Clarify the `httpconv` and `netconv` packages in `go.opentelemetry.io/otel/semconv/*` provide tracing semantic conventions. (#3823) - -### Deprecated - -- The `go.opentelemetry.io/otel/metric/global` package is deprecated. - Use `go.opentelemetry.io/otel` instead. (#3818) - -### Removed - -- The deprecated `go.opentelemetry.io/otel/metric/unit` package is removed. (#3814) - -## [1.14.0/0.37.0/0.0.4] 2023-02-27 - -This release is the last to support [Go 1.18]. -The next release will require at least [Go 1.19]. - -### Added - -- The `event` type semantic conventions are added to `go.opentelemetry.io/otel/semconv/v1.17.0`. (#3697) -- Support [Go 1.20]. (#3693) -- The `go.opentelemetry.io/otel/semconv/v1.18.0` package. - The package contains semantic conventions from the `v1.18.0` version of the OpenTelemetry specification. 
(#3719) - - The following `const` renames from `go.opentelemetry.io/otel/semconv/v1.17.0` are included: - - `OtelScopeNameKey` -> `OTelScopeNameKey` - - `OtelScopeVersionKey` -> `OTelScopeVersionKey` - - `OtelLibraryNameKey` -> `OTelLibraryNameKey` - - `OtelLibraryVersionKey` -> `OTelLibraryVersionKey` - - `OtelStatusCodeKey` -> `OTelStatusCodeKey` - - `OtelStatusDescriptionKey` -> `OTelStatusDescriptionKey` - - `OtelStatusCodeOk` -> `OTelStatusCodeOk` - - `OtelStatusCodeError` -> `OTelStatusCodeError` - - The following `func` renames from `go.opentelemetry.io/otel/semconv/v1.17.0` are included: - - `OtelScopeName` -> `OTelScopeName` - - `OtelScopeVersion` -> `OTelScopeVersion` - - `OtelLibraryName` -> `OTelLibraryName` - - `OtelLibraryVersion` -> `OTelLibraryVersion` - - `OtelStatusDescription` -> `OTelStatusDescription` -- A `IsSampled` method is added to the `SpanContext` implementation in `go.opentelemetry.io/otel/bridge/opentracing` to expose the span sampled state. - See the [README](./bridge/opentracing/README.md) for more information. (#3570) -- The `WithInstrumentationAttributes` option to `go.opentelemetry.io/otel/metric`. (#3738) -- The `WithInstrumentationAttributes` option to `go.opentelemetry.io/otel/trace`. (#3739) -- The following environment variables are supported by the periodic `Reader` in `go.opentelemetry.io/otel/sdk/metric`. (#3763) - - `OTEL_METRIC_EXPORT_INTERVAL` sets the time between collections and exports. - - `OTEL_METRIC_EXPORT_TIMEOUT` sets the timeout an export is attempted. - -### Changed - -- Fall-back to `TextMapCarrier` when it's not `HttpHeader`s in `go.opentelemetry.io/otel/bridge/opentracing`. (#3679) -- The `Collect` method of the `"go.opentelemetry.io/otel/sdk/metric".Reader` interface is updated to accept the `metricdata.ResourceMetrics` value the collection will be made into. - This change is made to enable memory reuse by SDK users. 
(#3732) -- The `WithUnit` option in `go.opentelemetry.io/otel/sdk/metric/instrument` is updated to accept a `string` for the unit value. (#3776) - -### Fixed - -- Ensure `go.opentelemetry.io/otel` does not use generics. (#3723, #3725) -- Multi-reader `MeterProvider`s now export metrics for all readers, instead of just the first reader. (#3720, #3724) -- Remove use of deprecated `"math/rand".Seed` in `go.opentelemetry.io/otel/example/prometheus`. (#3733) -- Do not silently drop unknown schema data with `Parse` in `go.opentelemetry.io/otel/schema/v1.1`. (#3743) -- Data race issue in OTLP exporter retry mechanism. (#3755, #3756) -- Wrapping empty errors when exporting in `go.opentelemetry.io/otel/sdk/metric`. (#3698, #3772) -- Incorrect "all" and "resource" definition for schema files in `go.opentelemetry.io/otel/schema/v1.1`. (#3777) - -### Deprecated - -- The `go.opentelemetry.io/otel/metric/unit` package is deprecated. - Use the equivalent unit string instead. (#3776) - - Use `"1"` instead of `unit.Dimensionless` - - Use `"By"` instead of `unit.Bytes` - - Use `"ms"` instead of `unit.Milliseconds` - -## [1.13.0/0.36.0] 2023-02-07 - -### Added - -- Attribute `KeyValue` creations functions to `go.opentelemetry.io/otel/semconv/v1.17.0` for all non-enum semantic conventions. - These functions ensure semantic convention type correctness. (#3675) - -### Fixed - -- Removed the `http.target` attribute from being added by `ServerRequest` in the following packages. (#3687) - - `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv` - - `go.opentelemetry.io/otel/semconv/v1.14.0/httpconv` - - `go.opentelemetry.io/otel/semconv/v1.15.0/httpconv` - - `go.opentelemetry.io/otel/semconv/v1.16.0/httpconv` - - `go.opentelemetry.io/otel/semconv/v1.17.0/httpconv` - -### Removed - -- The deprecated `go.opentelemetry.io/otel/metric/instrument/asyncfloat64` package is removed. (#3631) -- The deprecated `go.opentelemetry.io/otel/metric/instrument/asyncint64` package is removed. 
(#3631) -- The deprecated `go.opentelemetry.io/otel/metric/instrument/syncfloat64` package is removed. (#3631) -- The deprecated `go.opentelemetry.io/otel/metric/instrument/syncint64` package is removed. (#3631) - -## [1.12.0/0.35.0] 2023-01-28 - -### Added - -- The `WithInt64Callback` option to `go.opentelemetry.io/otel/metric/instrument`. - This options is used to configure `int64` Observer callbacks during their creation. (#3507) -- The `WithFloat64Callback` option to `go.opentelemetry.io/otel/metric/instrument`. - This options is used to configure `float64` Observer callbacks during their creation. (#3507) -- The `Producer` interface and `Reader.RegisterProducer(Producer)` to `go.opentelemetry.io/otel/sdk/metric`. - These additions are used to enable external metric Producers. (#3524) -- The `Callback` function type to `go.opentelemetry.io/otel/metric`. - This new named function type is registered with a `Meter`. (#3564) -- The `go.opentelemetry.io/otel/semconv/v1.13.0` package. - The package contains semantic conventions from the `v1.13.0` version of the OpenTelemetry specification. (#3499) - - The `EndUserAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientRequest` and `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. - - The `HTTPAttributesFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientResponse` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. - - The `HTTPClientAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. - - The `HTTPServerAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. 
- - The `HTTPServerMetricAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. - - The `NetAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `Transport` in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` and `ClientRequest` or `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. - - The `SpanStatusFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. - - The `SpanStatusFromHTTPStatusCodeAndSpanKind` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `ClientStatus` and `ServerStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. - - The `Client` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Conn`. - - The `Server` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Listener`. -- The `go.opentelemetry.io/otel/semconv/v1.14.0` package. - The package contains semantic conventions from the `v1.14.0` version of the OpenTelemetry specification. (#3566) -- The `go.opentelemetry.io/otel/semconv/v1.15.0` package. - The package contains semantic conventions from the `v1.15.0` version of the OpenTelemetry specification. (#3578) -- The `go.opentelemetry.io/otel/semconv/v1.16.0` package. - The package contains semantic conventions from the `v1.16.0` version of the OpenTelemetry specification. (#3579) -- Metric instruments to `go.opentelemetry.io/otel/metric/instrument`. 
- These instruments are use as replacements of the deprecated `go.opentelemetry.io/otel/metric/instrument/{asyncfloat64,asyncint64,syncfloat64,syncint64}` packages.(#3575, #3586) - - `Float64ObservableCounter` replaces the `asyncfloat64.Counter` - - `Float64ObservableUpDownCounter` replaces the `asyncfloat64.UpDownCounter` - - `Float64ObservableGauge` replaces the `asyncfloat64.Gauge` - - `Int64ObservableCounter` replaces the `asyncint64.Counter` - - `Int64ObservableUpDownCounter` replaces the `asyncint64.UpDownCounter` - - `Int64ObservableGauge` replaces the `asyncint64.Gauge` - - `Float64Counter` replaces the `syncfloat64.Counter` - - `Float64UpDownCounter` replaces the `syncfloat64.UpDownCounter` - - `Float64Histogram` replaces the `syncfloat64.Histogram` - - `Int64Counter` replaces the `syncint64.Counter` - - `Int64UpDownCounter` replaces the `syncint64.UpDownCounter` - - `Int64Histogram` replaces the `syncint64.Histogram` -- `NewTracerProvider` to `go.opentelemetry.io/otel/bridge/opentracing`. - This is used to create `WrapperTracer` instances from a `TracerProvider`. (#3116) -- The `Extrema` type to `go.opentelemetry.io/otel/sdk/metric/metricdata`. - This type is used to represent min/max values and still be able to distinguish unset and zero values. (#3487) -- The `go.opentelemetry.io/otel/semconv/v1.17.0` package. - The package contains semantic conventions from the `v1.17.0` version of the OpenTelemetry specification. (#3599) - -### Changed - -- Jaeger and Zipkin exporter use `github.com/go-logr/logr` as the logging interface, and add the `WithLogr` option. (#3497, #3500) -- Instrument configuration in `go.opentelemetry.io/otel/metric/instrument` is split into specific options and configuration based on the instrument type. (#3507) - - Use the added `Int64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncint64`. 
- - Use the added `Float64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncfloat64`. - - Use the added `Int64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncint64`. - - Use the added `Float64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncfloat64`. -- Return a `Registration` from the `RegisterCallback` method of a `Meter` in the `go.opentelemetry.io/otel/metric` package. - This `Registration` can be used to unregister callbacks. (#3522) -- Global error handler uses an atomic value instead of a mutex. (#3543) -- Add `NewMetricProducer` to `go.opentelemetry.io/otel/bridge/opencensus`, which can be used to pass OpenCensus metrics to an OpenTelemetry Reader. (#3541) -- Global logger uses an atomic value instead of a mutex. (#3545) -- The `Shutdown` method of the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` releases all computational resources when called the first time. (#3551) -- The `Sampler` returned from `TraceIDRatioBased` `go.opentelemetry.io/otel/sdk/trace` now uses the rightmost bits for sampling decisions. - This fixes random sampling when using ID generators like `xray.IDGenerator` and increasing parity with other language implementations. (#3557) -- Errors from `go.opentelemetry.io/otel/exporters/otlp/otlptrace` exporters are wrapped in errors identifying their signal name. - Existing users of the exporters attempting to identify specific errors will need to use `errors.Unwrap()` to get the underlying error. (#3516) -- Exporters from `go.opentelemetry.io/otel/exporters/otlp` will print the final retryable error message when attempts to retry time out. (#3514) -- The instrument kind names in `go.opentelemetry.io/otel/sdk/metric` are updated to match the API. 
(#3562) - - `InstrumentKindSyncCounter` is renamed to `InstrumentKindCounter` - - `InstrumentKindSyncUpDownCounter` is renamed to `InstrumentKindUpDownCounter` - - `InstrumentKindSyncHistogram` is renamed to `InstrumentKindHistogram` - - `InstrumentKindAsyncCounter` is renamed to `InstrumentKindObservableCounter` - - `InstrumentKindAsyncUpDownCounter` is renamed to `InstrumentKindObservableUpDownCounter` - - `InstrumentKindAsyncGauge` is renamed to `InstrumentKindObservableGauge` -- The `RegisterCallback` method of the `Meter` in `go.opentelemetry.io/otel/metric` changed. - - The named `Callback` replaces the inline function parameter. (#3564) - - `Callback` is required to return an error. (#3576) - - `Callback` accepts the added `Observer` parameter added. - This new parameter is used by `Callback` implementations to observe values for asynchronous instruments instead of calling the `Observe` method of the instrument directly. (#3584) - - The slice of `instrument.Asynchronous` is now passed as a variadic argument. (#3587) -- The exporter from `go.opentelemetry.io/otel/exporters/zipkin` is updated to use the `v1.16.0` version of semantic conventions. - This means it no longer uses the removed `net.peer.ip` or `http.host` attributes to determine the remote endpoint. - Instead it uses the `net.sock.peer` attributes. (#3581) -- The `Min` and `Max` fields of the `HistogramDataPoint` in `go.opentelemetry.io/otel/sdk/metric/metricdata` are now defined with the added `Extrema` type instead of a `*float64`. (#3487) - -### Fixed - -- Asynchronous instruments that use sum aggregators and attribute filters correctly add values from equivalent attribute sets that have been filtered. (#3439, #3549) -- The `RegisterCallback` method of the `Meter` from `go.opentelemetry.io/otel/sdk/metric` only registers a callback for instruments created by that meter. - Trying to register a callback with instruments from a different meter will result in an error being returned. 
(#3584) - -### Deprecated - -- The `NewMetricExporter` in `go.opentelemetry.io/otel/bridge/opencensus` is deprecated. - Use `NewMetricProducer` instead. (#3541) -- The `go.opentelemetry.io/otel/metric/instrument/asyncfloat64` package is deprecated. - Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575) -- The `go.opentelemetry.io/otel/metric/instrument/asyncint64` package is deprecated. - Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575) -- The `go.opentelemetry.io/otel/metric/instrument/syncfloat64` package is deprecated. - Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575) -- The `go.opentelemetry.io/otel/metric/instrument/syncint64` package is deprecated. - Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575) -- The `NewWrappedTracerProvider` in `go.opentelemetry.io/otel/bridge/opentracing` is now deprecated. - Use `NewTracerProvider` instead. (#3116) - -### Removed - -- The deprecated `go.opentelemetry.io/otel/sdk/metric/view` package is removed. (#3520) -- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncint64` is removed. - Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530) - - The `Counter` method is replaced by `Meter.Int64ObservableCounter` - - The `UpDownCounter` method is replaced by `Meter.Int64ObservableUpDownCounter` - - The `Gauge` method is replaced by `Meter.Int64ObservableGauge` -- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncfloat64` is removed. - Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. 
(#3530) - - The `Counter` method is replaced by `Meter.Float64ObservableCounter` - - The `UpDownCounter` method is replaced by `Meter.Float64ObservableUpDownCounter` - - The `Gauge` method is replaced by `Meter.Float64ObservableGauge` -- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncint64` is removed. - Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530) - - The `Counter` method is replaced by `Meter.Int64Counter` - - The `UpDownCounter` method is replaced by `Meter.Int64UpDownCounter` - - The `Histogram` method is replaced by `Meter.Int64Histogram` -- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncfloat64` is removed. - Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530) - - The `Counter` method is replaced by `Meter.Float64Counter` - - The `UpDownCounter` method is replaced by `Meter.Float64UpDownCounter` - - The `Histogram` method is replaced by `Meter.Float64Histogram` - -## [1.11.2/0.34.0] 2022-12-05 - -### Added - -- The `WithView` `Option` is added to the `go.opentelemetry.io/otel/sdk/metric` package. - This option is used to configure the view(s) a `MeterProvider` will use for all `Reader`s that are registered with it. (#3387) -- Add Instrumentation Scope and Version as info metric and label in Prometheus exporter. 
- This can be disabled using the `WithoutScopeInfo()` option added to that package.(#3273, #3357) -- OTLP exporters now recognize: (#3363) - - `OTEL_EXPORTER_OTLP_INSECURE` - - `OTEL_EXPORTER_OTLP_TRACES_INSECURE` - - `OTEL_EXPORTER_OTLP_METRICS_INSECURE` - - `OTEL_EXPORTER_OTLP_CLIENT_KEY` - - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY` - - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY` - - `OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE` - - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE` - - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE` -- The `View` type and related `NewView` function to create a view according to the OpenTelemetry specification are added to `go.opentelemetry.io/otel/sdk/metric`. - These additions are replacements for the `View` type and `New` function from `go.opentelemetry.io/otel/sdk/metric/view`. (#3459) -- The `Instrument` and `InstrumentKind` type are added to `go.opentelemetry.io/otel/sdk/metric`. - These additions are replacements for the `Instrument` and `InstrumentKind` types from `go.opentelemetry.io/otel/sdk/metric/view`. (#3459) -- The `Stream` type is added to `go.opentelemetry.io/otel/sdk/metric` to define a metric data stream a view will produce. (#3459) -- The `AssertHasAttributes` allows instrument authors to test that datapoints returned have appropriate attributes. (#3487) - -### Changed - -- The `"go.opentelemetry.io/otel/sdk/metric".WithReader` option no longer accepts views to associate with the `Reader`. - Instead, views are now registered directly with the `MeterProvider` via the new `WithView` option. - The views registered with the `MeterProvider` apply to all `Reader`s. (#3387) -- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/sdk/metric".Exporter` interface. 
(#3260) -- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric".Client` interface. (#3260) -- The `WithTemporalitySelector` and `WithAggregationSelector` `ReaderOption`s have been changed to `ManualReaderOption`s in the `go.opentelemetry.io/otel/sdk/metric` package. (#3260) -- The periodic reader in the `go.opentelemetry.io/otel/sdk/metric` package now uses the temporality and aggregation selectors from its configured exporter instead of accepting them as options. (#3260) - -### Fixed - -- The `go.opentelemetry.io/otel/exporters/prometheus` exporter fixes duplicated `_total` suffixes. (#3369) -- Remove comparable requirement for `Reader`s. (#3387) -- Cumulative metrics from the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) are defined as monotonic sums, instead of non-monotonic. (#3389) -- Asynchronous counters (`Counter` and `UpDownCounter`) from the metric SDK now produce delta sums when configured with delta temporality. (#3398) -- Exported `Status` codes in the `go.opentelemetry.io/otel/exporters/zipkin` exporter are now exported as all upper case values. (#3340) -- `Aggregation`s from `go.opentelemetry.io/otel/sdk/metric` with no data are not exported. (#3394, #3436) -- Re-enabled Attribute Filters in the Metric SDK. (#3396) -- Asynchronous callbacks are only called if they are registered with at least one instrument that does not use drop aggragation. (#3408) -- Do not report empty partial-success responses in the `go.opentelemetry.io/otel/exporters/otlp` exporters. (#3438, #3432) -- Handle partial success responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` exporters. (#3162, #3440) -- Prevent duplicate Prometheus description, unit, and type. (#3469) -- Prevents panic when using incorrect `attribute.Value.As[Type]Slice()`. 
(#3489) - -### Removed - -- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.Client` interface is removed. (#3486) -- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.New` function is removed. Use the `otlpmetric[http|grpc].New` directly. (#3486) - -### Deprecated - -- The `go.opentelemetry.io/otel/sdk/metric/view` package is deprecated. - Use `Instrument`, `InstrumentKind`, `View`, and `NewView` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3476) - -## [1.11.1/0.33.0] 2022-10-19 - -### Added - -- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` registers with a Prometheus registerer on creation. - By default, it will register with the default Prometheus registerer. - A non-default registerer can be used by passing the `WithRegisterer` option. (#3239) -- Added the `WithAggregationSelector` option to the `go.opentelemetry.io/otel/exporters/prometheus` package to change the default `AggregationSelector` used. (#3341) -- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` converts the `Resource` associated with metric exports into a `target_info` metric. (#3285) - -### Changed - -- The `"go.opentelemetry.io/otel/exporters/prometheus".New` function is updated to return an error. - It will return an error if the exporter fails to register with Prometheus. (#3239) - -### Fixed - -- The URL-encoded values from the `OTEL_RESOURCE_ATTRIBUTES` environment variable are decoded. (#2963) -- The `baggage.NewMember` function decodes the `value` parameter instead of directly using it. - This fixes the implementation to be compliant with the W3C specification. (#3226) -- Slice attributes of the `attribute` package are now comparable based on their value, not instance. (#3108 #3252) -- The `Shutdown` and `ForceFlush` methods of the `"go.opentelemetry.io/otel/sdk/trace".TraceProvider` no longer return an error when no processor is registered. 
(#3268) -- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` cumulatively sums histogram buckets. (#3281) -- The sum of each histogram data point is now uniquely exported by the `go.opentelemetry.io/otel/exporters/otlpmetric` exporters. (#3284, #3293) -- Recorded values for asynchronous counters (`Counter` and `UpDownCounter`) are interpreted as exact, not incremental, sum values by the metric SDK. (#3350, #3278) -- `UpDownCounters` are now correctly output as Prometheus gauges in the `go.opentelemetry.io/otel/exporters/prometheus` exporter. (#3358) -- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` no longer describes the metrics it will send to Prometheus on startup. - Instead the exporter is defined as an "unchecked" collector for Prometheus. - This fixes the `reader is not registered` warning currently emitted on startup. (#3291 #3342) -- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now correctly adds `_total` suffixes to counter metrics. (#3360) -- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now adds a unit suffix to metric names. - This can be disabled using the `WithoutUnits()` option added to that package. (#3352) - -## [1.11.0/0.32.3] 2022-10-12 - -### Added - -- Add default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlptrace/otlptracegrpc` and `go.opentelemetry.io/otel/exporters/otlptrace/otlptracehttp`). (#3261) - -### Changed - -- `span.SetStatus` has been updated such that calls that lower the status are now no-ops. (#3214) -- Upgrade `golang.org/x/sys/unix` from `v0.0.0-20210423185535-09eb48e85fd7` to `v0.0.0-20220919091848-fb04ddd9f9c8`. - This addresses [GO-2022-0493](https://pkg.go.dev/vuln/GO-2022-0493). (#3235) - -## [0.32.2] Metric SDK (Alpha) - 2022-10-11 - -### Added - -- Added an example of using metric views to customize instruments. 
(#3177) -- Add default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlpmetric/otlpmetrichttp`). (#3261) - -### Changed - -- Flush pending measurements with the `PeriodicReader` in the `go.opentelemetry.io/otel/sdk/metric` when `ForceFlush` or `Shutdown` are called. (#3220) -- Update histogram default bounds to match the requirements of the latest specification. (#3222) -- Encode the HTTP status code in the OpenTracing bridge (`go.opentelemetry.io/otel/bridge/opentracing`) as an integer. (#3265) - -### Fixed - -- Use default view if instrument does not match any registered view of a reader. (#3224, #3237) -- Return the same instrument every time a user makes the exact same instrument creation call. (#3229, #3251) -- Return the existing instrument when a view transforms a creation call to match an existing instrument. (#3240, #3251) -- Log a warning when a conflicting instrument (e.g. description, unit, data-type) is created instead of returning an error. (#3251) -- The OpenCensus bridge no longer sends empty batches of metrics. (#3263) - -## [0.32.1] Metric SDK (Alpha) - 2022-09-22 - -### Changed - -- The Prometheus exporter sanitizes OpenTelemetry instrument names when exporting. - Invalid characters are replaced with `_`. (#3212) - -### Added - -- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been reintroduced. (#3192) -- The OpenCensus bridge example (`go.opentelemetry.io/otel/example/opencensus`) has been reintroduced. (#3206) - -### Fixed - -- Updated go.mods to point to valid versions of the sdk. (#3216) -- Set the `MeterProvider` resource on all exported metric data. (#3218) - -## [0.32.0] Revised Metric SDK (Alpha) - 2022-09-18 - -### Changed - -- The metric SDK in `go.opentelemetry.io/otel/sdk/metric` is completely refactored to comply with the OpenTelemetry specification. 
- Please see the package documentation for how the new SDK is initialized and configured. (#3175) -- Update the minimum supported go version to go1.18. Removes support for go1.17 (#3179) - -### Removed - -- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been removed. - A new bridge compliant with the revised metric SDK will be added back in a future release. (#3175) -- The `go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest` package is removed, see the new metric SDK. (#3175) -- The `go.opentelemetry.io/otel/sdk/metric/aggregator/histogram` package is removed, see the new metric SDK. (#3175) -- The `go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue` package is removed, see the new metric SDK. (#3175) -- The `go.opentelemetry.io/otel/sdk/metric/aggregator/sum` package is removed, see the new metric SDK. (#3175) -- The `go.opentelemetry.io/otel/sdk/metric/aggregator` package is removed, see the new metric SDK. (#3175) -- The `go.opentelemetry.io/otel/sdk/metric/controller/basic` package is removed, see the new metric SDK. (#3175) -- The `go.opentelemetry.io/otel/sdk/metric/controller/controllertest` package is removed, see the new metric SDK. (#3175) -- The `go.opentelemetry.io/otel/sdk/metric/controller/time` package is removed, see the new metric SDK. (#3175) -- The `go.opentelemetry.io/otel/sdk/metric/export/aggregation` package is removed, see the new metric SDK. (#3175) -- The `go.opentelemetry.io/otel/sdk/metric/export` package is removed, see the new metric SDK. (#3175) -- The `go.opentelemetry.io/otel/sdk/metric/metrictest` package is removed. - A replacement package that supports the new metric SDK will be added back in a future release. (#3175) -- The `go.opentelemetry.io/otel/sdk/metric/number` package is removed, see the new metric SDK. (#3175) -- The `go.opentelemetry.io/otel/sdk/metric/processor/basic` package is removed, see the new metric SDK. 
(#3175) -- The `go.opentelemetry.io/otel/sdk/metric/processor/processortest` package is removed, see the new metric SDK. (#3175) -- The `go.opentelemetry.io/otel/sdk/metric/processor/reducer` package is removed, see the new metric SDK. (#3175) -- The `go.opentelemetry.io/otel/sdk/metric/registry` package is removed, see the new metric SDK. (#3175) -- The `go.opentelemetry.io/otel/sdk/metric/sdkapi` package is removed, see the new metric SDK. (#3175) -- The `go.opentelemetry.io/otel/sdk/metric/selector/simple` package is removed, see the new metric SDK. (#3175) -- The `"go.opentelemetry.io/otel/sdk/metric".ErrUninitializedInstrument` variable was removed. (#3175) -- The `"go.opentelemetry.io/otel/sdk/metric".ErrBadInstrument` variable was removed. (#3175) -- The `"go.opentelemetry.io/otel/sdk/metric".Accumulator` type was removed, see the `MeterProvider` in the new metric SDK. (#3175) -- The `"go.opentelemetry.io/otel/sdk/metric".NewAccumulator` function was removed, see `NewMeterProvider` in the new metric SDK. (#3175) -- The deprecated `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets` function was removed. (#3175) - -## [1.10.0] - 2022-09-09 - -### Added - -- Support Go 1.19. (#3077) - Include compatibility testing and document support. (#3077) -- Support the OTLP ExportTracePartialSuccess response; these are passed to the registered error handler. (#3106) -- Upgrade go.opentelemetry.io/proto/otlp from v0.18.0 to v0.19.0 (#3107) - -### Changed - -- Fix misidentification of OpenTelemetry `SpanKind` in OpenTracing bridge (`go.opentelemetry.io/otel/bridge/opentracing`). (#3096) -- Attempting to start a span with a nil `context` will no longer cause a panic. (#3110) -- All exporters will be shutdown even if one reports an error (#3091) -- Ensure valid UTF-8 when truncating over-length attribute values. 
(#3156) - -## [1.9.0/0.0.3] - 2022-08-01 - -### Added - -- Add support for Schema Files format 1.1.x (metric "split" transform) with the new `go.opentelemetry.io/otel/schema/v1.1` package. (#2999) -- Add the `go.opentelemetry.io/otel/semconv/v1.11.0` package. - The package contains semantic conventions from the `v1.11.0` version of the OpenTelemetry specification. (#3009) -- Add the `go.opentelemetry.io/otel/semconv/v1.12.0` package. - The package contains semantic conventions from the `v1.12.0` version of the OpenTelemetry specification. (#3010) -- Add the `http.method` attribute to HTTP server metric from all `go.opentelemetry.io/otel/semconv/*` packages. (#3018) - -### Fixed - -- Invalid warning for context setup being deferred in `go.opentelemetry.io/otel/bridge/opentracing` package. (#3029) - -## [1.8.0/0.31.0] - 2022-07-08 - -### Added - -- Add support for `opentracing.TextMap` format in the `Inject` and `Extract` methods -of the `"go.opentelemetry.io/otel/bridge/opentracing".BridgeTracer` type. (#2911) - -### Changed - -- The `crosslink` make target has been updated to use the `go.opentelemetry.io/build-tools/crosslink` package. (#2886) -- In the `go.opentelemetry.io/otel/sdk/instrumentation` package rename `Library` to `Scope` and alias `Library` as `Scope` (#2976) -- Move metric no-op implementation form `nonrecording` to `metric` package. (#2866) - -### Removed - -- Support for go1.16. Support is now only for go1.17 and go1.18 (#2917) - -### Deprecated - -- The `Library` struct in the `go.opentelemetry.io/otel/sdk/instrumentation` package is deprecated. - Use the equivalent `Scope` struct instead. (#2977) -- The `ReadOnlySpan.InstrumentationLibrary` method from the `go.opentelemetry.io/otel/sdk/trace` package is deprecated. - Use the equivalent `ReadOnlySpan.InstrumentationScope` method instead. (#2977) - -## [1.7.0/0.30.0] - 2022-04-28 - -### Added - -- Add the `go.opentelemetry.io/otel/semconv/v1.8.0` package. 
- The package contains semantic conventions from the `v1.8.0` version of the OpenTelemetry specification. (#2763) -- Add the `go.opentelemetry.io/otel/semconv/v1.9.0` package. - The package contains semantic conventions from the `v1.9.0` version of the OpenTelemetry specification. (#2792) -- Add the `go.opentelemetry.io/otel/semconv/v1.10.0` package. - The package contains semantic conventions from the `v1.10.0` version of the OpenTelemetry specification. (#2842) -- Added an in-memory exporter to metrictest to aid testing with a full SDK. (#2776) - -### Fixed - -- Globally delegated instruments are unwrapped before delegating asynchronous callbacks. (#2784) -- Remove import of `testing` package in non-tests builds of the `go.opentelemetry.io/otel` package. (#2786) - -### Changed - -- The `WithLabelEncoder` option from the `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` package is renamed to `WithAttributeEncoder`. (#2790) -- The `LabelFilterSelector` interface from `go.opentelemetry.io/otel/sdk/metric/processor/reducer` is renamed to `AttributeFilterSelector`. - The method included in the renamed interface also changed from `LabelFilterFor` to `AttributeFilterFor`. (#2790) -- The `Metadata.Labels` method from the `go.opentelemetry.io/otel/sdk/metric/export` package is renamed to `Metadata.Attributes`. - Consequentially, the `Record` type from the same package also has had the embedded method renamed. (#2790) - -### Deprecated - -- The `Iterator.Label` method in the `go.opentelemetry.io/otel/attribute` package is deprecated. - Use the equivalent `Iterator.Attribute` method instead. (#2790) -- The `Iterator.IndexedLabel` method in the `go.opentelemetry.io/otel/attribute` package is deprecated. - Use the equivalent `Iterator.IndexedAttribute` method instead. (#2790) -- The `MergeIterator.Label` method in the `go.opentelemetry.io/otel/attribute` package is deprecated. - Use the equivalent `MergeIterator.Attribute` method instead. 
(#2790) - -### Removed - -- Removed the `Batch` type from the `go.opentelemetry.io/otel/sdk/metric/metrictest` package. (#2864) -- Removed the `Measurement` type from the `go.opentelemetry.io/otel/sdk/metric/metrictest` package. (#2864) - -## [0.29.0] - 2022-04-11 - -### Added - -- The metrics global package was added back into several test files. (#2764) -- The `Meter` function is added back to the `go.opentelemetry.io/otel/metric/global` package. - This function is a convenience function equivalent to calling `global.MeterProvider().Meter(...)`. (#2750) - -### Removed - -- Removed module the `go.opentelemetry.io/otel/sdk/export/metric`. - Use the `go.opentelemetry.io/otel/sdk/metric` module instead. (#2720) - -### Changed - -- Don't panic anymore when setting a global MeterProvider to itself. (#2749) -- Upgrade `go.opentelemetry.io/proto/otlp` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` from `v0.12.1` to `v0.15.0`. - This replaces the use of the now deprecated `InstrumentationLibrary` and `InstrumentationLibraryMetrics` types and fields in the proto library with the equivalent `InstrumentationScope` and `ScopeMetrics`. (#2748) - -## [1.6.3] - 2022-04-07 - -### Fixed - -- Allow non-comparable global `MeterProvider`, `TracerProvider`, and `TextMapPropagator` types to be set. (#2772, #2773) - -## [1.6.2] - 2022-04-06 - -### Changed - -- Don't panic anymore when setting a global TracerProvider or TextMapPropagator to itself. (#2749) -- Upgrade `go.opentelemetry.io/proto/otlp` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace` from `v0.12.1` to `v0.15.0`. - This replaces the use of the now deprecated `InstrumentationLibrary` and `InstrumentationLibrarySpans` types and fields in the proto library with the equivalent `InstrumentationScope` and `ScopeSpans`. (#2748) - -## [1.6.1] - 2022-03-28 - -### Fixed - -- The `go.opentelemetry.io/otel/schema/*` packages now use the correct schema URL for their `SchemaURL` constant. 
- Instead of using `"https://opentelemetry.io/schemas/v"` they now use the correct URL without a `v` prefix, `"https://opentelemetry.io/schemas/"`. (#2743, #2744) - -### Security - -- Upgrade `go.opentelemetry.io/proto/otlp` from `v0.12.0` to `v0.12.1`. - This includes an indirect upgrade of `github.com/grpc-ecosystem/grpc-gateway` which resolves [a vulnerability](https://nvd.nist.gov/vuln/detail/CVE-2019-11254) from `gopkg.in/yaml.v2` in version `v2.2.3`. (#2724, #2728) - -## [1.6.0/0.28.0] - 2022-03-23 - -### ⚠️ Notice ⚠️ - -This update is a breaking change of the unstable Metrics API. -Code instrumented with the `go.opentelemetry.io/otel/metric` will need to be modified. - -### Added - -- Add metrics exponential histogram support. - New mapping functions have been made available in `sdk/metric/aggregator/exponential/mapping` for other OpenTelemetry projects to take dependencies on. (#2502) -- Add Go 1.18 to our compatibility tests. (#2679) -- Allow configuring the Sampler with the `OTEL_TRACES_SAMPLER` and `OTEL_TRACES_SAMPLER_ARG` environment variables. (#2305, #2517) -- Add the `metric/global` for obtaining and setting the global `MeterProvider`. (#2660) - -### Changed - -- The metrics API has been significantly changed to match the revised OpenTelemetry specification. - High-level changes include: - - - Synchronous and asynchronous instruments are now handled by independent `InstrumentProvider`s. - These `InstrumentProvider`s are managed with a `Meter`. - - Synchronous and asynchronous instruments are grouped into their own packages based on value types. - - Asynchronous callbacks can now be registered with a `Meter`. - - Be sure to check out the metric module documentation for more information on how to use the revised API. (#2587, #2660) - -### Fixed - -- Fallback to general attribute limits when span specific ones are not set in the environment. 
(#2675, #2677) - -## [1.5.0] - 2022-03-16 - -### Added - -- Log the Exporters configuration in the TracerProviders message. (#2578) -- Added support to configure the span limits with environment variables. - The following environment variables are supported. (#2606, #2637) - - `OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT` - - `OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT` - - `OTEL_SPAN_EVENT_COUNT_LIMIT` - - `OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT` - - `OTEL_SPAN_LINK_COUNT_LIMIT` - - `OTEL_LINK_ATTRIBUTE_COUNT_LIMIT` - - If the provided environment variables are invalid (negative), the default values would be used. -- Rename the `gc` runtime name to `go` (#2560) -- Add resource container ID detection. (#2418) -- Add span attribute value length limit. - The new `AttributeValueLengthLimit` field is added to the `"go.opentelemetry.io/otel/sdk/trace".SpanLimits` type to configure this limit for a `TracerProvider`. - The default limit for this resource is "unlimited". (#2637) -- Add the `WithRawSpanLimits` option to `go.opentelemetry.io/otel/sdk/trace`. - This option replaces the `WithSpanLimits` option. - Zero or negative values will not be changed to the default value like `WithSpanLimits` does. - Setting a limit to zero will effectively disable the related resource it limits and setting to a negative value will mean that resource is unlimited. - Consequentially, limits should be constructed using `NewSpanLimits` and updated accordingly. (#2637) - -### Changed - -- Drop oldest tracestate `Member` when capacity is reached. (#2592) -- Add event and link drop counts to the exported data from the `otlptrace` exporter. (#2601) -- Unify path cleaning functionality in the `otlpmetric` and `otlptrace` configuration. (#2639) -- Change the debug message from the `sdk/trace.BatchSpanProcessor` to reflect the count is cumulative. (#2640) -- Introduce new internal `envconfig` package for OTLP exporters. 
(#2608) -- If `http.Request.Host` is empty, fall back to use `URL.Host` when populating `http.host` in the `semconv` packages. (#2661) - -### Fixed - -- Remove the OTLP trace exporter limit of SpanEvents when exporting. (#2616) -- Default to port `4318` instead of `4317` for the `otlpmetrichttp` and `otlptracehttp` client. (#2614, #2625) -- Unlimited span limits are now supported (negative values). (#2636, #2637) - -### Deprecated - -- Deprecated `"go.opentelemetry.io/otel/sdk/trace".WithSpanLimits`. - Use `WithRawSpanLimits` instead. - That option allows setting unlimited and zero limits, this option does not. - This option will be kept until the next major version incremented release. (#2637) - -## [1.4.1] - 2022-02-16 - -### Fixed - -- Fix race condition in reading the dropped spans number for the `BatchSpanProcessor`. (#2615) - -## [1.4.0] - 2022-02-11 - -### Added - -- Use `OTEL_EXPORTER_ZIPKIN_ENDPOINT` environment variable to specify zipkin collector endpoint. (#2490) -- Log the configuration of `TracerProvider`s, and `Tracer`s for debugging. - To enable use a logger with Verbosity (V level) `>=1`. (#2500) -- Added support to configure the batch span-processor with environment variables. - The following environment variables are used. (#2515) - - `OTEL_BSP_SCHEDULE_DELAY` - - `OTEL_BSP_EXPORT_TIMEOUT` - - `OTEL_BSP_MAX_QUEUE_SIZE`. - - `OTEL_BSP_MAX_EXPORT_BATCH_SIZE` - -### Changed - -- Zipkin exporter exports `Resource` attributes in the `Tags` field. (#2589) - -### Deprecated - -- Deprecate module the `go.opentelemetry.io/otel/sdk/export/metric`. - Use the `go.opentelemetry.io/otel/sdk/metric` module instead. (#2382) -- Deprecate `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets`. (#2445) - -### Fixed - -- Fixed the instrument kind for noop async instruments to correctly report an implementation. (#2461) -- Fix UDP packets overflowing with Jaeger payloads. 
(#2489, #2512) -- Change the `otlpmetric.Client` interface's `UploadMetrics` method to accept a single `ResourceMetrics` instead of a slice of them. (#2491) -- Specify explicit buckets in Prometheus example, fixing issue where example only has `+inf` bucket. (#2419, #2493) -- W3C baggage will now decode urlescaped values. (#2529) -- Baggage members are now only validated once, when calling `NewMember` and not also when adding it to the baggage itself. (#2522) -- The order attributes are dropped from spans in the `go.opentelemetry.io/otel/sdk/trace` package when capacity is reached is fixed to be in compliance with the OpenTelemetry specification. - Instead of dropping the least-recently-used attribute, the last added attribute is dropped. - This drop order still only applies to attributes with unique keys not already contained in the span. - If an attribute is added with a key already contained in the span, that attribute is updated to the new value being added. (#2576) - -### Removed - -- Updated `go.opentelemetry.io/proto/otlp` from `v0.11.0` to `v0.12.0`. This version removes a number of deprecated methods. (#2546) - - [`Metric.GetIntGauge()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntGauge) - - [`Metric.GetIntHistogram()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntHistogram) - - [`Metric.GetIntSum()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntSum) - -## [1.3.0] - 2021-12-10 - -### ⚠️ Notice ⚠️ - -We have updated the project minimum supported Go version to 1.16 - -### Added - -- Added an internal Logger. - This can be used by the SDK and API to provide users with feedback of the internal state. - To enable verbose logs configure the logger which will print V(1) logs. For debugging information configure to print V(5) logs. 
(#2343) -- Add the `WithRetry` `Option` and the `RetryConfig` type to the `go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp` package to specify retry behavior consistently. (#2425) -- Add `SpanStatusFromHTTPStatusCodeAndSpanKind` to all `semconv` packages to return a span status code similar to `SpanStatusFromHTTPStatusCode`, but exclude `4XX` HTTP errors as span errors if the span is of server kind. (#2296) - -### Changed - -- The `"go.opentelemetry.io/otel/exporter/otel/otlptrace/otlptracegrpc".Client` now uses the underlying gRPC `ClientConn` to handle name resolution, TCP connection establishment (with retries and backoff) and TLS handshakes, and handling errors on established connections by re-resolving the name and reconnecting. (#2329) -- The `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetricgrpc".Client` now uses the underlying gRPC `ClientConn` to handle name resolution, TCP connection establishment (with retries and backoff) and TLS handshakes, and handling errors on established connections by re-resolving the name and reconnecting. (#2425) -- The `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetricgrpc".RetrySettings` type is renamed to `RetryConfig`. (#2425) -- The `go.opentelemetry.io/otel/exporter/otel/*` gRPC exporters now default to using the host's root CA set if none are provided by the user and `WithInsecure` is not specified. (#2432) -- Change `resource.Default` to be evaluated the first time it is called, rather than on import. This allows the caller the option to update `OTEL_RESOURCE_ATTRIBUTES` first, such as with `os.Setenv`. (#2371) - -### Fixed - -- The `go.opentelemetry.io/otel/exporter/otel/*` exporters are updated to handle per-signal and universal endpoints according to the OpenTelemetry specification. - Any per-signal endpoint set via an `OTEL_EXPORTER_OTLP_<signal>_ENDPOINT` environment variable is now used without modification of the path. 
- When `OTEL_EXPORTER_OTLP_ENDPOINT` is set, if it contains a path, that path is used as a base path which per-signal paths are appended to. (#2433) -- Basic metric controller updated to use sync.Map to avoid blocking calls (#2381) -- The `go.opentelemetry.io/otel/exporter/jaeger` correctly sets the `otel.status_code` value to be a string of `ERROR` or `OK` instead of an integer code. (#2439, #2440) - -### Deprecated - -- Deprecated the `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp".WithMaxAttempts` `Option`, use the new `WithRetry` `Option` instead. (#2425) -- Deprecated the `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp".WithBackoff` `Option`, use the new `WithRetry` `Option` instead. (#2425) - -### Removed - -- Remove the metric Processor's ability to convert cumulative to delta aggregation temporality. (#2350) -- Remove the metric Bound Instruments interface and implementations. (#2399) -- Remove the metric MinMaxSumCount kind aggregation and the corresponding OTLP export path. (#2423) -- Metric SDK removes the "exact" aggregator for histogram instruments, as it performed a non-standard aggregation for OTLP export (creating repeated Gauge points) and worked its way into a number of confusing examples. (#2348) - -## [1.2.0] - 2021-11-12 - -### Changed - -- Metric SDK `export.ExportKind`, `export.ExportKindSelector` types have been renamed to `aggregation.Temporality` and `aggregation.TemporalitySelector` respectively to keep in line with current specification and protocol along with built-in selectors (e.g., `aggregation.CumulativeTemporalitySelector`, ...). (#2274) -- The Metric `Exporter` interface now requires a `TemporalitySelector` method instead of an `ExportKindSelector`. (#2274) -- Metrics API cleanup. 
The `metric/sdkapi` package has been created to relocate the API-to-SDK interface: - - The following interface types simply moved from `metric` to `metric/sdkapi`: `Descriptor`, `MeterImpl`, `InstrumentImpl`, `SyncImpl`, `BoundSyncImpl`, `AsyncImpl`, `AsyncRunner`, `AsyncSingleRunner`, and `AsyncBatchRunner` - - The following struct types moved and are replaced with type aliases, since they are exposed to the user: `Observation`, `Measurement`. - - The No-op implementations of sync and async instruments are no longer exported, new functions `sdkapi.NewNoopAsyncInstrument()` and `sdkapi.NewNoopSyncInstrument()` are provided instead. (#2271) -- Update the SDK `BatchSpanProcessor` to export all queued spans when `ForceFlush` is called. (#2080, #2335) - -### Added - -- Add the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc".WithGRPCConn` option so the exporter can reuse an existing gRPC connection. (#2002) -- Added a new `schema` module to help parse Schema Files in OTEP 0152 format. (#2267) -- Added a new `MapCarrier` to the `go.opentelemetry.io/otel/propagation` package to hold propagated cross-cutting concerns as a `map[string]string` held in memory. (#2334) - -## [1.1.0] - 2021-10-27 - -### Added - -- Add the `"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc".WithGRPCConn` option so the exporter can reuse an existing gRPC connection. (#2002) -- Add the `go.opentelemetry.io/otel/semconv/v1.7.0` package. - The package contains semantic conventions from the `v1.7.0` version of the OpenTelemetry specification. (#2320) -- Add the `go.opentelemetry.io/otel/semconv/v1.6.1` package. - The package contains semantic conventions from the `v1.6.1` version of the OpenTelemetry specification. (#2321) -- Add the `go.opentelemetry.io/otel/semconv/v1.5.0` package. - The package contains semantic conventions from the `v1.5.0` version of the OpenTelemetry specification. 
(#2322) - - When upgrading from the `semconv/v1.4.0` package note the following name changes: - - `K8SReplicasetUIDKey` -> `K8SReplicaSetUIDKey` - - `K8SReplicasetNameKey` -> `K8SReplicaSetNameKey` - - `K8SStatefulsetUIDKey` -> `K8SStatefulSetUIDKey` - - `k8SStatefulsetNameKey` -> `K8SStatefulSetNameKey` - - `K8SDaemonsetUIDKey` -> `K8SDaemonSetUIDKey` - - `K8SDaemonsetNameKey` -> `K8SDaemonSetNameKey` - -### Changed - -- Links added to a span will be dropped by the SDK if they contain an invalid span context (#2275). - -### Fixed - -- The `"go.opentelemetry.io/otel/semconv/v1.4.0".HTTPServerAttributesFromHTTPRequest` now correctly only sets the HTTP client IP attribute even if the connection was routed with proxies and there are multiple addresses in the `X-Forwarded-For` header. (#2282, #2284) -- The `"go.opentelemetry.io/otel/semconv/v1.4.0".NetAttributesFromHTTPRequest` function correctly handles IPv6 addresses as IP addresses and sets the correct net peer IP instead of the net peer hostname attribute. (#2283, #2285) -- The simple span processor shutdown method deterministically returns the exporter error status if it simultaneously finishes when the deadline is reached. (#2290, #2289) - -## [1.0.1] - 2021-10-01 - -### Fixed - -- json stdout exporter no longer crashes due to concurrency bug. (#2265) - -## [Metrics 0.24.0] - 2021-10-01 - -### Changed - -- NoopMeterProvider is now private and NewNoopMeterProvider must be used to obtain a noopMeterProvider. (#2237) -- The Metric SDK `Export()` function takes a new two-level reader interface for iterating over results one instrumentation library at a time. (#2197) - - The former `"go.opentelemetry.io/otel/sdk/export/metric".CheckpointSet` is renamed `Reader`. - - The new interface is named `"go.opentelemetry.io/otel/sdk/export/metric".InstrumentationLibraryReader`. - -## [1.0.0] - 2021-09-20 - -This is the first stable release for the project. 
-This release includes an API and SDK for the tracing signal that will comply with the stability guarantees defined by the projects [versioning policy](./VERSIONING.md). - -### Added - -- OTLP trace exporter now sets the `SchemaURL` field in the exported telemetry if the Tracer has `WithSchemaURL` option. (#2242) - -### Fixed - -- Slice-valued attributes can correctly be used as map keys. (#2223) - -### Removed - -- Removed the `"go.opentelemetry.io/otel/exporters/zipkin".WithSDKOptions` function. (#2248) -- Removed the deprecated package `go.opentelemetry.io/otel/oteltest`. (#2234) -- Removed the deprecated package `go.opentelemetry.io/otel/bridge/opencensus/utils`. (#2233) -- Removed deprecated functions, types, and methods from `go.opentelemetry.io/otel/attribute` package. - Use the typed functions and methods added to the package instead. (#2235) - - The `Key.Array` method is removed. - - The `Array` function is removed. - - The `Any` function is removed. - - The `ArrayValue` function is removed. - - The `AsArray` function is removed. - -## [1.0.0-RC3] - 2021-09-02 - -### Added - -- Added `ErrorHandlerFunc` to use a function as an `"go.opentelemetry.io/otel".ErrorHandler`. (#2149) -- Added `"go.opentelemetry.io/otel/trace".WithStackTrace` option to add a stack trace when using `span.RecordError` or when panic is handled in `span.End`. (#2163) -- Added typed slice attribute types and functionality to the `go.opentelemetry.io/otel/attribute` package to replace the existing array type and functions. (#2162) - - `BoolSlice`, `IntSlice`, `Int64Slice`, `Float64Slice`, and `StringSlice` replace the use of the `Array` function in the package. -- Added the `go.opentelemetry.io/otel/example/fib` example package. - Included is an example application that computes Fibonacci numbers. 
(#2203) - -### Changed - -- Metric instruments have been renamed to match the (feature-frozen) metric API specification: - - ValueRecorder becomes Histogram - - ValueObserver becomes Gauge - - SumObserver becomes CounterObserver - - UpDownSumObserver becomes UpDownCounterObserver - The API exported from this project is still considered experimental. (#2202) -- Metric SDK/API implementation type `InstrumentKind` moves into `sdkapi` sub-package. (#2091) -- The Metrics SDK export record no longer contains a Resource pointer, the SDK `"go.opentelemetry.io/otel/sdk/trace/export/metric".Exporter.Export()` function for push-based exporters now takes a single Resource argument, pull-based exporters use `"go.opentelemetry.io/otel/sdk/metric/controller/basic".Controller.Resource()`. (#2120) -- The JSON output of the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` is harmonized now such that the output is "plain" JSON objects after each other of the form `{ ... } { ... } { ... }`. Earlier the JSON objects describing a span were wrapped in a slice for each `Exporter.ExportSpans` call, like `[ { ... } ][ { ... } { ... } ]`. Outputting JSON object directly after each other is consistent with JSON loggers, and a bit easier to parse and read. (#2196) -- Update the `NewTracerConfig`, `NewSpanStartConfig`, `NewSpanEndConfig`, and `NewEventConfig` function in the `go.opentelemetry.io/otel/trace` package to return their respective configurations as structs instead of pointers to the struct. (#2212) - -### Deprecated - -- The `go.opentelemetry.io/otel/bridge/opencensus/utils` package is deprecated. - All functionality from this package now exists in the `go.opentelemetry.io/otel/bridge/opencensus` package. - The functions from that package should be used instead. (#2166) -- The `"go.opentelemetry.io/otel/attribute".Array` function and the related `ARRAY` value type is deprecated. - Use the typed `*Slice` functions and types added to the package instead. 
(#2162) -- The `"go.opentelemetry.io/otel/attribute".Any` function is deprecated. - Use the typed functions instead. (#2181) -- The `go.opentelemetry.io/otel/oteltest` package is deprecated. - The `"go.opentelemetry.io/otel/sdk/trace/tracetest".SpanRecorder` can be registered with the default SDK (`go.opentelemetry.io/otel/sdk/trace`) as a `SpanProcessor` and used as a replacement for this deprecated package. (#2188) - -### Removed - -- Removed metrics test package `go.opentelemetry.io/otel/sdk/export/metric/metrictest`. (#2105) - -### Fixed - -- The `fromEnv` detector no longer throws an error when `OTEL_RESOURCE_ATTRIBUTES` environment variable is not set or empty. (#2138) -- Setting the global `ErrorHandler` with `"go.opentelemetry.io/otel".SetErrorHandler` multiple times is now supported. (#2160, #2140) -- The `"go.opentelemetry.io/otel/attribute".Any` function now supports `int32` values. (#2169) -- Multiple calls to `"go.opentelemetry.io/otel/sdk/metric/controller/basic".WithResource()` are handled correctly, and when no resources are provided `"go.opentelemetry.io/otel/sdk/resource".Default()` is used. (#2120) -- The `WithoutTimestamps` option for the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter causes the exporter to correctly omit timestamps. (#2195) -- Fixed typos in resources.go. (#2201) - -## [1.0.0-RC2] - 2021-07-26 - -### Added - -- Added `WithOSDescription` resource configuration option to set OS (Operating System) description resource attribute (`os.description`). (#1840) -- Added `WithOS` resource configuration option to set all OS (Operating System) resource attributes at once. (#1840) -- Added the `WithRetry` option to the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` package. - This option is a replacement for the removed `WithMaxAttempts` and `WithBackoff` options. (#2095) -- Added API `LinkFromContext` to return Link which encapsulates SpanContext from provided context and also encapsulates attributes. 
(#2115) -- Added a new `Link` type under the SDK `otel/sdk/trace` package that counts the number of attributes that were dropped for surpassing the `AttributePerLinkCountLimit` configured in the Span's `SpanLimits`. - This new type replaces the equal-named API `Link` type found in the `otel/trace` package for most usages within the SDK. - For example, instances of this type are now returned by the `Links()` function of `ReadOnlySpan`s provided in places like the `OnEnd` function of `SpanProcessor` implementations. (#2118) -- Added the `SpanRecorder` type to the `go.opentelemetry.io/otel/sdk/trace/tracetest` package. - This type can be used with the default SDK as a `SpanProcessor` during testing. (#2132) - -### Changed - -- The `SpanModels` function is now exported from the `go.opentelemetry.io/otel/exporters/zipkin` package to convert OpenTelemetry spans into Zipkin model spans. (#2027) -- Rename the `"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc".RetrySettings` to `RetryConfig`. (#2095) - -### Deprecated - -- The `TextMapCarrier` and `TextMapPropagator` from the `go.opentelemetry.io/otel/oteltest` package and their associated creation functions (`TextMapCarrier`, `NewTextMapPropagator`) are deprecated. (#2114) -- The `Harness` type from the `go.opentelemetry.io/otel/oteltest` package and its associated creation function, `NewHarness` are deprecated and will be removed in the next release. (#2123) -- The `TraceStateFromKeyValues` function from the `go.opentelemetry.io/otel/oteltest` package is deprecated. - Use the `trace.ParseTraceState` function instead. (#2122) - -### Removed - -- Removed the deprecated package `go.opentelemetry.io/otel/exporters/trace/jaeger`. (#2020) -- Removed the deprecated package `go.opentelemetry.io/otel/exporters/trace/zipkin`. (#2020) -- Removed the `"go.opentelemetry.io/otel/sdk/resource".WithBuiltinDetectors` function. - The explicit `With*` options for every built-in detector should be used instead. 
(#2026 #2097) -- Removed the `WithMaxAttempts` and `WithBackoff` options from the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` package. - The retry logic of the package has been updated to match the `otlptracegrpc` package and accordingly a `WithRetry` option is added that should be used instead. (#2095) -- Removed `DroppedAttributeCount` field from `otel/trace.Link` struct. (#2118) - -### Fixed - -- When using WithNewRoot, don't use the parent context for making sampling decisions. (#2032) -- `oteltest.Tracer` now creates a valid `SpanContext` when using `WithNewRoot`. (#2073) -- OS type detector now sets the correct `dragonflybsd` value for DragonFly BSD. (#2092) -- The OTel span status is correctly transformed into the OTLP status in the `go.opentelemetry.io/otel/exporters/otlp/otlptrace` package. - This fix will by default set the status to `Unset` if it is not explicitly set to `Ok` or `Error`. (#2099 #2102) -- The `Inject` method for the `"go.opentelemetry.io/otel/propagation".TraceContext` type no longer injects empty `tracestate` values. (#2108) -- Use `6831` as default Jaeger agent port instead of `6832`. (#2131) - -## [Experimental Metrics v0.22.0] - 2021-07-19 - -### Added - -- Adds HTTP support for OTLP metrics exporter. (#2022) - -### Removed - -- Removed the deprecated package `go.opentelemetry.io/otel/exporters/metric/prometheus`. (#2020) - -## [1.0.0-RC1] / 0.21.0 - 2021-06-18 - -With this release we are introducing a split in module versions. The tracing API and SDK are entering the `v1.0.0` Release Candidate phase with `v1.0.0-RC1` -while the experimental metrics API and SDK continue with `v0.x` releases at `v0.21.0`. Modules at major version 1 or greater will not depend on modules -with major version 0. - -### Added - -- Adds `otlpgrpc.WithRetry`option for configuring the retry policy for transient errors on the otlp/gRPC exporter. 
(#1832) - - The following status codes are defined as transient errors: - | gRPC Status Code | Description | - | ---------------- | ----------- | - | 1 | Cancelled | - | 4 | Deadline Exceeded | - | 8 | Resource Exhausted | - | 10 | Aborted | - | 11 | Out of Range | - | 14 | Unavailable | - | 15 | Data Loss | -- Added `Status` type to the `go.opentelemetry.io/otel/sdk/trace` package to represent the status of a span. (#1874) -- Added `SpanStub` type and its associated functions to the `go.opentelemetry.io/otel/sdk/trace/tracetest` package. - This type can be used as a testing replacement for the `SpanSnapshot` that was removed from the `go.opentelemetry.io/otel/sdk/trace` package. (#1873) -- Adds support for scheme in `OTEL_EXPORTER_OTLP_ENDPOINT` according to the spec. (#1886) -- Adds `trace.WithSchemaURL` option for configuring the tracer with a Schema URL. (#1889) -- Added an example of using OpenTelemetry Go as a trace context forwarder. (#1912) -- `ParseTraceState` is added to the `go.opentelemetry.io/otel/trace` package. - It can be used to decode a `TraceState` from a `tracestate` header string value. (#1937) -- Added `Len` method to the `TraceState` type in the `go.opentelemetry.io/otel/trace` package. - This method returns the number of list-members the `TraceState` holds. (#1937) -- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace` that defines a trace exporter that uses a `otlptrace.Client` to send data. - Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` implementing a gRPC `otlptrace.Client` and offers convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to setup and install a `otlptrace.Exporter` in tracing. (#1922) -- Added `Baggage`, `Member`, and `Property` types to the `go.opentelemetry.io/otel/baggage` package along with their related functions. 
(#1967) -- Added `ContextWithBaggage`, `ContextWithoutBaggage`, and `FromContext` functions to the `go.opentelemetry.io/otel/baggage` package. - These functions replace the `Set`, `Value`, `ContextWithValue`, `ContextWithoutValue`, and `ContextWithEmpty` functions from that package and directly work with the new `Baggage` type. (#1967) -- The `OTEL_SERVICE_NAME` environment variable is the preferred source for `service.name`, used by the environment resource detector if a service name is present both there and in `OTEL_RESOURCE_ATTRIBUTES`. (#1969) -- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` implementing an HTTP `otlptrace.Client` and offers convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to setup and install a `otlptrace.Exporter` in tracing. (#1963) -- Changes `go.opentelemetry.io/otel/sdk/resource.NewWithAttributes` to require a schema URL. The old function is still available as `resource.NewSchemaless`. This is a breaking change. (#1938) -- Several builtin resource detectors now correctly populate the schema URL. (#1938) -- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` that defines a metrics exporter that uses a `otlpmetric.Client` to send data. -- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` implementing a gRPC `otlpmetric.Client` and offers convenience functions, `New` and `NewUnstarted`, to create an `otlpmetric.Exporter`. (#1991) -- Added `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter. (#2005) -- Added `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` exporter. (#2005) -- Added a `TracerProvider()` method to the `"go.opentelemetry.io/otel/trace".Span` interface. This can be used to obtain a `TracerProvider` from a given span that utilizes the same trace processing pipeline. 
(#2009) - -### Changed - -- Make `NewSplitDriver` from `go.opentelemetry.io/otel/exporters/otlp` take variadic arguments instead of a `SplitConfig` item. - `NewSplitDriver` now automatically implements an internal `noopDriver` for `SplitConfig` fields that are not initialized. (#1798) -- `resource.New()` now creates a Resource without builtin detectors. Previous behavior is now achieved by using `WithBuiltinDetectors` Option. (#1810) -- Move the `Event` type from the `go.opentelemetry.io/otel` package to the `go.opentelemetry.io/otel/sdk/trace` package. (#1846) -- CI builds validate against last two versions of Go, dropping 1.14 and adding 1.16. (#1865) -- BatchSpanProcessor now report export failures when calling `ForceFlush()` method. (#1860) -- `Set.Encoded(Encoder)` no longer caches the result of an encoding. (#1855) -- Renamed `CloudZoneKey` to `CloudAvailabilityZoneKey` in Resource semantic conventions according to spec. (#1871) -- The `StatusCode` and `StatusMessage` methods of the `ReadOnlySpan` interface and the `Span` produced by the `go.opentelemetry.io/otel/sdk/trace` package have been replaced with a single `Status` method. - This method returns the status of a span using the new `Status` type. (#1874) -- Updated `ExportSpans` method of the`SpanExporter` interface type to accept `ReadOnlySpan`s instead of the removed `SpanSnapshot`. - This brings the export interface into compliance with the specification in that it now accepts an explicitly immutable type instead of just an implied one. (#1873) -- Unembed `SpanContext` in `Link`. (#1877) -- Generate Semantic conventions from the specification YAML. (#1891) -- Spans created by the global `Tracer` obtained from `go.opentelemetry.io/otel`, prior to a functioning `TracerProvider` being set, now propagate the span context from their parent if one exists. (#1901) -- The `"go.opentelemetry.io/otel".Tracer` function now accepts tracer options. 
(#1902) -- Move the `go.opentelemetry.io/otel/unit` package to `go.opentelemetry.io/otel/metric/unit`. (#1903) -- Changed `go.opentelemetry.io/otel/trace.TracerConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config.) (#1921) -- Changed `go.opentelemetry.io/otel/trace.SpanConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921) -- Changed `span.End()` now only accepts Options that are allowed at `End()`. (#1921) -- Changed `go.opentelemetry.io/otel/metric.InstrumentConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921) -- Changed `go.opentelemetry.io/otel/metric.MeterConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921) -- Refactored option types according to the contribution style guide. (#1882) -- Move the `go.opentelemetry.io/otel/trace.TraceStateFromKeyValues` function to the `go.opentelemetry.io/otel/oteltest` package. - This function is preserved for testing purposes where it may be useful to create a `TraceState` from `attribute.KeyValue`s, but it is not intended for production use. - The new `ParseTraceState` function should be used to create a `TraceState`. (#1931) -- Updated `MarshalJSON` method of the `go.opentelemetry.io/otel/trace.TraceState` type to marshal the type into the string representation of the `TraceState`. (#1931) -- The `TraceState.Delete` method from the `go.opentelemetry.io/otel/trace` package no longer returns an error in addition to a `TraceState`. (#1931) -- Updated `Get` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a `string` instead of an `attribute.Key` type. (#1931) -- Updated `Insert` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a pair of `string`s instead of an `attribute.KeyValue` type. 
(#1931) -- Updated `Delete` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a `string` instead of an `attribute.Key` type. (#1931) -- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/stdout` package. (#1985) -- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/metric/prometheus` package. (#1985) -- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/trace/jaeger` package. (#1985) -- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/trace/zipkin` package. (#1985) -- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/otlp` package. (#1985) -- Renamed `NewUnstartedExporter` to `NewUnstarted` in the `go.opentelemetry.io/otel/exporters/otlp` package. (#1985) -- The `go.opentelemetry.io/otel/semconv` package has been moved to `go.opentelemetry.io/otel/semconv/v1.4.0` to allow for multiple [telemetry schema](https://github.com/open-telemetry/oteps/blob/main/text/0152-telemetry-schemas.md) versions to be used concurrently. (#1987) -- Metrics test helpers in `go.opentelemetry.io/otel/oteltest` have been moved to `go.opentelemetry.io/otel/metric/metrictest`. (#1988) - -### Deprecated - -- The `go.opentelemetry.io/otel/exporters/metric/prometheus` is deprecated, use `go.opentelemetry.io/otel/exporters/prometheus` instead. (#1993) -- The `go.opentelemetry.io/otel/exporters/trace/jaeger` is deprecated, use `go.opentelemetry.io/otel/exporters/jaeger` instead. (#1993) -- The `go.opentelemetry.io/otel/exporters/trace/zipkin` is deprecated, use `go.opentelemetry.io/otel/exporters/zipkin` instead. (#1993) - -### Removed - -- Removed `resource.WithoutBuiltin()`. Use `resource.New()`. (#1810) -- Unexported types `resource.FromEnv`, `resource.Host`, and `resource.TelemetrySDK`, Use the corresponding `With*()` to use individually. 
(#1810) -- Removed the `Tracer` and `IsRecording` method from the `ReadOnlySpan` in the `go.opentelemetry.io/otel/sdk/trace`. - The `Tracer` method is not required to be included in this interface and given the mutable nature of the tracer that is associated with a span, this method is not appropriate. - The `IsRecording` method returns if the span is recording or not. - A read-only span value does not need to know if updates to it will be recorded or not. - By definition, it cannot be updated so there is no point in communicating if an update is recorded. (#1873) -- Removed the `SpanSnapshot` type from the `go.opentelemetry.io/otel/sdk/trace` package. - The use of this type has been replaced with the use of the explicitly immutable `ReadOnlySpan` type. - When a concrete representation of a read-only span is needed for testing, the newly added `SpanStub` in the `go.opentelemetry.io/otel/sdk/trace/tracetest` package should be used. (#1873) -- Removed the `Tracer` method from the `Span` interface in the `go.opentelemetry.io/otel/trace` package. - Using the same tracer that created a span introduces the error where an instrumentation library's `Tracer` is used by other code instead of their own. - The `"go.opentelemetry.io/otel".Tracer` function or a `TracerProvider` should be used to acquire a library specific `Tracer` instead. (#1900) - - The `TracerProvider()` method on the `Span` interface may also be used to obtain a `TracerProvider` using the same trace processing pipeline. (#2009) -- The `http.url` attribute generated by `HTTPClientAttributesFromHTTPRequest` will no longer include username or password information. (#1919) -- Removed `IsEmpty` method of the `TraceState` type in the `go.opentelemetry.io/otel/trace` package in favor of using the added `TraceState.Len` method. (#1931) -- Removed `Set`, `Value`, `ContextWithValue`, `ContextWithoutValue`, and `ContextWithEmpty` functions in the `go.opentelemetry.io/otel/baggage` package. 
- Handling of baggage is now done using the added `Baggage` type and related context functions (`ContextWithBaggage`, `ContextWithoutBaggage`, and `FromContext`) in that package. (#1967) -- The `InstallNewPipeline` and `NewExportPipeline` creation functions in all the exporters (prometheus, otlp, stdout, jaeger, and zipkin) have been removed. - These functions were deemed premature attempts to provide convenience that did not achieve this aim. (#1985) -- The `go.opentelemetry.io/otel/exporters/otlp` exporter has been removed. Use `go.opentelemetry.io/otel/exporters/otlp/otlptrace` instead. (#1990) -- The `go.opentelemetry.io/otel/exporters/stdout` exporter has been removed. Use `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` or `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` instead. (#2005) - -### Fixed - -- Only report errors from the `"go.opentelemetry.io/otel/sdk/resource".Environment` function when they are not `nil`. (#1850, #1851) -- The `Shutdown` method of the simple `SpanProcessor` in the `go.opentelemetry.io/otel/sdk/trace` package now honors the context deadline or cancellation. (#1616, #1856) -- BatchSpanProcessor now drops span batches that failed to be exported. (#1860) -- Use `http://localhost:14268/api/traces` as default Jaeger collector endpoint instead of `http://localhost:14250`. (#1898) -- Allow trailing and leading whitespace in the parsing of a `tracestate` header. (#1931) -- Add logic to determine if the channel is closed to fix Jaeger exporter test panic with close closed channel. (#1870, #1973) -- Avoid transport security when OTLP endpoint is a Unix socket. (#2001) - -### Security - -## [0.20.0] - 2021-04-23 - -### Added - -- The OTLP exporter now has two new convenience functions, `NewExportPipeline` and `InstallNewPipeline`, setup and install the exporter in tracing and metrics pipelines. (#1373) -- Adds semantic conventions for exceptions. 
(#1492) -- Added Jaeger Environment variables: `OTEL_EXPORTER_JAEGER_AGENT_HOST`, `OTEL_EXPORTER_JAEGER_AGENT_PORT` - These environment variables can be used to override Jaeger agent hostname and port (#1752) -- Option `ExportTimeout` was added to batch span processor. (#1755) -- `trace.TraceFlags` is now a defined type over `byte` and `WithSampled(bool) TraceFlags` and `IsSampled() bool` methods have been added to it. (#1770) -- The `Event` and `Link` struct types from the `go.opentelemetry.io/otel` package now include a `DroppedAttributeCount` field to record the number of attributes that were not recorded due to configured limits being reached. (#1771) -- The Jaeger exporter now reports dropped attributes for a Span event in the exported log. (#1771) -- Adds test to check BatchSpanProcessor ignores `OnEnd` and `ForceFlush` post `Shutdown`. (#1772) -- Extract resource attributes from the `OTEL_RESOURCE_ATTRIBUTES` environment variable and merge them with the `resource.Default` resource as well as resources provided to the `TracerProvider` and metric `Controller`. (#1785) -- Added `WithOSType` resource configuration option to set OS (Operating System) type resource attribute (`os.type`). (#1788) -- Added `WithProcess*` resource configuration options to set Process resource attributes. (#1788) - - `process.pid` - - `process.executable.name` - - `process.executable.path` - - `process.command_args` - - `process.owner` - - `process.runtime.name` - - `process.runtime.version` - - `process.runtime.description` -- Adds `k8s.node.name` and `k8s.node.uid` attribute keys to the `semconv` package. (#1789) -- Added support for configuring OTLP/HTTP and OTLP/gRPC Endpoints, TLS Certificates, Headers, Compression and Timeout via Environment Variables. 
(#1758, #1769 and #1811) - - `OTEL_EXPORTER_OTLP_ENDPOINT` - - `OTEL_EXPORTER_OTLP_TRACES_ENDPOINT` - - `OTEL_EXPORTER_OTLP_METRICS_ENDPOINT` - - `OTEL_EXPORTER_OTLP_HEADERS` - - `OTEL_EXPORTER_OTLP_TRACES_HEADERS` - - `OTEL_EXPORTER_OTLP_METRICS_HEADERS` - - `OTEL_EXPORTER_OTLP_COMPRESSION` - - `OTEL_EXPORTER_OTLP_TRACES_COMPRESSION` - - `OTEL_EXPORTER_OTLP_METRICS_COMPRESSION` - - `OTEL_EXPORTER_OTLP_TIMEOUT` - - `OTEL_EXPORTER_OTLP_TRACES_TIMEOUT` - - `OTEL_EXPORTER_OTLP_METRICS_TIMEOUT` - - `OTEL_EXPORTER_OTLP_CERTIFICATE` - - `OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE` - - `OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE` -- Adds `otlpgrpc.WithTimeout` option for configuring timeout to the otlp/gRPC exporter. (#1821) -- Adds `jaeger.WithMaxPacketSize` option for configuring maximum UDP packet size used when connecting to the Jaeger agent. (#1853) - -### Fixed - -- The `Span.IsRecording` implementation from `go.opentelemetry.io/otel/sdk/trace` always returns false when not being sampled. (#1750) -- The Jaeger exporter now correctly sets tags for the Span status code and message. - This means it uses the correct tag keys (`"otel.status_code"`, `"otel.status_description"`) and does not set the status message as a tag unless it is set on the span. (#1761) -- The Jaeger exporter now correctly records Span event's names using the `"event"` key for a tag. - Additionally, this tag is overridden, as specified in the OTel specification, if the event contains an attribute with that key. (#1768) -- Zipkin Exporter: Ensure mapping between OTel and Zipkin span data complies with the specification. (#1688) -- Fixed typo for default service name in Jaeger Exporter. (#1797) -- Fix flaky OTLP for the reconnnection of the client connection. (#1527, #1814) -- Fix Jaeger exporter dropping of span batches that exceed the UDP packet size limit. - Instead, the exporter now splits the batch into smaller sendable batches. 
(#1828) - -### Changed - -- Span `RecordError` now records an `exception` event to comply with the semantic convention specification. (#1492) -- Jaeger exporter was updated to use thrift v0.14.1. (#1712) -- Migrate from using internally built and maintained version of the OTLP to the one hosted at `go.opentelemetry.io/proto/otlp`. (#1713) -- Migrate from using `github.com/gogo/protobuf` to `google.golang.org/protobuf` to match `go.opentelemetry.io/proto/otlp`. (#1713) -- The storage of a local or remote Span in a `context.Context` using its SpanContext is unified to store just the current Span. - The Span's SpanContext can now self-identify as being remote or not. - This means that `"go.opentelemetry.io/otel/trace".ContextWithRemoteSpanContext` will now overwrite any existing current Span, not just existing remote Spans, and make it the current Span in a `context.Context`. (#1731) -- Improve OTLP/gRPC exporter connection errors. (#1737) -- Information about a parent span context in a `"go.opentelemetry.io/otel/export/trace".SpanSnapshot` is unified in a new `Parent` field. - The existing `ParentSpanID` and `HasRemoteParent` fields are removed in favor of this. (#1748) -- The `ParentContext` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is updated to hold a `context.Context` containing the parent span. - This changes it to make `SamplingParameters` conform with the OpenTelemetry specification. (#1749) -- Updated Jaeger Environment Variables: `JAEGER_ENDPOINT`, `JAEGER_USER`, `JAEGER_PASSWORD` - to `OTEL_EXPORTER_JAEGER_ENDPOINT`, `OTEL_EXPORTER_JAEGER_USER`, `OTEL_EXPORTER_JAEGER_PASSWORD` in compliance with OTel specification. (#1752) -- Modify `BatchSpanProcessor.ForceFlush` to abort after timeout/cancellation. (#1757) -- The `DroppedAttributeCount` field of the `Span` in the `go.opentelemetry.io/otel` package now only represents the number of attributes dropped for the span itself. 
- It no longer is a conglomerate of itself, events, and link attributes that have been dropped. (#1771) -- Make `ExportSpans` in Jaeger Exporter honor context deadline. (#1773) -- Modify Zipkin Exporter default service name, use default resource's serviceName instead of empty. (#1777) -- The `go.opentelemetry.io/otel/sdk/export/trace` package is merged into the `go.opentelemetry.io/otel/sdk/trace` package. (#1778) -- The prometheus.InstallNewPipeline example is moved from comment to example test (#1796) -- The convenience functions for the stdout exporter have been updated to return the `TracerProvider` implementation and enable the shutdown of the exporter. (#1800) -- Replace the flush function returned from the Jaeger exporter's convenience creation functions (`InstallNewPipeline` and `NewExportPipeline`) with the `TracerProvider` implementation they create. - This enables the caller to shutdown and flush using the related `TracerProvider` methods. (#1822) -- Updated the Jaeger exporter to have a default endpoint, `http://localhost:14250`, for the collector. (#1824) -- Changed the function `WithCollectorEndpoint` in the Jaeger exporter to no longer accept an endpoint as an argument. - The endpoint can be passed with the `CollectorEndpointOption` using the `WithEndpoint` function or by setting the `OTEL_EXPORTER_JAEGER_ENDPOINT` environment variable value appropriately. (#1824) -- The Jaeger exporter no longer batches exported spans itself, instead it relies on the SDK's `BatchSpanProcessor` for this functionality. (#1830) -- The Jaeger exporter creation functions (`NewRawExporter`, `NewExportPipeline`, and `InstallNewPipeline`) no longer accept the removed `Option` type as a variadic argument. 
(#1830) - -### Removed - -- Removed Jaeger Environment variables: `JAEGER_SERVICE_NAME`, `JAEGER_DISABLED`, `JAEGER_TAGS` - These environment variables will no longer be used to override values of the Jaeger exporter (#1752) -- No longer set the links for a `Span` in `go.opentelemetry.io/otel/sdk/trace` that is configured to be a new root. - This is unspecified behavior that the OpenTelemetry community plans to standardize in the future. - To prevent backwards incompatible changes when it is specified, these links are removed. (#1726) -- Setting error status while recording error with Span from oteltest package. (#1729) -- The concept of a remote and local Span stored in a context is unified to just the current Span. - Because of this `"go.opentelemetry.io/otel/trace".RemoteSpanContextFromContext` is removed as it is no longer needed. - Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContext` can be used to return the current Span. - If needed, that Span's `SpanContext.IsRemote()` can then be used to determine if it is remote or not. (#1731) -- The `HasRemoteParent` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is removed. - This field is redundant to the information returned from the `Remote` method of the `SpanContext` held in the `ParentContext` field. (#1749) -- The `trace.FlagsDebug` and `trace.FlagsDeferred` constants have been removed and will be localized to the B3 propagator. (#1770) -- Remove `Process` configuration, `WithProcessFromEnv` and `ProcessFromEnv`, and type from the Jaeger exporter package. - The information that could be configured in the `Process` struct should be configured in a `Resource` instead. (#1776, #1804) -- Remove the `WithDisabled` option from the Jaeger exporter. - To disable the exporter unregister it from the `TracerProvider` or use a no-operation `TracerProvider`. (#1806) -- Removed the functions `CollectorEndpointFromEnv` and `WithCollectorEndpointOptionFromEnv` from the Jaeger exporter. 
- These functions for retrieving specific environment variable values are redundant of other internal functions and - are not intended for end user use. (#1824) -- Removed the Jaeger exporter `WithSDKOptions` `Option`. - This option was used to set SDK options for the exporter creation convenience functions. - These functions are provided as a way to easily setup or install the exporter with what are deemed reasonable SDK settings for common use cases. - If the SDK needs to be configured differently, the `NewRawExporter` function and direct setup of the SDK with the desired settings should be used. (#1825) -- The `WithBufferMaxCount` and `WithBatchMaxCount` `Option`s from the Jaeger exporter are removed. - The exporter no longer batches exports, instead relying on the SDK's `BatchSpanProcessor` for this functionality. (#1830) -- The Jaeger exporter `Option` type is removed. - The type is no longer used by the exporter to configure anything. - All the previous configurations these options provided were duplicates of SDK configuration. - They have been removed in favor of using the SDK configuration and focuses the exporter configuration to be only about the endpoints it will send telemetry to. (#1830) - -## [0.19.0] - 2021-03-18 - -### Added - -- Added `Marshaler` config option to `otlphttp` to enable otlp over json or protobufs. (#1586) -- A `ForceFlush` method to the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` to flush all registered `SpanProcessor`s. (#1608) -- Added `WithSampler` and `WithSpanLimits` to tracer provider. (#1633, #1702) -- `"go.opentelemetry.io/otel/trace".SpanContext` now has a `remote` property, and `IsRemote()` predicate, that is true when the `SpanContext` has been extracted from remote context data. (#1701) -- A `Valid` method to the `"go.opentelemetry.io/otel/attribute".KeyValue` type. (#1703) - -### Changed - -- `trace.SpanContext` is now immutable and has no exported fields. 
(#1573) - - `trace.NewSpanContext()` can be used in conjunction with the `trace.SpanContextConfig` struct to initialize a new `SpanContext` where all values are known. -- Update the `ForceFlush` method signature to the `"go.opentelemetry.io/otel/sdk/trace".SpanProcessor` to accept a `context.Context` and return an error. (#1608) -- Update the `Shutdown` method to the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` return an error on shutdown failure. (#1608) -- The SimpleSpanProcessor will now shut down the enclosed `SpanExporter` and gracefully ignore subsequent calls to `OnEnd` after `Shutdown` is called. (#1612) -- `"go.opentelemetry.io/sdk/metric/controller.basic".WithPusher` is replaced with `WithExporter` to provide consistent naming across project. (#1656) -- Added non-empty string check for trace `Attribute` keys. (#1659) -- Add `description` to SpanStatus only when `StatusCode` is set to error. (#1662) -- Jaeger exporter falls back to `resource.Default`'s `service.name` if the exported Span does not have one. (#1673) -- Jaeger exporter populates Jaeger's Span Process from Resource. (#1673) -- Renamed the `LabelSet` method of `"go.opentelemetry.io/otel/sdk/resource".Resource` to `Set`. (#1692) -- Changed `WithSDK` to `WithSDKOptions` to accept variadic arguments of `TracerProviderOption` type in `go.opentelemetry.io/otel/exporters/trace/jaeger` package. (#1693) -- Changed `WithSDK` to `WithSDKOptions` to accept variadic arguments of `TracerProviderOption` type in `go.opentelemetry.io/otel/exporters/trace/zipkin` package. (#1693) - -### Removed - -- Removed `serviceName` parameter from Zipkin exporter and uses resource instead. (#1549) -- Removed `WithConfig` from tracer provider to avoid overriding configuration. (#1633) -- Removed the exported `SimpleSpanProcessor` and `BatchSpanProcessor` structs. - These are now returned as a SpanProcessor interface from their respective constructors. 
(#1638) -- Removed `WithRecord()` from `trace.SpanOption` when creating a span. (#1660) -- Removed setting status to `Error` while recording an error as a span event in `RecordError`. (#1663) -- Removed `jaeger.WithProcess` configuration option. (#1673) -- Removed `ApplyConfig` method from `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` and the now unneeded `Config` struct. (#1693) - -### Fixed - -- Jaeger Exporter: Ensure mapping between OTEL and Jaeger span data complies with the specification. (#1626) -- `SamplingResult.TraceState` is correctly propagated to a newly created span's `SpanContext`. (#1655) -- The `otel-collector` example now correctly flushes metric events prior to shutting down the exporter. (#1678) -- Do not set span status message in `SpanStatusFromHTTPStatusCode` if it can be inferred from `http.status_code`. (#1681) -- Synchronization issues in global trace delegate implementation. (#1686) -- Reduced excess memory usage by global `TracerProvider`. (#1687) - -## [0.18.0] - 2021-03-03 - -### Added - -- Added `resource.Default()` for use with meter and tracer providers. (#1507) -- `AttributePerEventCountLimit` and `AttributePerLinkCountLimit` for `SpanLimits`. (#1535) -- Added `Keys()` method to `propagation.TextMapCarrier` and `propagation.HeaderCarrier` to adapt `http.Header` to this interface. (#1544) -- Added `code` attributes to `go.opentelemetry.io/otel/semconv` package. (#1558) -- Compatibility testing suite in the CI system for the following systems. (#1567) - | OS | Go Version | Architecture | - | ------- | ---------- | ------------ | - | Ubuntu | 1.15 | amd64 | - | Ubuntu | 1.14 | amd64 | - | Ubuntu | 1.15 | 386 | - | Ubuntu | 1.14 | 386 | - | MacOS | 1.15 | amd64 | - | MacOS | 1.14 | amd64 | - | Windows | 1.15 | amd64 | - | Windows | 1.14 | amd64 | - | Windows | 1.15 | 386 | - | Windows | 1.14 | 386 | - -### Changed - -- Replaced interface `oteltest.SpanRecorder` with its existing implementation - `StandardSpanRecorder`. 
(#1542) -- Default span limit values to 128. (#1535) -- Rename `MaxEventsPerSpan`, `MaxAttributesPerSpan` and `MaxLinksPerSpan` to `EventCountLimit`, `AttributeCountLimit` and `LinkCountLimit`, and move these fields into `SpanLimits`. (#1535) -- Renamed the `otel/label` package to `otel/attribute`. (#1541) -- Vendor the Jaeger exporter's dependency on Apache Thrift. (#1551) -- Parallelize the CI linting and testing. (#1567) -- Stagger timestamps in exact aggregator tests. (#1569) -- Changed all examples to use `WithBatchTimeout(5 * time.Second)` rather than `WithBatchTimeout(5)`. (#1621) -- Prevent end-users from implementing some interfaces (#1575) - - ``` - "otel/exporters/otlp/otlphttp".Option - "otel/exporters/stdout".Option - "otel/oteltest".Option - "otel/trace".TracerOption - "otel/trace".SpanOption - "otel/trace".EventOption - "otel/trace".LifeCycleOption - "otel/trace".InstrumentationOption - "otel/sdk/resource".Option - "otel/sdk/trace".ParentBasedSamplerOption - "otel/sdk/trace".ReadOnlySpan - "otel/sdk/trace".ReadWriteSpan - ``` - -### Removed - -- Removed attempt to resample spans upon changing the span name with `span.SetName()`. (#1545) -- The `test-benchmark` is no longer a dependency of the `precommit` make target. (#1567) -- Removed the `test-386` make target. - This was replaced with a full compatibility testing suite (i.e. multi OS/arch) in the CI system. (#1567) - -### Fixed - -- The sequential timing check of timestamps in the stdout exporter are now setup explicitly to be sequential (#1571). (#1572) -- Windows build of Jaeger tests now compiles with OS specific functions (#1576). (#1577) -- The sequential timing check of timestamps of go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue are now setup explicitly to be sequential (#1578). (#1579) -- Validate tracestate header keys with vendors according to the W3C TraceContext specification (#1475). 
(#1581) -- The OTLP exporter includes related labels for translations of a GaugeArray (#1563). (#1570) - -## [0.17.0] - 2021-02-12 - -### Changed - -- Rename project default branch from `master` to `main`. (#1505) -- Reverse order in which `Resource` attributes are merged, per change in spec. (#1501) -- Add tooling to maintain "replace" directives in go.mod files automatically. (#1528) -- Create new modules: otel/metric, otel/trace, otel/oteltest, otel/sdk/export/metric, otel/sdk/metric (#1528) -- Move metric-related public global APIs from otel to otel/metric/global. (#1528) - -## Fixed - -- Fixed otlpgrpc reconnection issue. -- The example code in the README.md of `go.opentelemetry.io/otel/exporters/otlp` is moved to a compiled example test and used the new `WithAddress` instead of `WithEndpoint`. (#1513) -- The otel-collector example now uses the default OTLP receiver port of the collector. - -## [0.16.0] - 2021-01-13 - -### Added - -- Add the `ReadOnlySpan` and `ReadWriteSpan` interfaces to provide better control for accessing span data. (#1360) -- `NewGRPCDriver` function returns a `ProtocolDriver` that maintains a single gRPC connection to the collector. (#1369) -- Added documentation about the project's versioning policy. (#1388) -- Added `NewSplitDriver` for OTLP exporter that allows sending traces and metrics to different endpoints. (#1418) -- Added codeql workflow to GitHub Actions (#1428) -- Added Gosec workflow to GitHub Actions (#1429) -- Add new HTTP driver for OTLP exporter in `exporters/otlp/otlphttp`. Currently it only supports the binary protobuf payloads. (#1420) -- Add an OpenCensus exporter bridge. (#1444) - -### Changed - -- Rename `internal/testing` to `internal/internaltest`. (#1449) -- Rename `export.SpanData` to `export.SpanSnapshot` and use it only for exporting spans. (#1360) -- Store the parent's full `SpanContext` rather than just its span ID in the `span` struct. (#1360) -- Improve span duration accuracy. 
(#1360) -- Migrated CI/CD from CircleCI to GitHub Actions (#1382) -- Remove duplicate checkout from GitHub Actions workflow (#1407) -- Metric `array` aggregator renamed `exact` to match its `aggregation.Kind` (#1412) -- Metric `exact` aggregator includes per-point timestamps (#1412) -- Metric stdout exporter uses MinMaxSumCount aggregator for ValueRecorder instruments (#1412) -- `NewExporter` from `exporters/otlp` now takes a `ProtocolDriver` as a parameter. (#1369) -- Many OTLP Exporter options became gRPC ProtocolDriver options. (#1369) -- Unify endpoint API that related to OTel exporter. (#1401) -- Optimize metric histogram aggregator to re-use its slice of buckets. (#1435) -- Metric aggregator Count() and histogram Bucket.Counts are consistently `uint64`. (1430) -- Histogram aggregator accepts functional options, uses default boundaries if none given. (#1434) -- `SamplingResult` now passed a `Tracestate` from the parent `SpanContext` (#1432) -- Moved gRPC driver for OTLP exporter to `exporters/otlp/otlpgrpc`. (#1420) -- The `TraceContext` propagator now correctly propagates `TraceState` through the `SpanContext`. (#1447) -- Metric Push and Pull Controller components are combined into a single "basic" Controller: - - `WithExporter()` and `Start()` to configure Push behavior - - `Start()` is optional; use `Collect()` and `ForEach()` for Pull behavior - - `Start()` and `Stop()` accept Context. (#1378) -- The `Event` type is moved from the `otel/sdk/export/trace` package to the `otel/trace` API package. (#1452) - -### Removed - -- Remove `errUninitializedSpan` as its only usage is now obsolete. 
(#1360) -- Remove Metric export functionality related to quantiles and summary data points: this is not specified (#1412) -- Remove DDSketch metric aggregator; our intention is to re-introduce this as an option of the histogram aggregator after [new OTLP histogram data types](https://github.com/open-telemetry/opentelemetry-proto/pull/226) are released (#1412) - -### Fixed - -- `BatchSpanProcessor.Shutdown()` will now shutdown underlying `export.SpanExporter`. (#1443) - -## [0.15.0] - 2020-12-10 - -### Added - -- The `WithIDGenerator` `TracerProviderOption` is added to the `go.opentelemetry.io/otel/trace` package to configure an `IDGenerator` for the `TracerProvider`. (#1363) - -### Changed - -- The Zipkin exporter now uses the Span status code to determine. (#1328) -- `NewExporter` and `Start` functions in `go.opentelemetry.io/otel/exporters/otlp` now receive `context.Context` as a first parameter. (#1357) -- Move the OpenCensus example into `example` directory. (#1359) -- Moved the SDK's `internal.IDGenerator` interface in to the `sdk/trace` package to enable support for externally-defined ID generators. (#1363) -- Bump `github.com/google/go-cmp` from 0.5.3 to 0.5.4 (#1374) -- Bump `github.com/golangci/golangci-lint` in `/internal/tools` (#1375) - -### Fixed - -- Metric SDK `SumObserver` and `UpDownSumObserver` instruments correctness fixes. (#1381) - -## [0.14.0] - 2020-11-19 - -### Added - -- An `EventOption` and the related `NewEventConfig` function are added to the `go.opentelemetry.io/otel` package to configure Span events. (#1254) -- A `TextMapPropagator` and associated `TextMapCarrier` are added to the `go.opentelemetry.io/otel/oteltest` package to test `TextMap` type propagators and their use. (#1259) -- `SpanContextFromContext` returns `SpanContext` from context. (#1255) -- `TraceState` has been added to `SpanContext`. (#1340) -- `DeploymentEnvironmentKey` added to `go.opentelemetry.io/otel/semconv` package. 
(#1323) -- Add an OpenCensus to OpenTelemetry tracing bridge. (#1305) -- Add a parent context argument to `SpanProcessor.OnStart` to follow the specification. (#1333) -- Add missing tests for `sdk/trace/attributes_map.go`. (#1337) - -### Changed - -- Move the `go.opentelemetry.io/otel/api/trace` package into `go.opentelemetry.io/otel/trace` with the following changes. (#1229) (#1307) - - `ID` has been renamed to `TraceID`. - - `IDFromHex` has been renamed to `TraceIDFromHex`. - - `EmptySpanContext` is removed. -- Move the `go.opentelemetry.io/otel/api/trace/tracetest` package into `go.opentelemetry.io/otel/oteltest`. (#1229) -- OTLP Exporter updates: - - supports OTLP v0.6.0 (#1230, #1354) - - supports configurable aggregation temporality (default: Cumulative, optional: Stateless). (#1296) -- The Sampler is now called on local child spans. (#1233) -- The `Kind` type from the `go.opentelemetry.io/otel/api/metric` package was renamed to `InstrumentKind` to more specifically describe what it is and avoid semantic ambiguity. (#1240) -- The `MetricKind` method of the `Descriptor` type in the `go.opentelemetry.io/otel/api/metric` package was renamed to `Descriptor.InstrumentKind`. - This matches the returned type and fixes misuse of the term metric. (#1240) -- Move test harness from the `go.opentelemetry.io/otel/api/apitest` package into `go.opentelemetry.io/otel/oteltest`. (#1241) -- Move the `go.opentelemetry.io/otel/api/metric/metrictest` package into `go.opentelemetry.io/oteltest` as part of #964. (#1252) -- Move the `go.opentelemetry.io/otel/api/metric` package into `go.opentelemetry.io/otel/metric` as part of #1303. (#1321) -- Move the `go.opentelemetry.io/otel/api/metric/registry` package into `go.opentelemetry.io/otel/metric/registry` as a part of #1303. (#1316) -- Move the `Number` type (together with related functions) from `go.opentelemetry.io/otel/api/metric` package into `go.opentelemetry.io/otel/metric/number` as a part of #1303. 
(#1316) -- The function signature of the Span `AddEvent` method in `go.opentelemetry.io/otel` is updated to no longer take an unused context and instead take a required name and a variable number of `EventOption`s. (#1254) -- The function signature of the Span `RecordError` method in `go.opentelemetry.io/otel` is updated to no longer take an unused context and instead take a required error value and a variable number of `EventOption`s. (#1254) -- Move the `go.opentelemetry.io/otel/api/global` package to `go.opentelemetry.io/otel`. (#1262) (#1330) -- Move the `Version` function from `go.opentelemetry.io/otel/sdk` to `go.opentelemetry.io/otel`. (#1330) -- Rename correlation context header from `"otcorrelations"` to `"baggage"` to match the OpenTelemetry specification. (#1267) -- Fix `Code.UnmarshalJSON` to work with valid JSON only. (#1276) -- The `resource.New()` method changes signature to support builtin attributes and functional options, including `telemetry.sdk.*` and - `host.name` semantic conventions; the former method is renamed `resource.NewWithAttributes`. (#1235) -- The Prometheus exporter now exports non-monotonic counters (i.e. `UpDownCounter`s) as gauges. (#1210) -- Correct the `Span.End` method documentation in the `otel` API to state updates are not allowed on a span after it has ended. (#1310) -- Updated span collection limits for attribute, event and link counts to 1000 (#1318) -- Renamed `semconv.HTTPUrlKey` to `semconv.HTTPURLKey`. (#1338) - -### Removed - -- The `ErrInvalidHexID`, `ErrInvalidTraceIDLength`, `ErrInvalidSpanIDLength`, `ErrInvalidSpanIDLength`, or `ErrNilSpanID` from the `go.opentelemetry.io/otel` package are unexported now. (#1243) -- The `AddEventWithTimestamp` method on the `Span` interface in `go.opentelemetry.io/otel` is removed due to its redundancy. - It is replaced by using the `AddEvent` method with a `WithTimestamp` option. 
(#1254) -- The `MockSpan` and `MockTracer` types are removed from `go.opentelemetry.io/otel/oteltest`. - `Tracer` and `Span` from the same module should be used in their place instead. (#1306) -- `WorkerCount` option is removed from `go.opentelemetry.io/otel/exporters/otlp`. (#1350) -- Remove the following labels types: INT32, UINT32, UINT64 and FLOAT32. (#1314) - -### Fixed - -- Rename `MergeItererator` to `MergeIterator` in the `go.opentelemetry.io/otel/label` package. (#1244) -- The `go.opentelemetry.io/otel/api/global` packages global TextMapPropagator now delegates functionality to a globally set delegate for all previously returned propagators. (#1258) -- Fix condition in `label.Any`. (#1299) -- Fix global `TracerProvider` to pass options to its configured provider. (#1329) -- Fix missing handler for `ExactKind` aggregator in OTLP metrics transformer (#1309) - -## [0.13.0] - 2020-10-08 - -### Added - -- OTLP Metric exporter supports Histogram aggregation. (#1209) -- The `Code` struct from the `go.opentelemetry.io/otel/codes` package now supports JSON marshaling and unmarshaling as well as implements the `Stringer` interface. (#1214) -- A Baggage API to implement the OpenTelemetry specification. (#1217) -- Add Shutdown method to sdk/trace/provider, shutdown processors in the order they were registered. (#1227) - -### Changed - -- Set default propagator to no-op propagator. (#1184) -- The `HTTPSupplier`, `HTTPExtractor`, `HTTPInjector`, and `HTTPPropagator` from the `go.opentelemetry.io/otel/api/propagation` package were replaced with unified `TextMapCarrier` and `TextMapPropagator` in the `go.opentelemetry.io/otel/propagation` package. (#1212) (#1325) -- The `New` function from the `go.opentelemetry.io/otel/api/propagation` package was replaced with `NewCompositeTextMapPropagator` in the `go.opentelemetry.io/otel` package. 
(#1212) -- The status codes of the `go.opentelemetry.io/otel/codes` package have been updated to match the latest OpenTelemetry specification. - They now are `Unset`, `Error`, and `Ok`. - They no longer track the gRPC codes. (#1214) -- The `StatusCode` field of the `SpanData` struct in the `go.opentelemetry.io/otel/sdk/export/trace` package now uses the codes package from this package instead of the gRPC project. (#1214) -- Move the `go.opentelemetry.io/otel/api/baggage` package into `go.opentelemetry.io/otel/baggage`. (#1217) (#1325) -- A `Shutdown` method of `SpanProcessor` and all its implementations receives a context and returns an error. (#1264) - -### Fixed - -- Copies of data from arrays and slices passed to `go.opentelemetry.io/otel/label.ArrayValue()` are now used in the returned `Value` instead of using the mutable data itself. (#1226) - -### Removed - -- The `ExtractHTTP` and `InjectHTTP` functions from the `go.opentelemetry.io/otel/api/propagation` package were removed. (#1212) -- The `Propagators` interface from the `go.opentelemetry.io/otel/api/propagation` package was removed to conform to the OpenTelemetry specification. - The explicit `TextMapPropagator` type can be used in its place as this is the `Propagator` type the specification defines. (#1212) -- The `SetAttribute` method of the `Span` from the `go.opentelemetry.io/otel/api/trace` package was removed given its redundancy with the `SetAttributes` method. (#1216) -- The internal implementation of Baggage storage is removed in favor of using the new Baggage API functionality. (#1217) -- Remove duplicate hostname key `HostHostNameKey` in Resource semantic conventions. (#1219) -- Nested array/slice support has been removed. (#1226) - -## [0.12.0] - 2020-09-24 - -### Added - -- A `SpanConfigure` function in `go.opentelemetry.io/otel/api/trace` to create a new `SpanConfig` from `SpanOption`s. 
(#1108) -- In the `go.opentelemetry.io/otel/api/trace` package, `NewTracerConfig` was added to construct new `TracerConfig`s. - This addition was made to conform with our project option conventions. (#1155) -- Instrumentation library information was added to the Zipkin exporter. (#1119) -- The `SpanProcessor` interface now has a `ForceFlush()` method. (#1166) -- More semantic conventions for k8s as resource attributes. (#1167) - -### Changed - -- Add reconnecting udp connection type to Jaeger exporter. - This change adds a new optional implementation of the udp conn interface used to detect changes to an agent's host dns record. - It then adopts the new destination address to ensure the exporter doesn't get stuck. This change was ported from jaegertracing/jaeger-client-go#520. (#1063) -- Replace `StartOption` and `EndOption` in `go.opentelemetry.io/otel/api/trace` with `SpanOption`. - This change is matched by replacing the `StartConfig` and `EndConfig` with a unified `SpanConfig`. (#1108) -- Replace the `LinkedTo` span option in `go.opentelemetry.io/otel/api/trace` with `WithLinks`. - This is be more consistent with our other option patterns, i.e. passing the item to be configured directly instead of its component parts, and provides a cleaner function signature. (#1108) -- The `go.opentelemetry.io/otel/api/trace` `TracerOption` was changed to an interface to conform to project option conventions. (#1109) -- Move the `B3` and `TraceContext` from within the `go.opentelemetry.io/otel/api/trace` package to their own `go.opentelemetry.io/otel/propagators` package. - This removal of the propagators is reflective of the OpenTelemetry specification for these propagators as well as cleans up the `go.opentelemetry.io/otel/api/trace` API. (#1118) -- Rename Jaeger tags used for instrumentation library information to reflect changes in OpenTelemetry specification. 
(#1119) -- Rename `ProbabilitySampler` to `TraceIDRatioBased` and change semantics to ignore parent span sampling status. (#1115) -- Move `tools` package under `internal`. (#1141) -- Move `go.opentelemetry.io/otel/api/correlation` package to `go.opentelemetry.io/otel/api/baggage`. (#1142) - The `correlation.CorrelationContext` propagator has been renamed `baggage.Baggage`. Other exported functions and types are unchanged. -- Rename `ParentOrElse` sampler to `ParentBased` and allow setting samplers depending on parent span. (#1153) -- In the `go.opentelemetry.io/otel/api/trace` package, `SpanConfigure` was renamed to `NewSpanConfig`. (#1155) -- Change `dependabot.yml` to add a `Skip Changelog` label to dependabot-sourced PRs. (#1161) -- The [configuration style guide](https://github.com/open-telemetry/opentelemetry-go/blob/master/CONTRIBUTING.md#config) has been updated to - recommend the use of `newConfig()` instead of `configure()`. (#1163) -- The `otlp.Config` type has been unexported and changed to `otlp.config`, along with its initializer. (#1163) -- Ensure exported interface types include parameter names and update the - Style Guide to reflect this styling rule. (#1172) -- Don't consider unset environment variable for resource detection to be an error. (#1170) -- Rename `go.opentelemetry.io/otel/api/metric.ConfigureInstrument` to `NewInstrumentConfig` and - `go.opentelemetry.io/otel/api/metric.ConfigureMeter` to `NewMeterConfig`. -- ValueObserver instruments use LastValue aggregator by default. (#1165) -- OTLP Metric exporter supports LastValue aggregation. (#1165) -- Move the `go.opentelemetry.io/otel/api/unit` package to `go.opentelemetry.io/otel/unit`. (#1185) -- Rename `Provider` to `MeterProvider` in the `go.opentelemetry.io/otel/api/metric` package. (#1190) -- Rename `NoopProvider` to `NoopMeterProvider` in the `go.opentelemetry.io/otel/api/metric` package. 
(#1190) -- Rename `NewProvider` to `NewMeterProvider` in the `go.opentelemetry.io/otel/api/metric/metrictest` package. (#1190) -- Rename `Provider` to `MeterProvider` in the `go.opentelemetry.io/otel/api/metric/registry` package. (#1190) -- Rename `NewProvider` to `NewMeterProvider` in the `go.opentelemetry.io/otel/api/metri/registryc` package. (#1190) -- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/api/trace` package. (#1190) -- Rename `NoopProvider` to `NoopTracerProvider` in the `go.opentelemetry.io/otel/api/trace` package. (#1190) -- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/api/trace/tracetest` package. (#1190) -- Rename `NewProvider` to `NewTracerProvider` in the `go.opentelemetry.io/otel/api/trace/tracetest` package. (#1190) -- Rename `WrapperProvider` to `WrapperTracerProvider` in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#1190) -- Rename `NewWrapperProvider` to `NewWrapperTracerProvider` in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#1190) -- Rename `Provider` method of the pull controller to `MeterProvider` in the `go.opentelemetry.io/otel/sdk/metric/controller/pull` package. (#1190) -- Rename `Provider` method of the push controller to `MeterProvider` in the `go.opentelemetry.io/otel/sdk/metric/controller/push` package. (#1190) -- Rename `ProviderOptions` to `TracerProviderConfig` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190) -- Rename `ProviderOption` to `TracerProviderOption` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190) -- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190) -- Rename `NewProvider` to `NewTracerProvider` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190) -- Renamed `SamplingDecision` values to comply with OpenTelemetry specification change. 
(#1192) -- Renamed Zipkin attribute names from `ot.status_code & ot.status_description` to `otel.status_code & otel.status_description`. (#1201) -- The default SDK now invokes registered `SpanProcessor`s in the order they were registered with the `TracerProvider`. (#1195) -- Add test of spans being processed by the `SpanProcessor`s in the order they were registered. (#1203) - -### Removed - -- Remove the B3 propagator from `go.opentelemetry.io/otel/propagators`. It is now located in the - `go.opentelemetry.io/contrib/propagators/` module. (#1191) -- Remove the semantic convention for HTTP status text, `HTTPStatusTextKey` from package `go.opentelemetry.io/otel/semconv`. (#1194) - -### Fixed - -- Zipkin example no longer mentions `ParentSampler`, corrected to `ParentBased`. (#1171) -- Fix missing shutdown processor in otel-collector example. (#1186) -- Fix missing shutdown processor in basic and namedtracer examples. (#1197) - -## [0.11.0] - 2020-08-24 - -### Added - -- Support for exporting array-valued attributes via OTLP. (#992) -- `Noop` and `InMemory` `SpanBatcher` implementations to help with testing integrations. (#994) -- Support for filtering metric label sets. (#1047) -- A dimensionality-reducing metric Processor. (#1057) -- Integration tests for more OTel Collector Attribute types. (#1062) -- A new `WithSpanProcessor` `ProviderOption` is added to the `go.opentelemetry.io/otel/sdk/trace` package to create a `Provider` and automatically register the `SpanProcessor`. (#1078) - -### Changed - -- Rename `sdk/metric/processor/test` to `sdk/metric/processor/processortest`. (#1049) -- Rename `sdk/metric/controller/test` to `sdk/metric/controller/controllertest`. (#1049) -- Rename `api/testharness` to `api/apitest`. (#1049) -- Rename `api/trace/testtrace` to `api/trace/tracetest`. (#1049) -- Change Metric Processor to merge multiple observations. (#1024) -- The `go.opentelemetry.io/otel/bridge/opentracing` bridge package has been made into its own module. 
- This removes the package dependencies of this bridge from the rest of the OpenTelemetry based project. (#1038) -- Renamed `go.opentelemetry.io/otel/api/standard` package to `go.opentelemetry.io/otel/semconv` to avoid the ambiguous and generic name `standard` and better describe the package as containing OpenTelemetry semantic conventions. (#1016) -- The environment variable used for resource detection has been changed from `OTEL_RESOURCE_LABELS` to `OTEL_RESOURCE_ATTRIBUTES` (#1042) -- Replace `WithSyncer` with `WithBatcher` in examples. (#1044) -- Replace the `google.golang.org/grpc/codes` dependency in the API with an equivalent `go.opentelemetry.io/otel/codes` package. (#1046) -- Merge the `go.opentelemetry.io/otel/api/label` and `go.opentelemetry.io/otel/api/kv` into the new `go.opentelemetry.io/otel/label` package. (#1060) -- Unify Callback Function Naming. - Rename `*Callback` with `*Func`. (#1061) -- CI builds validate against last two versions of Go, dropping 1.13 and adding 1.15. (#1064) -- The `go.opentelemetry.io/otel/sdk/export/trace` interfaces `SpanSyncer` and `SpanBatcher` have been replaced with a specification compliant `Exporter` interface. - This interface still supports the export of `SpanData`, but only as a slice. - Implementation are also required now to return any error from `ExportSpans` if one occurs as well as implement a `Shutdown` method for exporter clean-up. (#1078) -- The `go.opentelemetry.io/otel/sdk/trace` `NewBatchSpanProcessor` function no longer returns an error. - If a `nil` exporter is passed as an argument to this function, instead of it returning an error, it now returns a `BatchSpanProcessor` that handles the export of `SpanData` by not taking any action. (#1078) -- The `go.opentelemetry.io/otel/sdk/trace` `NewProvider` function to create a `Provider` no longer returns an error, instead only a `*Provider`. 
- This change is related to `NewBatchSpanProcessor` not returning an error which was the only error this function would return. (#1078) - -### Removed - -- Duplicate, unused API sampler interface. (#999) - Use the [`Sampler` interface](https://github.com/open-telemetry/opentelemetry-go/blob/v0.11.0/sdk/trace/sampling.go) provided by the SDK instead. -- The `grpctrace` instrumentation was moved to the `go.opentelemetry.io/contrib` repository and out of this repository. - This move includes moving the `grpc` example to the `go.opentelemetry.io/contrib` as well. (#1027) -- The `WithSpan` method of the `Tracer` interface. - The functionality this method provided was limited compared to what a user can provide themselves. - It was removed with the understanding that if there is sufficient user need it can be added back based on actual user usage. (#1043) -- The `RegisterSpanProcessor` and `UnregisterSpanProcessor` functions. - These were holdovers from an approach prior to the TracerProvider design. They were not used anymore. (#1077) -- The `oterror` package. (#1026) -- The `othttp` and `httptrace` instrumentations were moved to `go.opentelemetry.io/contrib`. (#1032) - -### Fixed - -- The `semconv.HTTPServerMetricAttributesFromHTTPRequest()` function no longer generates the high-cardinality `http.request.content.length` label. (#1031) -- Correct instrumentation version tag in Jaeger exporter. (#1037) -- The SDK span will now set an error event if the `End` method is called during a panic (i.e. it was deferred). (#1043) -- Move internally generated protobuf code from the `go.opentelemetry.io/otel` to the OTLP exporter to reduce dependency overhead. (#1050) -- The `otel-collector` example referenced outdated collector processors. (#1006) - -## [0.10.0] - 2020-07-29 - -This release migrates the default OpenTelemetry SDK into its own Go module, decoupling the SDK from the API and reducing dependencies for instrumentation packages. 
- -### Added - -- The Zipkin exporter now has `NewExportPipeline` and `InstallNewPipeline` constructor functions to match the common pattern. - These function build a new exporter with default SDK options and register the exporter with the `global` package respectively. (#944) -- Add propagator option for gRPC instrumentation. (#986) -- The `testtrace` package now tracks the `trace.SpanKind` for each span. (#987) - -### Changed - -- Replace the `RegisterGlobal` `Option` in the Jaeger exporter with an `InstallNewPipeline` constructor function. - This matches the other exporter constructor patterns and will register a new exporter after building it with default configuration. (#944) -- The trace (`go.opentelemetry.io/otel/exporters/trace/stdout`) and metric (`go.opentelemetry.io/otel/exporters/metric/stdout`) `stdout` exporters are now merged into a single exporter at `go.opentelemetry.io/otel/exporters/stdout`. - This new exporter was made into its own Go module to follow the pattern of all exporters and decouple it from the `go.opentelemetry.io/otel` module. (#956, #963) -- Move the `go.opentelemetry.io/otel/exporters/test` test package to `go.opentelemetry.io/otel/sdk/export/metric/metrictest`. (#962) -- The `go.opentelemetry.io/otel/api/kv/value` package was merged into the parent `go.opentelemetry.io/otel/api/kv` package. (#968) - - `value.Bool` was replaced with `kv.BoolValue`. - - `value.Int64` was replaced with `kv.Int64Value`. - - `value.Uint64` was replaced with `kv.Uint64Value`. - - `value.Float64` was replaced with `kv.Float64Value`. - - `value.Int32` was replaced with `kv.Int32Value`. - - `value.Uint32` was replaced with `kv.Uint32Value`. - - `value.Float32` was replaced with `kv.Float32Value`. - - `value.String` was replaced with `kv.StringValue`. - - `value.Int` was replaced with `kv.IntValue`. - - `value.Uint` was replaced with `kv.UintValue`. - - `value.Array` was replaced with `kv.ArrayValue`. 
-- Rename `Infer` to `Any` in the `go.opentelemetry.io/otel/api/kv` package. (#972) -- Change `othttp` to use the `httpsnoop` package to wrap the `ResponseWriter` so that optional interfaces (`http.Hijacker`, `http.Flusher`, etc.) that are implemented by the original `ResponseWriter`are also implemented by the wrapped `ResponseWriter`. (#979) -- Rename `go.opentelemetry.io/otel/sdk/metric/aggregator/test` package to `go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest`. (#980) -- Make the SDK into its own Go module called `go.opentelemetry.io/otel/sdk`. (#985) -- Changed the default trace `Sampler` from `AlwaysOn` to `ParentOrElse(AlwaysOn)`. (#989) - -### Removed - -- The `IndexedAttribute` function from the `go.opentelemetry.io/otel/api/label` package was removed in favor of `IndexedLabel` which it was synonymous with. (#970) - -### Fixed - -- Bump github.com/golangci/golangci-lint from 1.28.3 to 1.29.0 in /tools. (#953) -- Bump github.com/google/go-cmp from 0.5.0 to 0.5.1. (#957) -- Use `global.Handle` for span export errors in the OTLP exporter. (#946) -- Correct Go language formatting in the README documentation. (#961) -- Remove default SDK dependencies from the `go.opentelemetry.io/otel/api` package. (#977) -- Remove default SDK dependencies from the `go.opentelemetry.io/otel/instrumentation` package. (#983) -- Move documented examples for `go.opentelemetry.io/otel/instrumentation/grpctrace` interceptors into Go example tests. (#984) - -## [0.9.0] - 2020-07-20 - -### Added - -- A new Resource Detector interface is included to allow resources to be automatically detected and included. (#939) -- A Detector to automatically detect resources from an environment variable. (#939) -- Github action to generate protobuf Go bindings locally in `internal/opentelemetry-proto-gen`. (#938) -- OTLP .proto files from `open-telemetry/opentelemetry-proto` imported as a git submodule under `internal/opentelemetry-proto`. 
- References to `github.com/open-telemetry/opentelemetry-proto` changed to `go.opentelemetry.io/otel/internal/opentelemetry-proto-gen`. (#942) - -### Changed - -- Non-nil value `struct`s for key-value pairs will be marshalled using JSON rather than `Sprintf`. (#948) - -### Removed - -- Removed dependency on `github.com/open-telemetry/opentelemetry-collector`. (#943) - -## [0.8.0] - 2020-07-09 - -### Added - -- The `B3Encoding` type to represent the B3 encoding(s) the B3 propagator can inject. - A value for HTTP supported encodings (Multiple Header: `MultipleHeader`, Single Header: `SingleHeader`) are included. (#882) -- The `FlagsDeferred` trace flag to indicate if the trace sampling decision has been deferred. (#882) -- The `FlagsDebug` trace flag to indicate if the trace is a debug trace. (#882) -- Add `peer.service` semantic attribute. (#898) -- Add database-specific semantic attributes. (#899) -- Add semantic convention for `faas.coldstart` and `container.id`. (#909) -- Add http content size semantic conventions. (#905) -- Include `http.request_content_length` in HTTP request basic attributes. (#905) -- Add semantic conventions for operating system process resource attribute keys. (#919) -- The Jaeger exporter now has a `WithBatchMaxCount` option to specify the maximum number of spans sent in a batch. (#931) - -### Changed - -- Update `CONTRIBUTING.md` to ask for updates to `CHANGELOG.md` with each pull request. (#879) -- Use lowercase header names for B3 Multiple Headers. (#881) -- The B3 propagator `SingleHeader` field has been replaced with `InjectEncoding`. - This new field can be set to combinations of the `B3Encoding` bitmasks and will inject trace information in these encodings. - If no encoding is set, the propagator will default to `MultipleHeader` encoding. (#882) -- The B3 propagator now extracts from either HTTP encoding of B3 (Single Header or Multiple Header) based on what is contained in the header. 
- Preference is given to Single Header encoding with Multiple Header being the fallback if Single Header is not found or is invalid. - This behavior change is made to dynamically support all correctly encoded traces received instead of having to guess the expected encoding prior to receiving. (#882) -- Extend semantic conventions for RPC. (#900) -- To match constant naming conventions in the `api/standard` package, the `FaaS*` key names are appended with a suffix of `Key`. (#920) - - `"api/standard".FaaSName` -> `FaaSNameKey` - - `"api/standard".FaaSID` -> `FaaSIDKey` - - `"api/standard".FaaSVersion` -> `FaaSVersionKey` - - `"api/standard".FaaSInstance` -> `FaaSInstanceKey` - -### Removed - -- The `FlagsUnused` trace flag is removed. - The purpose of this flag was to act as the inverse of `FlagsSampled`; the inverse of `FlagsSampled` is used instead. (#882) -- The B3 header constants (`B3SingleHeader`, `B3DebugFlagHeader`, `B3TraceIDHeader`, `B3SpanIDHeader`, `B3SampledHeader`, `B3ParentSpanIDHeader`) are removed. - If B3 header keys are needed [the authoritative OpenZipkin package constants](https://pkg.go.dev/github.com/openzipkin/zipkin-go@v0.2.2/propagation/b3?tab=doc#pkg-constants) should be used instead. (#882) - -### Fixed - -- The B3 Single Header name is now correctly `b3` instead of the previous `X-B3`. (#881) -- The B3 propagator now correctly supports sampling only values (`b3: 0`, `b3: 1`, or `b3: d`) for a Single B3 Header. (#882) -- The B3 propagator now propagates the debug flag. - This removes the behavior of changing the debug flag into a set sampling bit. - Instead, this now follows the B3 specification and omits the `X-B3-Sampling` header. (#882) -- The B3 propagator now tracks "unset" sampling state (meaning "defer the decision") and does not set the `X-B3-Sampling` header when injecting. (#882) -- Bump github.com/itchyny/gojq from 0.10.3 to 0.10.4 in /tools. 
(#883) -- Bump github.com/opentracing/opentracing-go from v1.1.1-0.20190913142402-a7454ce5950e to v1.2.0. (#885) -- The tracing time conversion for OTLP spans is now correctly set to `UnixNano`. (#896) -- Ensure span status is not set to `Unknown` when no HTTP status code is provided as it is assumed to be `200 OK`. (#908) -- Ensure `httptrace.clientTracer` closes `http.headers` span. (#912) -- Prometheus exporter will not apply stale updates or forget inactive metrics. (#903) -- Add test for api.standard `HTTPClientAttributesFromHTTPRequest`. (#905) -- Bump github.com/golangci/golangci-lint from 1.27.0 to 1.28.1 in /tools. (#901, #913) -- Update otel-collector example to use the v0.5.0 collector. (#915) -- The `grpctrace` instrumentation uses a span name conforming to the OpenTelemetry semantic conventions (does not contain a leading slash (`/`)). (#922) -- The `grpctrace` instrumentation includes an `rpc.method` attribute now set to the gRPC method name. (#900, #922) -- The `grpctrace` instrumentation `rpc.service` attribute now contains the package name if one exists. - This is in accordance with OpenTelemetry semantic conventions. (#922) -- Correlation Context extractor will no longer insert an empty map into the returned context when no valid values are extracted. (#923) -- Bump google.golang.org/api from 0.28.0 to 0.29.0 in /exporters/trace/jaeger. (#925) -- Bump github.com/itchyny/gojq from 0.10.4 to 0.11.0 in /tools. (#926) -- Bump github.com/golangci/golangci-lint from 1.28.1 to 1.28.2 in /tools. (#930) - -## [0.7.0] - 2020-06-26 - -This release implements the v0.5.0 version of the OpenTelemetry specification. - -### Added - -- The othttp instrumentation now includes default metrics. (#861) -- This CHANGELOG file to track all changes in the project going forward. -- Support for array type attributes. (#798) -- Apply transitive dependabot go.mod dependency updates as part of a new automatic Github workflow. 
(#844) -- Timestamps are now passed to exporters for each export. (#835) -- Add new `Accumulation` type to metric SDK to transport telemetry from `Accumulator`s to `Processor`s. - This replaces the prior `Record` `struct` use for this purpose. (#835) -- New dependabot integration to automate package upgrades. (#814) -- `Meter` and `Tracer` implementations accept instrumentation version as an optional argument. - This instrumentation version is passed on to exporters. (#811) (#805) (#802) -- The OTLP exporter includes the instrumentation version in telemetry it exports. (#811) -- Environment variables for Jaeger exporter are supported. (#796) -- New `aggregation.Kind` in the export metric API. (#808) -- New example that uses OTLP and the collector. (#790) -- Handle errors in the span `SetName` during span initialization. (#791) -- Default service config to enable retries for retry-able failed requests in the OTLP exporter and an option to override this default. (#777) -- New `go.opentelemetry.io/otel/api/oterror` package to uniformly support error handling and definitions for the project. (#778) -- New `global` default implementation of the `go.opentelemetry.io/otel/api/oterror.Handler` interface to be used to handle errors prior to a user defined `Handler`. - There is also functionality for the user to register their `Handler` as well as a convenience function `Handle` to handle an error with this global `Handler`. (#778) -- Options to specify propagators for httptrace and grpctrace instrumentation. (#784) -- The required `application/json` header for the Zipkin exporter is included in all exports. (#774) -- Integrate HTTP semantics helpers from the contrib repository into the `api/standard` package. #769 - -### Changed - -- Rename `Integrator` to `Processor` in the metric SDK. (#863) -- Rename `AggregationSelector` to `AggregatorSelector`. (#859) -- Rename `SynchronizedCopy` to `SynchronizedMove`. (#858) -- Rename `simple` integrator to `basic` integrator. 
(#857) -- Merge otlp collector examples. (#841) -- Change the metric SDK to support cumulative, delta, and pass-through exporters directly. - With these changes, cumulative and delta specific exporters are able to request the correct kind of aggregation from the SDK. (#840) -- The `Aggregator.Checkpoint` API is renamed to `SynchronizedCopy` and adds an argument, a different `Aggregator` into which the copy is stored. (#812) -- The `export.Aggregator` contract is that `Update()` and `SynchronizedCopy()` are synchronized with each other. - All the aggregation interfaces (`Sum`, `LastValue`, ...) are not meant to be synchronized, as the caller is expected to synchronize aggregators at a higher level after the `Accumulator`. - Some of the `Aggregators` used unnecessary locking and that has been cleaned up. (#812) -- Use of `metric.Number` was replaced by `int64` now that we use `sync.Mutex` in the `MinMaxSumCount` and `Histogram` `Aggregators`. (#812) -- Replace `AlwaysParentSample` with `ParentSample(fallback)` to match the OpenTelemetry v0.5.0 specification. (#810) -- Rename `sdk/export/metric/aggregator` to `sdk/export/metric/aggregation`. #808 -- Send configured headers with every request in the OTLP exporter, instead of just on connection creation. (#806) -- Update error handling for any one off error handlers, replacing, instead, with the `global.Handle` function. (#791) -- Rename `plugin` directory to `instrumentation` to match the OpenTelemetry specification. (#779) -- Makes the argument order to Histogram and DDSketch `New()` consistent. (#781) - -### Removed - -- `Uint64NumberKind` and related functions from the API. (#864) -- Context arguments from `Aggregator.Checkpoint` and `Integrator.Process` as they were unused. (#803) -- `SpanID` is no longer included in parameters for sampling decision to match the OpenTelemetry specification. (#775) - -### Fixed - -- Upgrade OTLP exporter to opentelemetry-proto matching the opentelemetry-collector v0.4.0 release. 
(#866) -- Allow changes to `go.sum` and `go.mod` when running dependabot tidy-up. (#871) -- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1. (#824) -- Bump github.com/prometheus/client_golang from 1.7.0 to 1.7.1 in /exporters/metric/prometheus. (#867) -- Bump google.golang.org/grpc from 1.29.1 to 1.30.0 in /exporters/trace/jaeger. (#853) -- Bump google.golang.org/grpc from 1.29.1 to 1.30.0 in /exporters/trace/zipkin. (#854) -- Bumps github.com/golang/protobuf from 1.3.2 to 1.4.2 (#848) -- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/otlp (#817) -- Bump github.com/golangci/golangci-lint from 1.25.1 to 1.27.0 in /tools (#828) -- Bump github.com/prometheus/client_golang from 1.5.0 to 1.7.0 in /exporters/metric/prometheus (#838) -- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/trace/jaeger (#829) -- Bump github.com/benbjohnson/clock from 1.0.0 to 1.0.3 (#815) -- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/trace/zipkin (#823) -- Bump github.com/itchyny/gojq from 0.10.1 to 0.10.3 in /tools (#830) -- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/metric/prometheus (#822) -- Bump google.golang.org/grpc from 1.27.1 to 1.29.1 in /exporters/trace/zipkin (#820) -- Bump google.golang.org/grpc from 1.27.1 to 1.29.1 in /exporters/trace/jaeger (#831) -- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 (#836) -- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 in /exporters/trace/jaeger (#837) -- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 in /exporters/otlp (#839) -- Bump google.golang.org/api from 0.20.0 to 0.28.0 in /exporters/trace/jaeger (#843) -- Set span status from HTTP status code in the othttp instrumentation. (#832) -- Fixed typo in push controller comment. (#834) -- The `Aggregator` testing has been updated and cleaned. (#812) -- `metric.Number(0)` expressions are replaced by `0` where possible. (#812) -- Fixed `global` `handler_test.go` test failure. 
#804 -- Fixed `BatchSpanProcessor.Shutdown` to wait until all spans are processed. (#766) -- Fixed OTLP example's accidental early close of exporter. (#807) -- Ensure zipkin exporter reads and closes response body. (#788) -- Update instrumentation to use `api/standard` keys instead of custom keys. (#782) -- Clean up tools and RELEASING documentation. (#762) - -## [0.6.0] - 2020-05-21 - -### Added - -- Support for `Resource`s in the prometheus exporter. (#757) -- New pull controller. (#751) -- New `UpDownSumObserver` instrument. (#750) -- OpenTelemetry collector demo. (#711) -- New `SumObserver` instrument. (#747) -- New `UpDownCounter` instrument. (#745) -- New timeout `Option` and configuration function `WithTimeout` to the push controller. (#742) -- New `api/standards` package to implement semantic conventions and standard key-value generation. (#731) - -### Changed - -- Rename `Register*` functions in the metric API to `New*` for all `Observer` instruments. (#761) -- Use `[]float64` for histogram boundaries, not `[]metric.Number`. (#758) -- Change OTLP example to use exporter as a trace `Syncer` instead of as an unneeded `Batcher`. (#756) -- Replace `WithResourceAttributes()` with `WithResource()` in the trace SDK. (#754) -- The prometheus exporter now uses the new pull controller. (#751) -- Rename `ScheduleDelayMillis` to `BatchTimeout` in the trace `BatchSpanProcessor`.(#752) -- Support use of synchronous instruments in asynchronous callbacks (#725) -- Move `Resource` from the `Export` method parameter into the metric export `Record`. (#739) -- Rename `Observer` instrument to `ValueObserver`. (#734) -- The push controller now has a method (`Provider()`) to return a `metric.Provider` instead of the old `Meter` method that acted as a `metric.Provider`. (#738) -- Replace `Measure` instrument by `ValueRecorder` instrument. (#732) -- Rename correlation context header from `"Correlation-Context"` to `"otcorrelations"` to match the OpenTelemetry specification. 
(#727) - -### Fixed - -- Ensure gRPC `ClientStream` override methods do not panic in grpctrace package. (#755) -- Disable parts of `BatchSpanProcessor` test until a fix is found. (#743) -- Fix `string` case in `kv` `Infer` function. (#746) -- Fix panic in grpctrace client interceptors. (#740) -- Refactor the `api/metrics` push controller and add `CheckpointSet` synchronization. (#737) -- Rewrite span batch process queue batching logic. (#719) -- Remove the push controller named Meter map. (#738) -- Fix Histogram aggregator initial state (fix #735). (#736) -- Ensure golang alpine image is running `golang-1.14` for examples. (#733) -- Added test for grpctrace `UnaryInterceptorClient`. (#695) -- Rearrange `api/metric` code layout. (#724) - -## [0.5.0] - 2020-05-13 - -### Added - -- Batch `Observer` callback support. (#717) -- Alias `api` types to root package of project. (#696) -- Create basic `othttp.Transport` for simple client instrumentation. (#678) -- `SetAttribute(string, interface{})` to the trace API. (#674) -- Jaeger exporter option that allows user to specify custom http client. (#671) -- `Stringer` and `Infer` methods to `key`s. (#662) - -### Changed - -- Rename `NewKey` in the `kv` package to just `Key`. (#721) -- Move `core` and `key` to `kv` package. (#720) -- Make the metric API `Meter` a `struct` so the abstract `MeterImpl` can be passed and simplify implementation. (#709) -- Rename SDK `Batcher` to `Integrator` to match draft OpenTelemetry SDK specification. (#710) -- Rename SDK `Ungrouped` integrator to `simple.Integrator` to match draft OpenTelemetry SDK specification. (#710) -- Rename SDK `SDK` `struct` to `Accumulator` to match draft OpenTelemetry SDK specification. (#710) -- Move `Number` from `core` to `api/metric` package. (#706) -- Move `SpanContext` from `core` to `trace` package. (#692) -- Change traceparent header from `Traceparent` to `traceparent` to implement the W3C specification. 
(#681) - -### Fixed - -- Update tooling to run generators in all submodules. (#705) -- gRPC interceptor regexp to match methods without a service name. (#683) -- Use a `const` for padding 64-bit B3 trace IDs. (#701) -- Update `mockZipkin` listen address from `:0` to `127.0.0.1:0`. (#700) -- Left-pad 64-bit B3 trace IDs with zero. (#698) -- Propagate at least the first W3C tracestate header. (#694) -- Remove internal `StateLocker` implementation. (#688) -- Increase instance size CI system uses. (#690) -- Add a `key` benchmark and use reflection in `key.Infer()`. (#679) -- Fix internal `global` test by using `global.Meter` with `RecordBatch()`. (#680) -- Reimplement histogram using mutex instead of `StateLocker`. (#669) -- Switch `MinMaxSumCount` to a mutex lock implementation instead of `StateLocker`. (#667) -- Update documentation to not include any references to `WithKeys`. (#672) -- Correct misspelling. (#668) -- Fix clobbering of the span context if extraction fails. (#656) -- Bump `golangci-lint` and work around the corrupting bug. (#666) (#670) - -## [0.4.3] - 2020-04-24 - -### Added - -- `Dockerfile` and `docker-compose.yml` to run example code. (#635) -- New `grpctrace` package that provides gRPC client and server interceptors for both unary and stream connections. (#621) -- New `api/label` package, providing common label set implementation. (#651) -- Support for JSON marshaling of `Resources`. (#654) -- `TraceID` and `SpanID` implementations for `Stringer` interface. (#642) -- `RemoteAddrKey` in the othttp plugin to include the HTTP client address in top-level spans. (#627) -- `WithSpanFormatter` option to the othttp plugin. (#617) -- Updated README to include section for compatible libraries and include reference to the contrib repository. (#612) -- The prometheus exporter now supports exporting histograms. (#601) -- A `String` method to the `Resource` to return a hashable identifier for a now unique resource. 
(#613) -- An `Iter` method to the `Resource` to return an array `AttributeIterator`. (#613) -- An `Equal` method to the `Resource` to test the equivalence of resources. (#613) -- An iterable structure (`AttributeIterator`) for `Resource` attributes. - -### Changed - -- zipkin export's `NewExporter` now requires a `serviceName` argument to ensure this needed value is provided. (#644) -- Pass `Resources` through the metrics export pipeline. (#659) - -### Removed - -- `WithKeys` option from the metric API. (#639) - -### Fixed - -- Use the `label.Set.Equivalent` value instead of an encoding in the batcher. (#658) -- Correct typo `trace.Exporter` to `trace.SpanSyncer` in comments. (#653) -- Use type names for return values in jaeger exporter. (#648) -- Increase the visibility of the `api/key` package by updating comments and fixing usages locally. (#650) -- `Checkpoint` only after `Update`; Keep records in the `sync.Map` longer. (#647) -- Do not cache `reflect.ValueOf()` in metric Labels. (#649) -- Batch metrics exported from the OTLP exporter based on `Resource` and labels. (#626) -- Add error wrapping to the prometheus exporter. (#631) -- Update the OTLP exporter batching of traces to use a unique `string` representation of an associated `Resource` as the batching key. (#623) -- Update OTLP `SpanData` transform to only include the `ParentSpanID` if one exists. (#614) -- Update `Resource` internal representation to uniquely and reliably identify resources. (#613) -- Check return value from `CheckpointSet.ForEach` in prometheus exporter. (#622) -- Ensure spans created by httptrace client tracer reflect operation structure. (#618) -- Create a new recorder rather than reuse when multiple observations in same epoch for asynchronous instruments. #610 -- The default port the OTLP exporter uses to connect to the OpenTelemetry collector is updated to match the one the collector listens on by default. 
(#611) - -## [0.4.2] - 2020-03-31 - -### Fixed - -- Fix `pre_release.sh` to update version in `sdk/opentelemetry.go`. (#607) -- Fix time conversion from internal to OTLP in OTLP exporter. (#606) - -## [0.4.1] - 2020-03-31 - -### Fixed - -- Update `tag.sh` to create signed tags. (#604) - -## [0.4.0] - 2020-03-30 - -### Added - -- New API package `api/metric/registry` that exposes a `MeterImpl` wrapper for use by SDKs to generate unique instruments. (#580) -- Script to verify examples after a new release. (#579) - -### Removed - -- The dogstatsd exporter due to lack of support. - This additionally removes support for statsd. (#591) -- `LabelSet` from the metric API. - This is replaced by a `[]core.KeyValue` slice. (#595) -- `Labels` from the metric API's `Meter` interface. (#595) - -### Changed - -- The metric `export.Labels` became an interface which the SDK implements and the `export` package provides a simple, immutable implementation of this interface intended for testing purposes. (#574) -- Renamed `internal/metric.Meter` to `MeterImpl`. (#580) -- Renamed `api/global/internal.obsImpl` to `asyncImpl`. (#580) - -### Fixed - -- Corrected missing return in mock span. (#582) -- Update License header for all source files to match CNCF guidelines and include a test to ensure it is present. (#586) (#596) -- Update to v0.3.0 of the OTLP in the OTLP exporter. (#588) -- Update pre-release script to be compatible between GNU and BSD based systems. (#592) -- Add a `RecordBatch` benchmark. (#594) -- Moved span transforms of the OTLP exporter to the internal package. (#593) -- Build both go-1.13 and go-1.14 in circleci to test for all supported versions of Go. (#569) -- Removed unneeded allocation on empty labels in OLTP exporter. (#597) -- Update `BatchedSpanProcessor` to process the queue until no data but respect max batch size. (#599) -- Update project documentation godoc.org links to pkg.go.dev. 
(#602) - -## [0.3.0] - 2020-03-21 - -This is a first official beta release, which provides almost fully complete metrics, tracing, and context propagation functionality. -There is still a possibility of breaking changes. - -### Added - -- Add `Observer` metric instrument. (#474) -- Add global `Propagators` functionality to enable deferred initialization for propagators registered before the first Meter SDK is installed. (#494) -- Simplified export setup pipeline for the jaeger exporter to match other exporters. (#459) -- The zipkin trace exporter. (#495) -- The OTLP exporter to export metric and trace telemetry to the OpenTelemetry collector. (#497) (#544) (#545) -- Add `StatusMessage` field to the trace `Span`. (#524) -- Context propagation in OpenTracing bridge in terms of OpenTelemetry context propagation. (#525) -- The `Resource` type was added to the SDK. (#528) -- The global API now supports a `Tracer` and `Meter` function as shortcuts to getting a global `*Provider` and calling these methods directly. (#538) -- The metric API now defines a generic `MeterImpl` interface to support general purpose `Meter` construction. - Additionally, `SyncImpl` and `AsyncImpl` are added to support general purpose instrument construction. (#560) -- A metric `Kind` is added to represent the `MeasureKind`, `ObserverKind`, and `CounterKind`. (#560) -- Scripts to better automate the release process. (#576) - -### Changed - -- Default to use `AlwaysSampler` instead of `ProbabilitySampler` to match OpenTelemetry specification. (#506) -- Renamed `AlwaysSampleSampler` to `AlwaysOnSampler` in the trace API. (#511) -- Renamed `NeverSampleSampler` to `AlwaysOffSampler` in the trace API. (#511) -- The `Status` field of the `Span` was changed to `StatusCode` to disambiguate with the added `StatusMessage`. (#524) -- Updated the trace `Sampler` interface to conform to the OpenTelemetry specification. (#531) -- Rename metric API `Options` to `Config`. 
(#541) -- Rename metric `Counter` aggregator to be `Sum`. (#541) -- Unify metric options into `Option` from instrument specific options. (#541) -- The trace API's `TraceProvider` now support `Resource`s. (#545) -- Correct error in zipkin module name. (#548) -- The jaeger trace exporter now supports `Resource`s. (#551) -- Metric SDK now supports `Resource`s. - The `WithResource` option was added to configure a `Resource` on creation and the `Resource` method was added to the metric `Descriptor` to return the associated `Resource`. (#552) -- Replace `ErrNoLastValue` and `ErrEmptyDataSet` by `ErrNoData` in the metric SDK. (#557) -- The stdout trace exporter now supports `Resource`s. (#558) -- The metric `Descriptor` is now included at the API instead of the SDK. (#560) -- Replace `Ordered` with an iterator in `export.Labels`. (#567) - -### Removed - -- The vendor specific Stackdriver. It is now hosted on 3rd party vendor infrastructure. (#452) -- The `Unregister` method for metric observers as it is not in the OpenTelemetry specification. (#560) -- `GetDescriptor` from the metric SDK. (#575) -- The `Gauge` instrument from the metric API. (#537) - -### Fixed - -- Make histogram aggregator checkpoint consistent. (#438) -- Update README with import instructions and how to build and test. (#505) -- The default label encoding was updated to be unique. (#508) -- Use `NewRoot` in the othttp plugin for public endpoints. (#513) -- Fix data race in `BatchedSpanProcessor`. (#518) -- Skip test-386 for Mac OS 10.15.x (Catalina and upwards). #521 -- Use a variable-size array to represent ordered labels in maps. (#523) -- Update the OTLP protobuf and update changed import path. (#532) -- Use `StateLocker` implementation in `MinMaxSumCount`. (#546) -- Eliminate goroutine leak in histogram stress test. (#547) -- Update OTLP exporter with latest protobuf. (#550) -- Add filters to the othttp plugin. 
(#556) -- Provide an implementation of the `Header*` filters that do not depend on Go 1.14. (#565) -- Encode labels once during checkpoint. - The checkpoint function is executed in a single thread so we can do the encoding lazily before passing the encoded version of labels to the exporter. - This is a cheap and quick way to avoid encoding the labels on every collection interval. (#572) -- Run coverage over all packages in `COVERAGE_MOD_DIR`. (#573) - -## [0.2.3] - 2020-03-04 - -### Added - -- `RecordError` method on `Span`s in the trace API to simplify adding error events to spans. (#473) -- Configurable push frequency for exporters setup pipeline. (#504) - -### Changed - -- Rename the `exporter` directory to `exporters`. - The `go.opentelemetry.io/otel/exporter/trace/jaeger` package was mistakenly released with a `v1.0.0` tag instead of `v0.1.0`. - This resulted in all subsequent releases not becoming the default latest. - A consequence of this was that all `go get`s pulled in the incompatible `v0.1.0` release of that package when pulling in more recent packages from other otel packages. - Renaming the `exporter` directory to `exporters` fixes this issue by renaming the package and therefore clearing any existing dependency tags. - Consequentially, this action also renames *all* exporter packages. (#502) - -### Removed - -- The `CorrelationContextHeader` constant in the `correlation` package is no longer exported. (#503) - -## [0.2.2] - 2020-02-27 - -### Added - -- `HTTPSupplier` interface in the propagation API to specify methods to retrieve and store a single value for a key to be associated with a carrier. (#467) -- `HTTPExtractor` interface in the propagation API to extract information from an `HTTPSupplier` into a context. (#467) -- `HTTPInjector` interface in the propagation API to inject information into an `HTTPSupplier`. (#467) -- `Config` and configuring `Option` to the propagator API. 
(#467) -- `Propagators` interface in the propagation API to contain the set of injectors and extractors for all supported carrier formats. (#467) -- `HTTPPropagator` interface in the propagation API to inject and extract from an `HTTPSupplier`. (#467) -- `WithInjectors` and `WithExtractors` functions to the propagator API to configure injectors and extractors to use. (#467) -- `ExtractHTTP` and `InjectHTTP` functions to apply configured HTTP extractors and injectors to a passed context. (#467) -- Histogram aggregator. (#433) -- `DefaultPropagator` function and have it return `trace.TraceContext` as the default context propagator. (#456) -- `AlwaysParentSample` sampler to the trace API. (#455) -- `WithNewRoot` option function to the trace API to specify the created span should be considered a root span. (#451) - -### Changed - -- Renamed `WithMap` to `ContextWithMap` in the correlation package. (#481) -- Renamed `FromContext` to `MapFromContext` in the correlation package. (#481) -- Move correlation context propagation to correlation package. (#479) -- Do not default to putting remote span context into links. (#480) -- `Tracer.WithSpan` updated to accept `StartOptions`. (#472) -- Renamed `MetricKind` to `Kind` to not stutter in the type usage. (#432) -- Renamed the `export` package to `metric` to match directory structure. (#432) -- Rename the `api/distributedcontext` package to `api/correlation`. (#444) -- Rename the `api/propagators` package to `api/propagation`. (#444) -- Move the propagators from the `propagators` package into the `trace` API package. (#444) -- Update `Float64Gauge`, `Int64Gauge`, `Float64Counter`, `Int64Counter`, `Float64Measure`, and `Int64Measure` metric methods to use value receivers instead of pointers. (#462) -- Moved all dependencies of tools package to a tools directory. (#466) - -### Removed - -- Binary propagators. (#467) -- NOOP propagator. 
(#467) - -### Fixed - -- Upgraded `github.com/golangci/golangci-lint` from `v1.21.0` to `v1.23.6` in `tools/`. (#492) -- Fix a possible nil-dereference crash (#478) -- Correct comments for `InstallNewPipeline` in the stdout exporter. (#483) -- Correct comments for `InstallNewPipeline` in the dogstatsd exporter. (#484) -- Correct comments for `InstallNewPipeline` in the prometheus exporter. (#482) -- Initialize `onError` based on `Config` in prometheus exporter. (#486) -- Correct module name in prometheus exporter README. (#475) -- Removed tracer name prefix from span names. (#430) -- Fix `aggregator_test.go` import package comment. (#431) -- Improved detail in stdout exporter. (#436) -- Fix a dependency issue (generate target should depend on stringer, not lint target) in Makefile. (#442) -- Reorders the Makefile targets within `precommit` target so we generate files and build the code before doing linting, so we can get much nicer errors about syntax errors from the compiler. (#442) -- Reword function documentation in gRPC plugin. (#446) -- Send the `span.kind` tag to Jaeger from the jaeger exporter. (#441) -- Fix `metadataSupplier` in the jaeger exporter to overwrite the header if existing instead of appending to it. (#441) -- Upgraded to Go 1.13 in CI. (#465) -- Correct opentelemetry.io URL in trace SDK documentation. (#464) -- Refactored reference counting logic in SDK determination of stale records. (#468) -- Add call to `runtime.Gosched` in instrument `acquireHandle` logic to not block the collector. (#469) - -## [0.2.1.1] - 2020-01-13 - -### Fixed - -- Use stateful batcher on Prometheus exporter fixing regression introduced in #395. (#428) - -## [0.2.1] - 2020-01-08 - -### Added - -- Global meter forwarding implementation. - This enables deferred initialization for metric instruments registered before the first Meter SDK is installed. (#392) -- Global trace forwarding implementation. 
- This enables deferred initialization for tracers registered before the first Trace SDK is installed. (#406) -- Standardize export pipeline creation in all exporters. (#395) -- A testing, organization, and comments for 64-bit field alignment. (#418) -- Script to tag all modules in the project. (#414) - -### Changed - -- Renamed `propagation` package to `propagators`. (#362) -- Renamed `B3Propagator` propagator to `B3`. (#362) -- Renamed `TextFormatPropagator` propagator to `TextFormat`. (#362) -- Renamed `BinaryPropagator` propagator to `Binary`. (#362) -- Renamed `BinaryFormatPropagator` propagator to `BinaryFormat`. (#362) -- Renamed `NoopTextFormatPropagator` propagator to `NoopTextFormat`. (#362) -- Renamed `TraceContextPropagator` propagator to `TraceContext`. (#362) -- Renamed `SpanOption` to `StartOption` in the trace API. (#369) -- Renamed `StartOptions` to `StartConfig` in the trace API. (#369) -- Renamed `EndOptions` to `EndConfig` in the trace API. (#369) -- `Number` now has a pointer receiver for its methods. (#375) -- Renamed `CurrentSpan` to `SpanFromContext` in the trace API. (#379) -- Renamed `SetCurrentSpan` to `ContextWithSpan` in the trace API. (#379) -- Renamed `Message` in Event to `Name` in the trace API. (#389) -- Prometheus exporter no longer aggregates metrics, instead it only exports them. (#385) -- Renamed `HandleImpl` to `BoundInstrumentImpl` in the metric API. (#400) -- Renamed `Float64CounterHandle` to `Float64CounterBoundInstrument` in the metric API. (#400) -- Renamed `Int64CounterHandle` to `Int64CounterBoundInstrument` in the metric API. (#400) -- Renamed `Float64GaugeHandle` to `Float64GaugeBoundInstrument` in the metric API. (#400) -- Renamed `Int64GaugeHandle` to `Int64GaugeBoundInstrument` in the metric API. (#400) -- Renamed `Float64MeasureHandle` to `Float64MeasureBoundInstrument` in the metric API. (#400) -- Renamed `Int64MeasureHandle` to `Int64MeasureBoundInstrument` in the metric API. 
(#400) -- Renamed `Release` method for bound instruments in the metric API to `Unbind`. (#400) -- Renamed `AcquireHandle` method for bound instruments in the metric API to `Bind`. (#400) -- Renamed the `File` option in the stdout exporter to `Writer`. (#404) -- Renamed all `Options` to `Config` for all metric exports where this wasn't already the case. - -### Fixed - -- Aggregator import path corrected. (#421) -- Correct links in README. (#368) -- The README was updated to match latest code changes in its examples. (#374) -- Don't capitalize error statements. (#375) -- Fix ignored errors. (#375) -- Fix ambiguous variable naming. (#375) -- Removed unnecessary type casting. (#375) -- Use named parameters. (#375) -- Updated release schedule. (#378) -- Correct http-stackdriver example module name. (#394) -- Removed the `http.request` span in `httptrace` package. (#397) -- Add comments in the metrics SDK (#399) -- Initialize checkpoint when creating ddsketch aggregator to prevent panic when merging into a empty one. (#402) (#403) -- Add documentation of compatible exporters in the README. (#405) -- Typo fix. (#408) -- Simplify span check logic in SDK tracer implementation. (#419) - -## [0.2.0] - 2019-12-03 - -### Added - -- Unary gRPC tracing example. (#351) -- Prometheus exporter. (#334) -- Dogstatsd metrics exporter. (#326) - -### Changed - -- Rename `MaxSumCount` aggregation to `MinMaxSumCount` and add the `Min` interface for this aggregation. (#352) -- Rename `GetMeter` to `Meter`. (#357) -- Rename `HTTPTraceContextPropagator` to `TraceContextPropagator`. (#355) -- Rename `HTTPB3Propagator` to `B3Propagator`. (#355) -- Rename `HTTPTraceContextPropagator` to `TraceContextPropagator`. (#355) -- Move `/global` package to `/api/global`. (#356) -- Rename `GetTracer` to `Tracer`. (#347) - -### Removed - -- `SetAttribute` from the `Span` interface in the trace API. (#361) -- `AddLink` from the `Span` interface in the trace API. 
(#349) -- `Link` from the `Span` interface in the trace API. (#349) - -### Fixed - -- Exclude example directories from coverage report. (#365) -- Lint make target now implements automatic fixes with `golangci-lint` before a second run to report the remaining issues. (#360) -- Drop `GO111MODULE` environment variable in Makefile as Go 1.13 is the project specified minimum version and this is environment variable is not needed for that version of Go. (#359) -- Run the race checker for all test. (#354) -- Redundant commands in the Makefile are removed. (#354) -- Split the `generate` and `lint` targets of the Makefile. (#354) -- Renames `circle-ci` target to more generic `ci` in Makefile. (#354) -- Add example Prometheus binary to gitignore. (#358) -- Support negative numbers with the `MaxSumCount`. (#335) -- Resolve race conditions in `push_test.go` identified in #339. (#340) -- Use `/usr/bin/env bash` as a shebang in scripts rather than `/bin/bash`. (#336) -- Trace benchmark now tests both `AlwaysSample` and `NeverSample`. - Previously it was testing `AlwaysSample` twice. (#325) -- Trace benchmark now uses a `[]byte` for `TraceID` to fix failing test. (#325) -- Added a trace benchmark to test variadic functions in `setAttribute` vs `setAttributes` (#325) -- The `defaultkeys` batcher was only using the encoded label set as its map key while building a checkpoint. - This allowed distinct label sets through, but any metrics sharing a label set could be overwritten or merged incorrectly. - This was corrected. (#333) - -## [0.1.2] - 2019-11-18 - -### Fixed - -- Optimized the `simplelru` map for attributes to reduce the number of allocations. (#328) -- Removed unnecessary unslicing of parameters that are already a slice. (#324) - -## [0.1.1] - 2019-11-18 - -This release contains a Metrics SDK with stdout exporter and supports basic aggregations such as counter, gauges, array, maxsumcount, and ddsketch. - -### Added - -- Metrics stdout export pipeline. 
(#265) -- Array aggregation for raw measure metrics. (#282) -- The core.Value now have a `MarshalJSON` method. (#281) - -### Removed - -- `WithService`, `WithResources`, and `WithComponent` methods of tracers. (#314) -- Prefix slash in `Tracer.Start()` for the Jaeger example. (#292) - -### Changed - -- Allocation in LabelSet construction to reduce GC overhead. (#318) -- `trace.WithAttributes` to append values instead of replacing (#315) -- Use a formula for tolerance in sampling tests. (#298) -- Move export types into trace and metric-specific sub-directories. (#289) -- `SpanKind` back to being based on an `int` type. (#288) - -### Fixed - -- URL to OpenTelemetry website in README. (#323) -- Name of othttp default tracer. (#321) -- `ExportSpans` for the stackdriver exporter now handles `nil` context. (#294) -- CI modules cache to correctly restore/save from/to the cache. (#316) -- Fix metric SDK race condition between `LoadOrStore` and the assignment `rec.recorder = i.meter.exporter.AggregatorFor(rec)`. (#293) -- README now reflects the new code structure introduced with these changes. (#291) -- Make the basic example work. (#279) - -## [0.1.0] - 2019-11-04 - -This is the first release of open-telemetry go library. -It contains api and sdk for trace and meter. - -### Added - -- Initial OpenTelemetry trace and metric API prototypes. -- Initial OpenTelemetry trace, metric, and export SDK packages. -- A wireframe bridge to support compatibility with OpenTracing. -- Example code for a basic, http-stackdriver, http, jaeger, and named tracer setup. -- Exporters for Jaeger, Stackdriver, and stdout. -- Propagators for binary, B3, and trace-context protocols. -- Project information and guidelines in the form of a README and CONTRIBUTING. -- Tools to build the project and a Makefile to automate the process. -- Apache-2.0 license. -- CircleCI build CI manifest files. -- CODEOWNERS file to track owners of this project. 
- -[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.16.0...HEAD -[1.16.0/0.39.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.16.0 -[1.16.0-rc.1/0.39.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.16.0-rc.1 -[1.15.1/0.38.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.1 -[1.15.0/0.38.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0 -[1.15.0-rc.2/0.38.0-rc.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0-rc.2 -[1.15.0-rc.1/0.38.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0-rc.1 -[1.14.0/0.37.0/0.0.4]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.14.0 -[1.13.0/0.36.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.13.0 -[1.12.0/0.35.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.12.0 -[1.11.2/0.34.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.2 -[1.11.1/0.33.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.1 -[1.11.0/0.32.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.0 -[0.32.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.2 -[0.32.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.1 -[0.32.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.0 -[1.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.10.0 -[1.9.0/0.0.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.9.0 -[1.8.0/0.31.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.8.0 -[1.7.0/0.30.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.7.0 -[0.29.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.29.0 -[1.6.3]: 
https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.3 -[1.6.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.2 -[1.6.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.1 -[1.6.0/0.28.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.0 -[1.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.5.0 -[1.4.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.1 -[1.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.0 -[1.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.3.0 -[1.2.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.2.0 -[1.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.1.0 -[1.0.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.1 -[Metrics 0.24.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.24.0 -[1.0.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0 -[1.0.0-RC3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC3 -[1.0.0-RC2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC2 -[Experimental Metrics v0.22.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.22.0 -[1.0.0-RC1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC1 -[0.20.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.20.0 -[0.19.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.19.0 -[0.18.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.18.0 -[0.17.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.17.0 -[0.16.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.16.0 -[0.15.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.15.0 -[0.14.0]: 
https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.14.0 -[0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.13.0 -[0.12.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.12.0 -[0.11.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.11.0 -[0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.10.0 -[0.9.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.9.0 -[0.8.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.8.0 -[0.7.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.7.0 -[0.6.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.6.0 -[0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.5.0 -[0.4.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.3 -[0.4.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.2 -[0.4.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.1 -[0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.0 -[0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.3.0 -[0.2.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.3 -[0.2.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.2 -[0.2.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.1.1 -[0.2.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.1 -[0.2.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.0 -[0.1.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.2 -[0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1 -[0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0 - -[Go 1.20]: https://go.dev/doc/go1.20 -[Go 1.19]: https://go.dev/doc/go1.19 -[Go 1.18]: https://go.dev/doc/go1.18 - -[metric 
API]:https://pkg.go.dev/go.opentelemetry.io/otel/metric diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS deleted file mode 100644 index f6f6a313..00000000 --- a/vendor/go.opentelemetry.io/otel/CODEOWNERS +++ /dev/null @@ -1,17 +0,0 @@ -##################################################### -# -# List of approvers for this repository -# -##################################################### -# -# Learn about membership in OpenTelemetry community: -# https://github.com/open-telemetry/community/blob/main/community-membership.md -# -# -# Learn about CODEOWNERS file format: -# https://help.github.com/en/articles/about-code-owners -# - -* @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu - -CODEOWNERS @MrAlias @Aneurysm9 @MadVikingGod diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md deleted file mode 100644 index b2df5de3..00000000 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ /dev/null @@ -1,562 +0,0 @@ -# Contributing to opentelemetry-go - -The Go special interest group (SIG) meets regularly. See the -OpenTelemetry -[community](https://github.com/open-telemetry/community#golang-sdk) -repo for information on this and other language SIGs. - -See the [public meeting -notes](https://docs.google.com/document/d/1E5e7Ld0NuU1iVvf-42tOBpu2VBBLYnh73GJuITGJTTU/edit) -for a summary description of past meetings. To request edit access, -join the meeting or get in touch on -[Slack](https://cloud-native.slack.com/archives/C01NPAXACKT). - -## Development - -You can view and edit the source code by cloning this repository: - -```sh -git clone https://github.com/open-telemetry/opentelemetry-go.git -``` - -Run `make test` to run the tests instead of `go test`. - -There are some generated files checked into the repo. 
To make sure -that the generated files are up-to-date, run `make` (or `make -precommit` - the `precommit` target is the default). - -The `precommit` target also fixes the formatting of the code and -checks the status of the go module files. - -Additionally, there is a `codespell` target that checks for common -typos in the code. It is not run by default, but you can run it -manually with `make codespell`. It will set up a virtual environment -in `venv` and install `codespell` there. - -If after running `make precommit` the output of `git status` contains -`nothing to commit, working tree clean` then it means that everything -is up-to-date and properly formatted. - -## Pull Requests - -### How to Send Pull Requests - -Everyone is welcome to contribute code to `opentelemetry-go` via -GitHub pull requests (PRs). - -To create a new PR, fork the project in GitHub and clone the upstream -repo: - -```sh -go get -d go.opentelemetry.io/otel -``` - -(This may print some warning about "build constraints exclude all Go -files", just ignore it.) - -This will put the project in `${GOPATH}/src/go.opentelemetry.io/otel`. You -can alternatively use `git` directly with: - -```sh -git clone https://github.com/open-telemetry/opentelemetry-go -``` - -(Note that `git clone` is *not* using the `go.opentelemetry.io/otel` name - -that name is a kind of a redirector to GitHub that `go get` can -understand, but `git` does not.) - -This would put the project in the `opentelemetry-go` directory in -current working directory. - -Enter the newly created directory and add your fork as a new remote: - -```sh -git remote add git@github.com:/opentelemetry-go -``` - -Check out a new branch, make modifications, run linters and tests, update -`CHANGELOG.md`, and push the branch to your fork: - -```sh -git checkout -b -# edit files -# update changelog -make precommit -git add -p -git commit -git push -``` - -Open a pull request against the main `opentelemetry-go` repo. 
Be sure to add the pull -request ID to the entry you added to `CHANGELOG.md`. - -### How to Receive Comments - -* If the PR is not ready for review, please put `[WIP]` in the title, - tag it as `work-in-progress`, or mark it as - [`draft`](https://github.blog/2019-02-14-introducing-draft-pull-requests/). -* Make sure CLA is signed and CI is clear. - -### How to Get PRs Merged - -A PR is considered **ready to merge** when: - -* It has received two qualified approvals[^1]. - - This is not enforced through automation, but needs to be validated by the - maintainer merging. - * The qualified approvals need to be from [Approver]s/[Maintainer]s - affiliated with different companies. Two qualified approvals from - [Approver]s or [Maintainer]s affiliated with the same company counts as a - single qualified approval. - * PRs introducing changes that have already been discussed and consensus - reached only need one qualified approval. The discussion and resolution - needs to be linked to the PR. - * Trivial changes[^2] only need one qualified approval. - -* All feedback has been addressed. - * All PR comments and suggestions are resolved. - * All GitHub Pull Request reviews with a status of "Request changes" have - been addressed. Another review by the objecting reviewer with a different - status can be submitted to clear the original review, or the review can be - dismissed by a [Maintainer] when the issues from the original review have - been addressed. - * Any comments or reviews that cannot be resolved between the PR author and - reviewers can be submitted to the community [Approver]s and [Maintainer]s - during the weekly SIG meeting. If consensus is reached among the - [Approver]s and [Maintainer]s during the SIG meeting the objections to the - PR may be dismissed or resolved or the PR closed by a [Maintainer]. 
- * Any substantive changes to the PR require existing Approval reviews be - cleared unless the approver explicitly states that their approval persists - across changes. This includes changes resulting from other feedback. - [Approver]s and [Maintainer]s can help in clearing reviews and they should - be consulted if there are any questions. - -* The PR branch is up to date with the base branch it is merging into. - * To ensure this does not block the PR, it should be configured to allow - maintainers to update it. - -* It has been open for review for at least one working day. This gives people - reasonable time to review. - * Trivial changes[^2] do not have to wait for one day and may be merged with - a single [Maintainer]'s approval. - -* All required GitHub workflows have succeeded. -* Urgent fix can take exception as long as it has been actively communicated - among [Maintainer]s. - -Any [Maintainer] can merge the PR once the above criteria have been met. - -[^1]: A qualified approval is a GitHub Pull Request review with "Approve" - status from an OpenTelemetry Go [Approver] or [Maintainer]. -[^2]: Trivial changes include: typo corrections, cosmetic non-substantive - changes, documentation corrections or updates, dependency updates, etc. - -## Design Choices - -As with other OpenTelemetry clients, opentelemetry-go follows the -[OpenTelemetry Specification](https://opentelemetry.io/docs/specs/otel). - -It's especially valuable to read through the [library -guidelines](https://opentelemetry.io/docs/specs/otel/library-guidelines). - -### Focus on Capabilities, Not Structure Compliance - -OpenTelemetry is an evolving specification, one where the desires and -use cases are clear, but the method to satisfy those uses cases are -not. - -As such, Contributions should provide functionality and behavior that -conforms to the specification, but the interface and structure is -flexible. 
- -It is preferable to have contributions follow the idioms of the -language rather than conform to specific API names or argument -patterns in the spec. - -For a deeper discussion, see -[this](https://github.com/open-telemetry/opentelemetry-specification/issues/165). - -## Documentation - -Each non-example Go Module should have its own `README.md` containing: - -- A pkg.go.dev badge which can be generated [here](https://pkg.go.dev/badge/). -- Brief description. -- Installation instructions (and requirements if applicable). -- Hyperlink to an example. Depending on the component the example can be: - - An `example_test.go` like [here](exporters/stdout/stdouttrace/example_test.go). - - A sample Go application with its own `README.md`, like [here](example/zipkin). -- Additional documentation sections such us: - - Configuration, - - Contributing, - - References. - -[Here](exporters/jaeger/README.md) is an example of a concise `README.md`. - -Moreover, it should be possible to navigate to any `README.md` from the -root `README.md`. - -## Style Guide - -One of the primary goals of this project is that it is actually used by -developers. With this goal in mind the project strives to build -user-friendly and idiomatic Go code adhering to the Go community's best -practices. - -For a non-comprehensive but foundational overview of these best practices -the [Effective Go](https://golang.org/doc/effective_go.html) documentation -is an excellent starting place. - -As a convenience for developers building this project the `make precommit` -will format, lint, validate, and in some cases fix the changes you plan to -submit. This check will need to pass for your changes to be able to be -merged. - -In addition to idiomatic Go, the project has adopted certain standards for -implementations of common patterns. These standards should be followed as a -default, and if they are not followed documentation needs to be included as -to the reasons why. 
- -### Configuration - -When creating an instantiation function for a complex `type T struct`, it is -useful to allow variable number of options to be applied. However, the strong -type system of Go restricts the function design options. There are a few ways -to solve this problem, but we have landed on the following design. - -#### `config` - -Configuration should be held in a `struct` named `config`, or prefixed with -specific type name this Configuration applies to if there are multiple -`config` in the package. This type must contain configuration options. - -```go -// config contains configuration options for a thing. -type config struct { - // options ... -} -``` - -In general the `config` type will not need to be used externally to the -package and should be unexported. If, however, it is expected that the user -will likely want to build custom options for the configuration, the `config` -should be exported. Please, include in the documentation for the `config` -how the user can extend the configuration. - -It is important that internal `config` are not shared across package boundaries. -Meaning a `config` from one package should not be directly used by another. The -one exception is the API packages. The configs from the base API, eg. -`go.opentelemetry.io/otel/trace.TracerConfig` and -`go.opentelemetry.io/otel/metric.InstrumentConfig`, are intended to be consumed -by the SDK therefore it is expected that these are exported. - -When a config is exported we want to maintain forward and backward -compatibility, to achieve this no fields should be exported but should -instead be accessed by methods. - -Optionally, it is common to include a `newConfig` function (with the same -naming scheme). This function wraps any defaults setting and looping over -all options to create a configured `config`. - -```go -// newConfig returns an appropriately configured config. -func newConfig(options ...Option) config { - // Set default values for config. 
- config := config{/* […] */} - for _, option := range options { - config = option.apply(config) - } - // Perform any validation here. - return config -} -``` - -If validation of the `config` options is also performed this can return an -error as well that is expected to be handled by the instantiation function -or propagated to the user. - -Given the design goal of not having the user need to work with the `config`, -the `newConfig` function should also be unexported. - -#### `Option` - -To set the value of the options a `config` contains, a corresponding -`Option` interface type should be used. - -```go -type Option interface { - apply(config) config -} -``` - -Having `apply` unexported makes sure that it will not be used externally. -Moreover, the interface becomes sealed so the user cannot easily implement -the interface on its own. - -The `apply` method should return a modified version of the passed config. -This approach, instead of passing a pointer, is used to prevent the config from being allocated to the heap. - -The name of the interface should be prefixed in the same way the -corresponding `config` is (if at all). - -#### Options - -All user configurable options for a `config` must have a related unexported -implementation of the `Option` interface and an exported configuration -function that wraps this implementation. - -The wrapping function name should be prefixed with `With*` (or in the -special case of a boolean options `Without*`) and should have the following -function signature. - -```go -func With*(…) Option { … } -``` - -##### `bool` Options - -```go -type defaultFalseOption bool - -func (o defaultFalseOption) apply(c config) config { - c.Bool = bool(o) - return c -} - -// WithOption sets a T to have an option included. 
-func WithOption() Option { - return defaultFalseOption(true) -} -``` - -```go -type defaultTrueOption bool - -func (o defaultTrueOption) apply(c config) config { - c.Bool = bool(o) - return c -} - -// WithoutOption sets a T to have Bool option excluded. -func WithoutOption() Option { - return defaultTrueOption(false) -} -``` - -##### Declared Type Options - -```go -type myTypeOption struct { - MyType MyType -} - -func (o myTypeOption) apply(c config) config { - c.MyType = o.MyType - return c -} - -// WithMyType sets T to have include MyType. -func WithMyType(t MyType) Option { - return myTypeOption{t} -} -``` - -##### Functional Options - -```go -type optionFunc func(config) config - -func (fn optionFunc) apply(c config) config { - return fn(c) -} - -// WithMyType sets t as MyType. -func WithMyType(t MyType) Option { - return optionFunc(func(c config) config { - c.MyType = t - return c - }) -} -``` - -#### Instantiation - -Using this configuration pattern to configure instantiation with a `NewT` -function. - -```go -func NewT(options ...Option) T {…} -``` - -Any required parameters can be declared before the variadic `options`. - -#### Dealing with Overlap - -Sometimes there are multiple complex `struct` that share common -configuration and also have distinct configuration. To avoid repeated -portions of `config`s, a common `config` can be used with the union of -options being handled with the `Option` interface. - -For example. - -```go -// config holds options for all animals. -type config struct { - Weight float64 - Color string - MaxAltitude float64 -} - -// DogOption apply Dog specific options. -type DogOption interface { - applyDog(config) config -} - -// BirdOption apply Bird specific options. -type BirdOption interface { - applyBird(config) config -} - -// Option apply options for all animals. 
-type Option interface { - BirdOption - DogOption -} - -type weightOption float64 - -func (o weightOption) applyDog(c config) config { - c.Weight = float64(o) - return c -} - -func (o weightOption) applyBird(c config) config { - c.Weight = float64(o) - return c -} - -func WithWeight(w float64) Option { return weightOption(w) } - -type furColorOption string - -func (o furColorOption) applyDog(c config) config { - c.Color = string(o) - return c -} - -func WithFurColor(c string) DogOption { return furColorOption(c) } - -type maxAltitudeOption float64 - -func (o maxAltitudeOption) applyBird(c config) config { - c.MaxAltitude = float64(o) - return c -} - -func WithMaxAltitude(a float64) BirdOption { return maxAltitudeOption(a) } - -func NewDog(name string, o ...DogOption) Dog {…} -func NewBird(name string, o ...BirdOption) Bird {…} -``` - -### Interfaces - -To allow other developers to better comprehend the code, it is important -to ensure it is sufficiently documented. One simple measure that contributes -to this aim is self-documenting by naming method parameters. Therefore, -where appropriate, methods of every exported interface type should have -their parameters appropriately named. - -#### Interface Stability - -All exported stable interfaces that include the following warning in their -documentation are allowed to be extended with additional methods. - -> Warning: methods may be added to this interface in minor releases. - -Otherwise, stable interfaces MUST NOT be modified. - -If new functionality is needed for an interface that cannot be changed it MUST -be added by including an additional interface. That added interface can be a -simple interface for the specific functionality that you want to add or it can -be a super-set of the original interface. 
For example, if you wanted to a -`Close` method to the `Exporter` interface: - -```go -type Exporter interface { - Export() -} -``` - -A new interface, `Closer`, can be added: - -```go -type Closer interface { - Close() -} -``` - -Code that is passed the `Exporter` interface can now check to see if the passed -value also satisfies the new interface. E.g. - -```go -func caller(e Exporter) { - /* ... */ - if c, ok := e.(Closer); ok { - c.Close() - } - /* ... */ -} -``` - -Alternatively, a new type that is the super-set of an `Exporter` can be created. - -```go -type ClosingExporter struct { - Exporter - Close() -} -``` - -This new type can be used similar to the simple interface above in that a -passed `Exporter` type can be asserted to satisfy the `ClosingExporter` type -and the `Close` method called. - -This super-set approach can be useful if there is explicit behavior that needs -to be coupled with the original type and passed as a unified type to a new -function, but, because of this coupling, it also limits the applicability of -the added functionality. If there exist other interfaces where this -functionality should be added, each one will need their own super-set -interfaces and will duplicate the pattern. For this reason, the simple targeted -interface that defines the specific functionality should be preferred. 
- -## Approvers and Maintainers - -### Approvers - -- [Evan Torrie](https://github.com/evantorrie), Verizon Media -- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics -- [David Ashpole](https://github.com/dashpole), Google -- [Robert Pająk](https://github.com/pellared), Splunk -- [Chester Cheung](https://github.com/hanyuancheung), Tencent -- [Damien Mathieu](https://github.com/dmathieu), Elastic - -### Maintainers - -- [Aaron Clawson](https://github.com/MadVikingGod), LightStep -- [Anthony Mirabella](https://github.com/Aneurysm9), AWS -- [Tyler Yahn](https://github.com/MrAlias), Splunk - -### Emeritus - -- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep -- [Josh MacDonald](https://github.com/jmacd), LightStep - -### Become an Approver or a Maintainer - -See the [community membership document in OpenTelemetry community -repo](https://github.com/open-telemetry/community/blob/main/community-membership.md). - -[Approver]: #approvers -[Maintainer]: #maintainers diff --git a/vendor/go.opentelemetry.io/otel/LICENSE b/vendor/go.opentelemetry.io/otel/LICENSE deleted file mode 100644 index 261eeb9e..00000000 --- a/vendor/go.opentelemetry.io/otel/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile deleted file mode 100644 index 26e4bed2..00000000 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ /dev/null @@ -1,269 +0,0 @@ -# Copyright The OpenTelemetry Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -TOOLS_MOD_DIR := ./internal/tools - -ALL_DOCS := $(shell find . -name '*.md' -type f | sort) -ALL_GO_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | sort) -OTEL_GO_MOD_DIRS := $(filter-out $(TOOLS_MOD_DIR), $(ALL_GO_MOD_DIRS)) -ALL_COVERAGE_MOD_DIRS := $(shell find . 
-type f -name 'go.mod' -exec dirname {} \; | grep -E -v '^./example|^$(TOOLS_MOD_DIR)' | sort) - -GO = go -TIMEOUT = 60 - -.DEFAULT_GOAL := precommit - -.PHONY: precommit ci -precommit: generate dependabot-generate license-check vanity-import-fix misspell go-mod-tidy golangci-lint-fix test-default -ci: generate dependabot-check license-check lint vanity-import-check build test-default check-clean-work-tree test-coverage - -# Tools - -TOOLS = $(CURDIR)/.tools - -$(TOOLS): - @mkdir -p $@ -$(TOOLS)/%: | $(TOOLS) - cd $(TOOLS_MOD_DIR) && \ - $(GO) build -o $@ $(PACKAGE) - -MULTIMOD = $(TOOLS)/multimod -$(TOOLS)/multimod: PACKAGE=go.opentelemetry.io/build-tools/multimod - -SEMCONVGEN = $(TOOLS)/semconvgen -$(TOOLS)/semconvgen: PACKAGE=go.opentelemetry.io/build-tools/semconvgen - -CROSSLINK = $(TOOLS)/crosslink -$(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink - -SEMCONVKIT = $(TOOLS)/semconvkit -$(TOOLS)/semconvkit: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/semconvkit - -DBOTCONF = $(TOOLS)/dbotconf -$(TOOLS)/dbotconf: PACKAGE=go.opentelemetry.io/build-tools/dbotconf - -GOLANGCI_LINT = $(TOOLS)/golangci-lint -$(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/cmd/golangci-lint - -MISSPELL = $(TOOLS)/misspell -$(TOOLS)/misspell: PACKAGE=github.com/client9/misspell/cmd/misspell - -GOCOVMERGE = $(TOOLS)/gocovmerge -$(TOOLS)/gocovmerge: PACKAGE=github.com/wadey/gocovmerge - -STRINGER = $(TOOLS)/stringer -$(TOOLS)/stringer: PACKAGE=golang.org/x/tools/cmd/stringer - -PORTO = $(TOOLS)/porto -$(TOOLS)/porto: PACKAGE=github.com/jcchavezs/porto/cmd/porto - -GOJQ = $(TOOLS)/gojq -$(TOOLS)/gojq: PACKAGE=github.com/itchyny/gojq/cmd/gojq - -.PHONY: tools -tools: $(CROSSLINK) $(DBOTCONF) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) - -# Virtualized python tools via docker - -# The directory where the virtual environment is created. 
-VENVDIR := venv - -# The directory where the python tools are installed. -PYTOOLS := $(VENVDIR)/bin - -# The pip executable in the virtual environment. -PIP := $(PYTOOLS)/pip - -# The directory in the docker image where the current directory is mounted. -WORKDIR := /workdir - -# The python image to use for the virtual environment. -PYTHONIMAGE := python:3.11.3-slim-bullseye - -# Run the python image with the current directory mounted. -DOCKERPY := docker run --rm -v "$(CURDIR):$(WORKDIR)" -w $(WORKDIR) $(PYTHONIMAGE) - -# Create a virtual environment for Python tools. -$(PYTOOLS): -# The `--upgrade` flag is needed to ensure that the virtual environment is -# created with the latest pip version. - @$(DOCKERPY) bash -c "python3 -m venv $(VENVDIR) && $(PIP) install --upgrade pip" - -# Install python packages into the virtual environment. -$(PYTOOLS)/%: | $(PYTOOLS) - @$(DOCKERPY) $(PIP) install -r requirements.txt - -CODESPELL = $(PYTOOLS)/codespell -$(CODESPELL): PACKAGE=codespell - -# Generate - -.PHONY: generate - -generate: $(OTEL_GO_MOD_DIRS:%=generate/%) -generate/%: DIR=$* -generate/%: | $(STRINGER) $(PORTO) - @echo "$(GO) generate $(DIR)/..." \ - && cd $(DIR) \ - && PATH="$(TOOLS):$${PATH}" $(GO) generate ./... && $(PORTO) -w . - -# Build - -.PHONY: build - -build: $(OTEL_GO_MOD_DIRS:%=build/%) $(OTEL_GO_MOD_DIRS:%=build-tests/%) -build/%: DIR=$* -build/%: - @echo "$(GO) build $(DIR)/..." \ - && cd $(DIR) \ - && $(GO) build ./... - -build-tests/%: DIR=$* -build-tests/%: - @echo "$(GO) build tests $(DIR)/..." \ - && cd $(DIR) \ - && $(GO) list ./... \ - | grep -v third_party \ - | xargs $(GO) test -vet=off -run xxxxxMatchNothingxxxxx >/dev/null - -# Tests - -TEST_TARGETS := test-default test-bench test-short test-verbose test-race -.PHONY: $(TEST_TARGETS) test -test-default test-race: ARGS=-race -test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=. 
-test-short: ARGS=-short -test-verbose: ARGS=-v -race -$(TEST_TARGETS): test -test: $(OTEL_GO_MOD_DIRS:%=test/%) -test/%: DIR=$* -test/%: - @echo "$(GO) test -timeout $(TIMEOUT)s $(ARGS) $(DIR)/..." \ - && cd $(DIR) \ - && $(GO) list ./... \ - | grep -v third_party \ - | xargs $(GO) test -timeout $(TIMEOUT)s $(ARGS) - -COVERAGE_MODE = atomic -COVERAGE_PROFILE = coverage.out -.PHONY: test-coverage -test-coverage: | $(GOCOVMERGE) - @set -e; \ - printf "" > coverage.txt; \ - for dir in $(ALL_COVERAGE_MOD_DIRS); do \ - echo "$(GO) test -coverpkg=go.opentelemetry.io/otel/... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" $${dir}/..."; \ - (cd "$${dir}" && \ - $(GO) list ./... \ - | grep -v third_party \ - | grep -v 'semconv/v.*' \ - | xargs $(GO) test -coverpkg=./... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" && \ - $(GO) tool cover -html=coverage.out -o coverage.html); \ - done; \ - $(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt - -.PHONY: golangci-lint golangci-lint-fix -golangci-lint-fix: ARGS=--fix -golangci-lint-fix: golangci-lint -golangci-lint: $(OTEL_GO_MOD_DIRS:%=golangci-lint/%) -golangci-lint/%: DIR=$* -golangci-lint/%: | $(GOLANGCI_LINT) - @echo 'golangci-lint $(if $(ARGS),$(ARGS) ,)$(DIR)' \ - && cd $(DIR) \ - && $(GOLANGCI_LINT) run --allow-serial-runners $(ARGS) - -.PHONY: crosslink -crosslink: | $(CROSSLINK) - @echo "Updating intra-repository dependencies in all go modules" \ - && $(CROSSLINK) --root=$(shell pwd) --prune - -.PHONY: go-mod-tidy -go-mod-tidy: $(ALL_GO_MOD_DIRS:%=go-mod-tidy/%) -go-mod-tidy/%: DIR=$* -go-mod-tidy/%: | crosslink - @echo "$(GO) mod tidy in $(DIR)" \ - && cd $(DIR) \ - && $(GO) mod tidy -compat=1.19 - -.PHONY: lint-modules -lint-modules: go-mod-tidy - -.PHONY: lint -lint: misspell lint-modules golangci-lint - -.PHONY: vanity-import-check -vanity-import-check: | $(PORTO) - @$(PORTO) --include-internal -l . 
|| echo "(run: make vanity-import-fix)" - -.PHONY: vanity-import-fix -vanity-import-fix: | $(PORTO) - @$(PORTO) --include-internal -w . - -.PHONY: misspell -misspell: | $(MISSPELL) - @$(MISSPELL) -w $(ALL_DOCS) - -.PHONY: codespell -codespell: | $(CODESPELL) - @$(DOCKERPY) $(CODESPELL) - -.PHONY: license-check -license-check: - @licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*' ! -path './.git/*' ) ; do \ - awk '/Copyright The OpenTelemetry Authors|generated|GENERATED/ && NR<=3 { found=1; next } END { if (!found) print FILENAME }' $$f; \ - done); \ - if [ -n "$${licRes}" ]; then \ - echo "license header checking failed:"; echo "$${licRes}"; \ - exit 1; \ - fi - -DEPENDABOT_CONFIG = .github/dependabot.yml -.PHONY: dependabot-check -dependabot-check: | $(DBOTCONF) - @$(DBOTCONF) verify $(DEPENDABOT_CONFIG) || echo "(run: make dependabot-generate)" - -.PHONY: dependabot-generate -dependabot-generate: | $(DBOTCONF) - @$(DBOTCONF) generate > $(DEPENDABOT_CONFIG) - -.PHONY: check-clean-work-tree -check-clean-work-tree: - @if ! git diff --quiet; then \ - echo; \ - echo 'Working tree is not clean, did you forget to run "make precommit"?'; \ - echo; \ - git status; \ - exit 1; \ - fi - -SEMCONVPKG ?= "semconv/" -.PHONY: semconv-generate -semconv-generate: | $(SEMCONVGEN) $(SEMCONVKIT) - [ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry specification tag"; exit 1 ) - [ "$(OTEL_SPEC_REPO)" ] || ( echo "OTEL_SPEC_REPO unset: missing path to opentelemetry specification repo"; exit 1 ) - $(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/." --only=span -p conventionType=trace -f trace.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" - $(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/." --only=attribute_group -p conventionType=trace -f attribute_group.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" - $(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/." 
--only=event -p conventionType=event -f event.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" - $(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/." --only=resource -p conventionType=resource -f resource.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" - $(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)" - -.PHONY: prerelease -prerelease: | $(MULTIMOD) - @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) - $(MULTIMOD) verify && $(MULTIMOD) prerelease -m ${MODSET} - -COMMIT ?= "HEAD" -.PHONY: add-tags -add-tags: | $(MULTIMOD) - @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) - $(MULTIMOD) verify && $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT} diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md deleted file mode 100644 index e138a8a0..00000000 --- a/vendor/go.opentelemetry.io/otel/README.md +++ /dev/null @@ -1,109 +0,0 @@ -# OpenTelemetry-Go - -[![CI](https://github.com/open-telemetry/opentelemetry-go/workflows/ci/badge.svg)](https://github.com/open-telemetry/opentelemetry-go/actions?query=workflow%3Aci+branch%3Amain) -[![codecov.io](https://codecov.io/gh/open-telemetry/opentelemetry-go/coverage.svg?branch=main)](https://app.codecov.io/gh/open-telemetry/opentelemetry-go?branch=main) -[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel)](https://pkg.go.dev/go.opentelemetry.io/otel) -[![Go Report Card](https://goreportcard.com/badge/go.opentelemetry.io/otel)](https://goreportcard.com/report/go.opentelemetry.io/otel) -[![Slack](https://img.shields.io/badge/slack-@cncf/otel--go-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C01NPAXACKT) - -OpenTelemetry-Go is the [Go](https://golang.org/) implementation of [OpenTelemetry](https://opentelemetry.io/). -It provides a set of APIs to directly measure performance and behavior of your software and send this data to observability platforms. 
- -## Project Status - -| Signal | Status | Project | -| ------- | ---------- | ------- | -| Traces | Stable | N/A | -| Metrics | Beta | N/A | -| Logs | Frozen [1] | N/A | - -- [1]: The Logs signal development is halted for this project while we develop both Traces and Metrics. - No Logs Pull Requests are currently being accepted. - -Progress and status specific to this repository is tracked in our local -[project boards](https://github.com/open-telemetry/opentelemetry-go/projects) -and -[milestones](https://github.com/open-telemetry/opentelemetry-go/milestones). - -Project versioning information and stability guarantees can be found in the -[versioning documentation](./VERSIONING.md). - -### Compatibility - -OpenTelemetry-Go ensures compatibility with the current supported versions of -the [Go language](https://golang.org/doc/devel/release#policy): - -> Each major Go release is supported until there are two newer major releases. -> For example, Go 1.5 was supported until the Go 1.7 release, and Go 1.6 was supported until the Go 1.8 release. - -For versions of Go that are no longer supported upstream, opentelemetry-go will -stop ensuring compatibility with these versions in the following manner: - -- A minor release of opentelemetry-go will be made to add support for the new - supported release of Go. -- The following minor release of opentelemetry-go will remove compatibility - testing for the oldest (now archived upstream) version of Go. This, and - future, releases of opentelemetry-go may include features only supported by - the currently supported versions of Go. - -Currently, this project supports the following environments. 
- -| OS | Go Version | Architecture | -| ------- | ---------- | ------------ | -| Ubuntu | 1.20 | amd64 | -| Ubuntu | 1.19 | amd64 | -| Ubuntu | 1.20 | 386 | -| Ubuntu | 1.19 | 386 | -| MacOS | 1.20 | amd64 | -| MacOS | 1.19 | amd64 | -| Windows | 1.20 | amd64 | -| Windows | 1.19 | amd64 | -| Windows | 1.20 | 386 | -| Windows | 1.19 | 386 | - -While this project should work for other systems, no compatibility guarantees -are made for those systems currently. - -## Getting Started - -You can find a getting started guide on [opentelemetry.io](https://opentelemetry.io/docs/go/getting-started/). - -OpenTelemetry's goal is to provide a single set of APIs to capture distributed -traces and metrics from your application and send them to an observability -platform. This project allows you to do just that for applications written in -Go. There are two steps to this process: instrument your application, and -configure an exporter. - -### Instrumentation - -To start capturing distributed traces and metric events from your application -it first needs to be instrumented. The easiest way to do this is by using an -instrumentation library for your code. Be sure to check out [the officially -supported instrumentation -libraries](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/instrumentation). - -If you need to extend the telemetry an instrumentation library provides or want -to build your own instrumentation for your application directly you will need -to use the -[Go otel](https://pkg.go.dev/go.opentelemetry.io/otel) -package. The included [examples](./example/) are a good way to see some -practical uses of this process. - -### Export - -Now that your application is instrumented to collect telemetry, it needs an -export pipeline to send that telemetry to an observability platform. - -All officially supported exporters for the OpenTelemetry project are contained in the [exporters directory](./exporters). 
- -| Exporter | Metrics | Traces | -| :-----------------------------------: | :-----: | :----: | -| [Jaeger](./exporters/jaeger/) | | ✓ | -| [OTLP](./exporters/otlp/) | ✓ | ✓ | -| [Prometheus](./exporters/prometheus/) | ✓ | | -| [stdout](./exporters/stdout/) | ✓ | ✓ | -| [Zipkin](./exporters/zipkin/) | | ✓ | - -## Contributing - -See the [contributing documentation](CONTRIBUTING.md). diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md deleted file mode 100644 index 5e6daf6c..00000000 --- a/vendor/go.opentelemetry.io/otel/RELEASING.md +++ /dev/null @@ -1,126 +0,0 @@ -# Release Process - -## Semantic Convention Generation - -New versions of the [OpenTelemetry Specification] mean new versions of the `semconv` package need to be generated. -The `semconv-generate` make target is used for this. - -1. Checkout a local copy of the [OpenTelemetry Specification] to the desired release tag. -2. Pull the latest `otel/semconvgen` image: `docker pull otel/semconvgen:latest` -3. Run the `make semconv-generate ...` target from this repository. - -For example, - -```sh -export TAG="v1.13.0" # Change to the release version you are generating. -export OTEL_SPEC_REPO="/absolute/path/to/opentelemetry-specification" -docker pull otel/semconvgen:latest -make semconv-generate # Uses the exported TAG and OTEL_SPEC_REPO. -``` - -This should create a new sub-package of [`semconv`](./semconv). -Ensure things look correct before submitting a pull request to include the addition. - -**Note**, the generation code was changed to generate versions >= 1.13. -To generate versions prior to this, checkout the old release of this repository (i.e. [2fe8861](https://github.com/open-telemetry/opentelemetry-go/commit/2fe8861a24e20088c065b116089862caf9e3cd8b)). - -## Pre-Release - -First, decide which module sets will be released and update their versions -in `versions.yaml`. Commit this change to a new branch. 
- -Update go.mod for submodules to depend on the new release which will happen in the next step. - -1. Run the `prerelease` make target. It creates a branch - `prerelease__` that will contain all release changes. - - ``` - make prerelease MODSET= - ``` - -2. Verify the changes. - - ``` - git diff ...prerelease__ - ``` - - This should have changed the version for all modules to be ``. - If these changes look correct, merge them into your pre-release branch: - - ```go - git merge prerelease__ - ``` - -3. Update the [Changelog](./CHANGELOG.md). - - Make sure all relevant changes for this release are included and are in language that non-contributors to the project can understand. - To verify this, you can look directly at the commits since the ``. - - ``` - git --no-pager log --pretty=oneline "..HEAD" - ``` - - - Move all the `Unreleased` changes into a new section following the title scheme (`[] - `). - - Update all the appropriate links at the bottom. - -4. Push the changes to upstream and create a Pull Request on GitHub. - Be sure to include the curated changes from the [Changelog](./CHANGELOG.md) in the description. - -## Tag - -Once the Pull Request with all the version changes has been approved and merged it is time to tag the merged commit. - -***IMPORTANT***: It is critical you use the same tag that you used in the Pre-Release step! -Failure to do so will leave things in a broken state. As long as you do not -change `versions.yaml` between pre-release and this step, things should be fine. - -***IMPORTANT***: [There is currently no way to remove an incorrectly tagged version of a Go module](https://github.com/golang/go/issues/34189). -It is critical you make sure the version you push upstream is correct. -[Failure to do so will lead to minor emergencies and tough to work around](https://github.com/open-telemetry/opentelemetry-go/issues/331). - -1. 
For each module set that will be released, run the `add-tags` make target - using the `` of the commit on the main branch for the merged Pull Request. - - ``` - make add-tags MODSET= COMMIT= - ``` - - It should only be necessary to provide an explicit `COMMIT` value if the - current `HEAD` of your working directory is not the correct commit. - -2. Push tags to the upstream remote (not your fork: `github.com/open-telemetry/opentelemetry-go.git`). - Make sure you push all sub-modules as well. - - ``` - git push upstream - git push upstream - ... - ``` - -## Release - -Finally create a Release for the new `` on GitHub. -The release body should include all the release notes from the Changelog for this release. - -## Verify Examples - -After releasing verify that examples build outside of the repository. - -``` -./verify_examples.sh -``` - -The script copies examples into a different directory removes any `replace` declarations in `go.mod` and builds them. -This ensures they build with the published release, not the local copy. - -## Post-Release - -### Contrib Repository - -Once verified be sure to [make a release for the `contrib` repository](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/RELEASING.md) that uses this release. - -### Website Documentation - -Update [the documentation](./website_docs) for [the OpenTelemetry website](https://opentelemetry.io/docs/go/). -Importantly, bump any package versions referenced to be the latest one you just released and ensure all code examples still compile and are accurate. - -[OpenTelemetry Specification]: https://github.com/open-telemetry/opentelemetry-specification diff --git a/vendor/go.opentelemetry.io/otel/VERSIONING.md b/vendor/go.opentelemetry.io/otel/VERSIONING.md deleted file mode 100644 index 412f1e36..00000000 --- a/vendor/go.opentelemetry.io/otel/VERSIONING.md +++ /dev/null @@ -1,224 +0,0 @@ -# Versioning - -This document describes the versioning policy for this repository. 
This policy -is designed so the following goals can be achieved. - -**Users are provided a codebase of value that is stable and secure.** - -## Policy - -* Versioning of this project will be idiomatic of a Go project using [Go - modules](https://github.com/golang/go/wiki/Modules). - * [Semantic import - versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning) - will be used. - * Versions will comply with [semver - 2.0](https://semver.org/spec/v2.0.0.html) with the following exceptions. - * New methods may be added to exported API interfaces. All exported - interfaces that fall within this exception will include the following - paragraph in their public documentation. - - > Warning: methods may be added to this interface in minor releases. - - * If a module is version `v2` or higher, the major version of the module - must be included as a `/vN` at the end of the module paths used in - `go.mod` files (e.g., `module go.opentelemetry.io/otel/v2`, `require - go.opentelemetry.io/otel/v2 v2.0.1`) and in the package import path - (e.g., `import "go.opentelemetry.io/otel/v2/trace"`). This includes the - paths used in `go get` commands (e.g., `go get - go.opentelemetry.io/otel/v2@v2.0.1`. Note there is both a `/v2` and a - `@v2.0.1` in that example. One way to think about it is that the module - name now includes the `/v2`, so include `/v2` whenever you are using the - module name). - * If a module is version `v0` or `v1`, do not include the major version in - either the module path or the import path. - * Modules will be used to encapsulate signals and components. - * Experimental modules still under active development will be versioned at - `v0` to imply the stability guarantee defined by - [semver](https://semver.org/spec/v2.0.0.html#spec-item-4). - - > Major version zero (0.y.z) is for initial development. Anything MAY - > change at any time. The public API SHOULD NOT be considered stable. 
- - * Mature modules for which we guarantee a stable public API will be versioned - with a major version greater than `v0`. - * The decision to make a module stable will be made on a case-by-case - basis by the maintainers of this project. - * Experimental modules will start their versioning at `v0.0.0` and will - increment their minor version when backwards incompatible changes are - released and increment their patch version when backwards compatible - changes are released. - * All stable modules that use the same major version number will use the - same entire version number. - * Stable modules may be released with an incremented minor or patch - version even though that module has not been changed, but rather so - that it will remain at the same version as other stable modules that - did undergo change. - * When an experimental module becomes stable a new stable module version - will be released and will include this now stable module. The new - stable module version will be an increment of the minor version number - and will be applied to all existing stable modules as well as the newly - stable module being released. -* Versioning of the associated [contrib - repository](https://github.com/open-telemetry/opentelemetry-go-contrib) of - this project will be idiomatic of a Go project using [Go - modules](https://github.com/golang/go/wiki/Modules). - * [Semantic import - versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning) - will be used. - * Versions will comply with [semver 2.0](https://semver.org/spec/v2.0.0.html). - * If a module is version `v2` or higher, the - major version of the module must be included as a `/vN` at the end of the - module paths used in `go.mod` files (e.g., `module - go.opentelemetry.io/contrib/instrumentation/host/v2`, `require - go.opentelemetry.io/contrib/instrumentation/host/v2 v2.0.1`) and in the - package import path (e.g., `import - "go.opentelemetry.io/contrib/instrumentation/host/v2"`). 
This includes - the paths used in `go get` commands (e.g., `go get - go.opentelemetry.io/contrib/instrumentation/host/v2@v2.0.1`. Note there - is both a `/v2` and a `@v2.0.1` in that example. One way to think about - it is that the module name now includes the `/v2`, so include `/v2` - whenever you are using the module name). - * If a module is version `v0` or `v1`, do not include the major version - in either the module path or the import path. - * In addition to public APIs, telemetry produced by stable instrumentation - will remain stable and backwards compatible. This is to avoid breaking - alerts and dashboard. - * Modules will be used to encapsulate instrumentation, detectors, exporters, - propagators, and any other independent sets of related components. - * Experimental modules still under active development will be versioned at - `v0` to imply the stability guarantee defined by - [semver](https://semver.org/spec/v2.0.0.html#spec-item-4). - - > Major version zero (0.y.z) is for initial development. Anything MAY - > change at any time. The public API SHOULD NOT be considered stable. - - * Mature modules for which we guarantee a stable public API and telemetry will - be versioned with a major version greater than `v0`. - * Experimental modules will start their versioning at `v0.0.0` and will - increment their minor version when backwards incompatible changes are - released and increment their patch version when backwards compatible - changes are released. - * Stable contrib modules cannot depend on experimental modules from this - project. - * All stable contrib modules of the same major version with this project - will use the same entire version as this project. - * Stable modules may be released with an incremented minor or patch - version even though that module's code has not been changed. Instead - the only change that will have been included is to have updated that - modules dependency on this project's stable APIs. 
- * When an experimental module in contrib becomes stable a new stable - module version will be released and will include this now stable - module. The new stable module version will be an increment of the minor - version number and will be applied to all existing stable contrib - modules, this project's modules, and the newly stable module being - released. - * Contrib modules will be kept up to date with this project's releases. - * Due to the dependency contrib modules will implicitly have on this - project's modules the release of stable contrib modules to match the - released version number will be staggered after this project's release. - There is no explicit time guarantee for how long after this projects - release the contrib release will be. Effort should be made to keep them - as close in time as possible. - * No additional stable release in this project can be made until the - contrib repository has a matching stable release. - * No release can be made in the contrib repository after this project's - stable release except for a stable release of the contrib repository. -* GitHub releases will be made for all releases. -* Go modules will be made available at Go package mirrors. - -## Example Versioning Lifecycle - -To better understand the implementation of the above policy the following -example is provided. This project is simplified to include only the following -modules and their versions: - -* `otel`: `v0.14.0` -* `otel/trace`: `v0.14.0` -* `otel/metric`: `v0.14.0` -* `otel/baggage`: `v0.14.0` -* `otel/sdk/trace`: `v0.14.0` -* `otel/sdk/metric`: `v0.14.0` - -These modules have been developed to a point where the `otel/trace`, -`otel/baggage`, and `otel/sdk/trace` modules have reached a point that they -should be considered for a stable release. The `otel/metric` and -`otel/sdk/metric` are still under active development and the `otel` module -depends on both `otel/trace` and `otel/metric`. 
- -The `otel` package is refactored to remove its dependencies on `otel/metric` so -it can be released as stable as well. With that done the following release -candidates are made: - -* `otel`: `v1.0.0-RC1` -* `otel/trace`: `v1.0.0-RC1` -* `otel/baggage`: `v1.0.0-RC1` -* `otel/sdk/trace`: `v1.0.0-RC1` - -The `otel/metric` and `otel/sdk/metric` modules remain at `v0.14.0`. - -A few minor issues are discovered in the `otel/trace` package. These issues are -resolved with some minor, but backwards incompatible, changes and are released -as a second release candidate: - -* `otel`: `v1.0.0-RC2` -* `otel/trace`: `v1.0.0-RC2` -* `otel/baggage`: `v1.0.0-RC2` -* `otel/sdk/trace`: `v1.0.0-RC2` - -Notice that all module version numbers are incremented to adhere to our -versioning policy. - -After these release candidates have been evaluated to satisfaction, they are -released as version `v1.0.0`. - -* `otel`: `v1.0.0` -* `otel/trace`: `v1.0.0` -* `otel/baggage`: `v1.0.0` -* `otel/sdk/trace`: `v1.0.0` - -Since both the `go` utility and the Go module system support [the semantic -versioning definition of -precedence](https://semver.org/spec/v2.0.0.html#spec-item-11), this release -will correctly be interpreted as the successor to the previous release -candidates. - -Active development of this project continues. The `otel/metric` module now has -backwards incompatible changes to its API that need to be released and the -`otel/baggage` module has a minor bug fix that needs to be released. The -following release is made: - -* `otel`: `v1.0.1` -* `otel/trace`: `v1.0.1` -* `otel/metric`: `v0.15.0` -* `otel/baggage`: `v1.0.1` -* `otel/sdk/trace`: `v1.0.1` -* `otel/sdk/metric`: `v0.15.0` - -Notice that, again, all stable module versions are incremented in unison and -the `otel/sdk/metric` package, which depends on the `otel/metric` package, also -bumped its version. 
This bump of the `otel/sdk/metric` package makes sense -given their coupling, though it is not explicitly required by our versioning -policy. - -As we progress, the `otel/metric` and `otel/sdk/metric` packages have reached a -point where they should be evaluated for stability. The `otel` module is -reintegrated with the `otel/metric` package and the following release is made: - -* `otel`: `v1.1.0-RC1` -* `otel/trace`: `v1.1.0-RC1` -* `otel/metric`: `v1.1.0-RC1` -* `otel/baggage`: `v1.1.0-RC1` -* `otel/sdk/trace`: `v1.1.0-RC1` -* `otel/sdk/metric`: `v1.1.0-RC1` - -All the modules are evaluated and determined to a viable stable release. They -are then released as version `v1.1.0` (the minor version is incremented to -indicate the addition of new signal). - -* `otel`: `v1.1.0` -* `otel/trace`: `v1.1.0` -* `otel/metric`: `v1.1.0` -* `otel/baggage`: `v1.1.0` -* `otel/sdk/trace`: `v1.1.0` -* `otel/sdk/metric`: `v1.1.0` diff --git a/vendor/go.opentelemetry.io/otel/attribute/doc.go b/vendor/go.opentelemetry.io/otel/attribute/doc.go deleted file mode 100644 index dafe7424..00000000 --- a/vendor/go.opentelemetry.io/otel/attribute/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package attribute provides key and value attributes. 
-package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/vendor/go.opentelemetry.io/otel/attribute/encoder.go b/vendor/go.opentelemetry.io/otel/attribute/encoder.go deleted file mode 100644 index fe2bc576..00000000 --- a/vendor/go.opentelemetry.io/otel/attribute/encoder.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package attribute // import "go.opentelemetry.io/otel/attribute" - -import ( - "bytes" - "sync" - "sync/atomic" -) - -type ( - // Encoder is a mechanism for serializing an attribute set into a specific - // string representation that supports caching, to avoid repeated - // serialization. An example could be an exporter encoding the attribute - // set into a wire representation. - Encoder interface { - // Encode returns the serialized encoding of the attribute set using - // its Iterator. This result may be cached by a attribute.Set. - Encode(iterator Iterator) string - - // ID returns a value that is unique for each class of attribute - // encoder. Attribute encoders allocate these using `NewEncoderID`. - ID() EncoderID - } - - // EncoderID is used to identify distinct Encoder - // implementations, for caching encoded results. - EncoderID struct { - value uint64 - } - - // defaultAttrEncoder uses a sync.Pool of buffers to reduce the number of - // allocations used in encoding attributes. 
This implementation encodes a - // comma-separated list of key=value, with '/'-escaping of '=', ',', and - // '\'. - defaultAttrEncoder struct { - // pool is a pool of attribute set builders. The buffers in this pool - // grow to a size that most attribute encodings will not allocate new - // memory. - pool sync.Pool // *bytes.Buffer - } -) - -// escapeChar is used to ensure uniqueness of the attribute encoding where -// keys or values contain either '=' or ','. Since there is no parser needed -// for this encoding and its only requirement is to be unique, this choice is -// arbitrary. Users will see these in some exporters (e.g., stdout), so the -// backslash ('\') is used as a conventional choice. -const escapeChar = '\\' - -var ( - _ Encoder = &defaultAttrEncoder{} - - // encoderIDCounter is for generating IDs for other attribute encoders. - encoderIDCounter uint64 - - defaultEncoderOnce sync.Once - defaultEncoderID = NewEncoderID() - defaultEncoderInstance *defaultAttrEncoder -) - -// NewEncoderID returns a unique attribute encoder ID. It should be called -// once per each type of attribute encoder. Preferably in init() or in var -// definition. -func NewEncoderID() EncoderID { - return EncoderID{value: atomic.AddUint64(&encoderIDCounter, 1)} -} - -// DefaultEncoder returns an attribute encoder that encodes attributes in such -// a way that each escaped attribute's key is followed by an equal sign and -// then by an escaped attribute's value. All key-value pairs are separated by -// a comma. -// -// Escaping is done by prepending a backslash before either a backslash, equal -// sign or a comma. -func DefaultEncoder() Encoder { - defaultEncoderOnce.Do(func() { - defaultEncoderInstance = &defaultAttrEncoder{ - pool: sync.Pool{ - New: func() interface{} { - return &bytes.Buffer{} - }, - }, - } - }) - return defaultEncoderInstance -} - -// Encode is a part of an implementation of the AttributeEncoder interface. 
-func (d *defaultAttrEncoder) Encode(iter Iterator) string { - buf := d.pool.Get().(*bytes.Buffer) - defer d.pool.Put(buf) - buf.Reset() - - for iter.Next() { - i, keyValue := iter.IndexedAttribute() - if i > 0 { - _, _ = buf.WriteRune(',') - } - copyAndEscape(buf, string(keyValue.Key)) - - _, _ = buf.WriteRune('=') - - if keyValue.Value.Type() == STRING { - copyAndEscape(buf, keyValue.Value.AsString()) - } else { - _, _ = buf.WriteString(keyValue.Value.Emit()) - } - } - return buf.String() -} - -// ID is a part of an implementation of the AttributeEncoder interface. -func (*defaultAttrEncoder) ID() EncoderID { - return defaultEncoderID -} - -// copyAndEscape escapes `=`, `,` and its own escape character (`\`), -// making the default encoding unique. -func copyAndEscape(buf *bytes.Buffer, val string) { - for _, ch := range val { - switch ch { - case '=', ',', escapeChar: - _, _ = buf.WriteRune(escapeChar) - } - _, _ = buf.WriteRune(ch) - } -} - -// Valid returns true if this encoder ID was allocated by -// `NewEncoderID`. Invalid encoder IDs will not be cached. -func (id EncoderID) Valid() bool { - return id.value != 0 -} diff --git a/vendor/go.opentelemetry.io/otel/attribute/iterator.go b/vendor/go.opentelemetry.io/otel/attribute/iterator.go deleted file mode 100644 index 841b271f..00000000 --- a/vendor/go.opentelemetry.io/otel/attribute/iterator.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package attribute // import "go.opentelemetry.io/otel/attribute" - -// Iterator allows iterating over the set of attributes in order, sorted by -// key. -type Iterator struct { - storage *Set - idx int -} - -// MergeIterator supports iterating over two sets of attributes while -// eliminating duplicate values from the combined set. The first iterator -// value takes precedence. -type MergeIterator struct { - one oneIterator - two oneIterator - current KeyValue -} - -type oneIterator struct { - iter Iterator - done bool - attr KeyValue -} - -// Next moves the iterator to the next position. Returns false if there are no -// more attributes. -func (i *Iterator) Next() bool { - i.idx++ - return i.idx < i.Len() -} - -// Label returns current KeyValue. Must be called only after Next returns -// true. -// -// Deprecated: Use Attribute instead. -func (i *Iterator) Label() KeyValue { - return i.Attribute() -} - -// Attribute returns the current KeyValue of the Iterator. It must be called -// only after Next returns true. -func (i *Iterator) Attribute() KeyValue { - kv, _ := i.storage.Get(i.idx) - return kv -} - -// IndexedLabel returns current index and attribute. Must be called only -// after Next returns true. -// -// Deprecated: Use IndexedAttribute instead. -func (i *Iterator) IndexedLabel() (int, KeyValue) { - return i.idx, i.Attribute() -} - -// IndexedAttribute returns current index and attribute. Must be called only -// after Next returns true. -func (i *Iterator) IndexedAttribute() (int, KeyValue) { - return i.idx, i.Attribute() -} - -// Len returns a number of attributes in the iterated set. -func (i *Iterator) Len() int { - return i.storage.Len() -} - -// ToSlice is a convenience function that creates a slice of attributes from -// the passed iterator. The iterator is set up to start from the beginning -// before creating the slice. 
-func (i *Iterator) ToSlice() []KeyValue { - l := i.Len() - if l == 0 { - return nil - } - i.idx = -1 - slice := make([]KeyValue, 0, l) - for i.Next() { - slice = append(slice, i.Attribute()) - } - return slice -} - -// NewMergeIterator returns a MergeIterator for merging two attribute sets. -// Duplicates are resolved by taking the value from the first set. -func NewMergeIterator(s1, s2 *Set) MergeIterator { - mi := MergeIterator{ - one: makeOne(s1.Iter()), - two: makeOne(s2.Iter()), - } - return mi -} - -func makeOne(iter Iterator) oneIterator { - oi := oneIterator{ - iter: iter, - } - oi.advance() - return oi -} - -func (oi *oneIterator) advance() { - if oi.done = !oi.iter.Next(); !oi.done { - oi.attr = oi.iter.Attribute() - } -} - -// Next returns true if there is another attribute available. -func (m *MergeIterator) Next() bool { - if m.one.done && m.two.done { - return false - } - if m.one.done { - m.current = m.two.attr - m.two.advance() - return true - } - if m.two.done { - m.current = m.one.attr - m.one.advance() - return true - } - if m.one.attr.Key == m.two.attr.Key { - m.current = m.one.attr // first iterator attribute value wins - m.one.advance() - m.two.advance() - return true - } - if m.one.attr.Key < m.two.attr.Key { - m.current = m.one.attr - m.one.advance() - return true - } - m.current = m.two.attr - m.two.advance() - return true -} - -// Label returns the current value after Next() returns true. -// -// Deprecated: Use Attribute instead. -func (m *MergeIterator) Label() KeyValue { - return m.current -} - -// Attribute returns the current value after Next() returns true. 
-func (m *MergeIterator) Attribute() KeyValue { - return m.current -} diff --git a/vendor/go.opentelemetry.io/otel/attribute/key.go b/vendor/go.opentelemetry.io/otel/attribute/key.go deleted file mode 100644 index 0656a04e..00000000 --- a/vendor/go.opentelemetry.io/otel/attribute/key.go +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package attribute // import "go.opentelemetry.io/otel/attribute" - -// Key represents the key part in key-value pairs. It's a string. The -// allowed character set in the key depends on the use of the key. -type Key string - -// Bool creates a KeyValue instance with a BOOL Value. -// -// If creating both a key and value at the same time, use the provided -// convenience function instead -- Bool(name, value). -func (k Key) Bool(v bool) KeyValue { - return KeyValue{ - Key: k, - Value: BoolValue(v), - } -} - -// BoolSlice creates a KeyValue instance with a BOOLSLICE Value. -// -// If creating both a key and value at the same time, use the provided -// convenience function instead -- BoolSlice(name, value). -func (k Key) BoolSlice(v []bool) KeyValue { - return KeyValue{ - Key: k, - Value: BoolSliceValue(v), - } -} - -// Int creates a KeyValue instance with an INT64 Value. -// -// If creating both a key and value at the same time, use the provided -// convenience function instead -- Int(name, value). 
-func (k Key) Int(v int) KeyValue { - return KeyValue{ - Key: k, - Value: IntValue(v), - } -} - -// IntSlice creates a KeyValue instance with an INT64SLICE Value. -// -// If creating both a key and value at the same time, use the provided -// convenience function instead -- IntSlice(name, value). -func (k Key) IntSlice(v []int) KeyValue { - return KeyValue{ - Key: k, - Value: IntSliceValue(v), - } -} - -// Int64 creates a KeyValue instance with an INT64 Value. -// -// If creating both a key and value at the same time, use the provided -// convenience function instead -- Int64(name, value). -func (k Key) Int64(v int64) KeyValue { - return KeyValue{ - Key: k, - Value: Int64Value(v), - } -} - -// Int64Slice creates a KeyValue instance with an INT64SLICE Value. -// -// If creating both a key and value at the same time, use the provided -// convenience function instead -- Int64Slice(name, value). -func (k Key) Int64Slice(v []int64) KeyValue { - return KeyValue{ - Key: k, - Value: Int64SliceValue(v), - } -} - -// Float64 creates a KeyValue instance with a FLOAT64 Value. -// -// If creating both a key and value at the same time, use the provided -// convenience function instead -- Float64(name, value). -func (k Key) Float64(v float64) KeyValue { - return KeyValue{ - Key: k, - Value: Float64Value(v), - } -} - -// Float64Slice creates a KeyValue instance with a FLOAT64SLICE Value. -// -// If creating both a key and value at the same time, use the provided -// convenience function instead -- Float64(name, value). -func (k Key) Float64Slice(v []float64) KeyValue { - return KeyValue{ - Key: k, - Value: Float64SliceValue(v), - } -} - -// String creates a KeyValue instance with a STRING Value. -// -// If creating both a key and value at the same time, use the provided -// convenience function instead -- String(name, value). 
-func (k Key) String(v string) KeyValue { - return KeyValue{ - Key: k, - Value: StringValue(v), - } -} - -// StringSlice creates a KeyValue instance with a STRINGSLICE Value. -// -// If creating both a key and value at the same time, use the provided -// convenience function instead -- StringSlice(name, value). -func (k Key) StringSlice(v []string) KeyValue { - return KeyValue{ - Key: k, - Value: StringSliceValue(v), - } -} - -// Defined returns true for non-empty keys. -func (k Key) Defined() bool { - return len(k) != 0 -} diff --git a/vendor/go.opentelemetry.io/otel/attribute/kv.go b/vendor/go.opentelemetry.io/otel/attribute/kv.go deleted file mode 100644 index 1ddf3ce0..00000000 --- a/vendor/go.opentelemetry.io/otel/attribute/kv.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package attribute // import "go.opentelemetry.io/otel/attribute" - -import ( - "fmt" -) - -// KeyValue holds a key and value pair. -type KeyValue struct { - Key Key - Value Value -} - -// Valid returns if kv is a valid OpenTelemetry attribute. -func (kv KeyValue) Valid() bool { - return kv.Key.Defined() && kv.Value.Type() != INVALID -} - -// Bool creates a KeyValue with a BOOL Value type. -func Bool(k string, v bool) KeyValue { - return Key(k).Bool(v) -} - -// BoolSlice creates a KeyValue with a BOOLSLICE Value type. 
-func BoolSlice(k string, v []bool) KeyValue { - return Key(k).BoolSlice(v) -} - -// Int creates a KeyValue with an INT64 Value type. -func Int(k string, v int) KeyValue { - return Key(k).Int(v) -} - -// IntSlice creates a KeyValue with an INT64SLICE Value type. -func IntSlice(k string, v []int) KeyValue { - return Key(k).IntSlice(v) -} - -// Int64 creates a KeyValue with an INT64 Value type. -func Int64(k string, v int64) KeyValue { - return Key(k).Int64(v) -} - -// Int64Slice creates a KeyValue with an INT64SLICE Value type. -func Int64Slice(k string, v []int64) KeyValue { - return Key(k).Int64Slice(v) -} - -// Float64 creates a KeyValue with a FLOAT64 Value type. -func Float64(k string, v float64) KeyValue { - return Key(k).Float64(v) -} - -// Float64Slice creates a KeyValue with a FLOAT64SLICE Value type. -func Float64Slice(k string, v []float64) KeyValue { - return Key(k).Float64Slice(v) -} - -// String creates a KeyValue with a STRING Value type. -func String(k, v string) KeyValue { - return Key(k).String(v) -} - -// StringSlice creates a KeyValue with a STRINGSLICE Value type. -func StringSlice(k string, v []string) KeyValue { - return Key(k).StringSlice(v) -} - -// Stringer creates a new key-value pair with a passed name and a string -// value generated by the passed Stringer interface. -func Stringer(k string, v fmt.Stringer) KeyValue { - return Key(k).String(v.String()) -} diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go deleted file mode 100644 index b976367e..00000000 --- a/vendor/go.opentelemetry.io/otel/attribute/set.go +++ /dev/null @@ -1,436 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package attribute // import "go.opentelemetry.io/otel/attribute" - -import ( - "encoding/json" - "reflect" - "sort" - "sync" -) - -type ( - // Set is the representation for a distinct attribute set. It manages an - // immutable set of attributes, with an internal cache for storing - // attribute encodings. - // - // This type supports the Equivalent method of comparison using values of - // type Distinct. - Set struct { - equivalent Distinct - } - - // Distinct wraps a variable-size array of KeyValue, constructed with keys - // in sorted order. This can be used as a map key or for equality checking - // between Sets. - Distinct struct { - iface interface{} - } - - // Filter supports removing certain attributes from attribute sets. When - // the filter returns true, the attribute will be kept in the filtered - // attribute set. When the filter returns false, the attribute is excluded - // from the filtered attribute set, and the attribute instead appears in - // the removed list of excluded attributes. - Filter func(KeyValue) bool - - // Sortable implements sort.Interface, used for sorting KeyValue. This is - // an exported type to support a memory optimization. A pointer to one of - // these is needed for the call to sort.Stable(), which the caller may - // provide in order to avoid an allocation. See NewSetWithSortable(). - Sortable []KeyValue -) - -var ( - // keyValueType is used in computeDistinctReflect. - keyValueType = reflect.TypeOf(KeyValue{}) - - // emptySet is returned for empty attribute sets. 
- emptySet = &Set{ - equivalent: Distinct{ - iface: [0]KeyValue{}, - }, - } - - // sortables is a pool of Sortables used to create Sets with a user does - // not provide one. - sortables = sync.Pool{ - New: func() interface{} { return new(Sortable) }, - } -) - -// EmptySet returns a reference to a Set with no elements. -// -// This is a convenience provided for optimized calling utility. -func EmptySet() *Set { - return emptySet -} - -// reflectValue abbreviates reflect.ValueOf(d). -func (d Distinct) reflectValue() reflect.Value { - return reflect.ValueOf(d.iface) -} - -// Valid returns true if this value refers to a valid Set. -func (d Distinct) Valid() bool { - return d.iface != nil -} - -// Len returns the number of attributes in this set. -func (l *Set) Len() int { - if l == nil || !l.equivalent.Valid() { - return 0 - } - return l.equivalent.reflectValue().Len() -} - -// Get returns the KeyValue at ordered position idx in this set. -func (l *Set) Get(idx int) (KeyValue, bool) { - if l == nil || !l.equivalent.Valid() { - return KeyValue{}, false - } - value := l.equivalent.reflectValue() - - if idx >= 0 && idx < value.Len() { - // Note: The Go compiler successfully avoids an allocation for - // the interface{} conversion here: - return value.Index(idx).Interface().(KeyValue), true - } - - return KeyValue{}, false -} - -// Value returns the value of a specified key in this set. -func (l *Set) Value(k Key) (Value, bool) { - if l == nil || !l.equivalent.Valid() { - return Value{}, false - } - rValue := l.equivalent.reflectValue() - vlen := rValue.Len() - - idx := sort.Search(vlen, func(idx int) bool { - return rValue.Index(idx).Interface().(KeyValue).Key >= k - }) - if idx >= vlen { - return Value{}, false - } - keyValue := rValue.Index(idx).Interface().(KeyValue) - if k == keyValue.Key { - return keyValue.Value, true - } - return Value{}, false -} - -// HasValue tests whether a key is defined in this set. 
-func (l *Set) HasValue(k Key) bool { - if l == nil { - return false - } - _, ok := l.Value(k) - return ok -} - -// Iter returns an iterator for visiting the attributes in this set. -func (l *Set) Iter() Iterator { - return Iterator{ - storage: l, - idx: -1, - } -} - -// ToSlice returns the set of attributes belonging to this set, sorted, where -// keys appear no more than once. -func (l *Set) ToSlice() []KeyValue { - iter := l.Iter() - return iter.ToSlice() -} - -// Equivalent returns a value that may be used as a map key. The Distinct type -// guarantees that the result will equal the equivalent. Distinct value of any -// attribute set with the same elements as this, where sets are made unique by -// choosing the last value in the input for any given key. -func (l *Set) Equivalent() Distinct { - if l == nil || !l.equivalent.Valid() { - return emptySet.equivalent - } - return l.equivalent -} - -// Equals returns true if the argument set is equivalent to this set. -func (l *Set) Equals(o *Set) bool { - return l.Equivalent() == o.Equivalent() -} - -// Encoded returns the encoded form of this set, according to encoder. -func (l *Set) Encoded(encoder Encoder) string { - if l == nil || encoder == nil { - return "" - } - - return encoder.Encode(l.Iter()) -} - -func empty() Set { - return Set{ - equivalent: emptySet.equivalent, - } -} - -// NewSet returns a new Set. See the documentation for -// NewSetWithSortableFiltered for more details. -// -// Except for empty sets, this method adds an additional allocation compared -// with calls that include a Sortable. -func NewSet(kvs ...KeyValue) Set { - // Check for empty set. - if len(kvs) == 0 { - return empty() - } - srt := sortables.Get().(*Sortable) - s, _ := NewSetWithSortableFiltered(kvs, srt, nil) - sortables.Put(srt) - return s -} - -// NewSetWithSortable returns a new Set. See the documentation for -// NewSetWithSortableFiltered for more details. -// -// This call includes a Sortable option as a memory optimization. 
-func NewSetWithSortable(kvs []KeyValue, tmp *Sortable) Set { - // Check for empty set. - if len(kvs) == 0 { - return empty() - } - s, _ := NewSetWithSortableFiltered(kvs, tmp, nil) - return s -} - -// NewSetWithFiltered returns a new Set. See the documentation for -// NewSetWithSortableFiltered for more details. -// -// This call includes a Filter to include/exclude attribute keys from the -// return value. Excluded keys are returned as a slice of attribute values. -func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) { - // Check for empty set. - if len(kvs) == 0 { - return empty(), nil - } - srt := sortables.Get().(*Sortable) - s, filtered := NewSetWithSortableFiltered(kvs, srt, filter) - sortables.Put(srt) - return s, filtered -} - -// NewSetWithSortableFiltered returns a new Set. -// -// Duplicate keys are eliminated by taking the last value. This -// re-orders the input slice so that unique last-values are contiguous -// at the end of the slice. -// -// This ensures the following: -// -// - Last-value-wins semantics -// - Caller sees the reordering, but doesn't lose values -// - Repeated call preserve last-value wins. -// -// Note that methods are defined on Set, although this returns Set. Callers -// can avoid memory allocations by: -// -// - allocating a Sortable for use as a temporary in this method -// - allocating a Set for storing the return value of this constructor. -// -// The result maintains a cache of encoded attributes, by attribute.EncoderID. -// This value should not be copied after its first use. -// -// The second []KeyValue return value is a list of attributes that were -// excluded by the Filter (if non-nil). -func NewSetWithSortableFiltered(kvs []KeyValue, tmp *Sortable, filter Filter) (Set, []KeyValue) { - // Check for empty set. - if len(kvs) == 0 { - return empty(), nil - } - - *tmp = kvs - - // Stable sort so the following de-duplication can implement - // last-value-wins semantics. 
- sort.Stable(tmp) - - *tmp = nil - - position := len(kvs) - 1 - offset := position - 1 - - // The requirements stated above require that the stable - // result be placed in the end of the input slice, while - // overwritten values are swapped to the beginning. - // - // De-duplicate with last-value-wins semantics. Preserve - // duplicate values at the beginning of the input slice. - for ; offset >= 0; offset-- { - if kvs[offset].Key == kvs[position].Key { - continue - } - position-- - kvs[offset], kvs[position] = kvs[position], kvs[offset] - } - if filter != nil { - return filterSet(kvs[position:], filter) - } - return Set{ - equivalent: computeDistinct(kvs[position:]), - }, nil -} - -// filterSet reorders kvs so that included keys are contiguous at the end of -// the slice, while excluded keys precede the included keys. -func filterSet(kvs []KeyValue, filter Filter) (Set, []KeyValue) { - var excluded []KeyValue - - // Move attributes that do not match the filter so they're adjacent before - // calling computeDistinct(). - distinctPosition := len(kvs) - - // Swap indistinct keys forward and distinct keys toward the - // end of the slice. - offset := len(kvs) - 1 - for ; offset >= 0; offset-- { - if filter(kvs[offset]) { - distinctPosition-- - kvs[offset], kvs[distinctPosition] = kvs[distinctPosition], kvs[offset] - continue - } - } - excluded = kvs[:distinctPosition] - - return Set{ - equivalent: computeDistinct(kvs[distinctPosition:]), - }, excluded -} - -// Filter returns a filtered copy of this Set. See the documentation for -// NewSetWithSortableFiltered for more details. -func (l *Set) Filter(re Filter) (Set, []KeyValue) { - if re == nil { - return Set{ - equivalent: l.equivalent, - }, nil - } - - // Note: This could be refactored to avoid the temporary slice - // allocation, if it proves to be expensive. 
- return filterSet(l.ToSlice(), re) -} - -// computeDistinct returns a Distinct using either the fixed- or -// reflect-oriented code path, depending on the size of the input. The input -// slice is assumed to already be sorted and de-duplicated. -func computeDistinct(kvs []KeyValue) Distinct { - iface := computeDistinctFixed(kvs) - if iface == nil { - iface = computeDistinctReflect(kvs) - } - return Distinct{ - iface: iface, - } -} - -// computeDistinctFixed computes a Distinct for small slices. It returns nil -// if the input is too large for this code path. -func computeDistinctFixed(kvs []KeyValue) interface{} { - switch len(kvs) { - case 1: - ptr := new([1]KeyValue) - copy((*ptr)[:], kvs) - return *ptr - case 2: - ptr := new([2]KeyValue) - copy((*ptr)[:], kvs) - return *ptr - case 3: - ptr := new([3]KeyValue) - copy((*ptr)[:], kvs) - return *ptr - case 4: - ptr := new([4]KeyValue) - copy((*ptr)[:], kvs) - return *ptr - case 5: - ptr := new([5]KeyValue) - copy((*ptr)[:], kvs) - return *ptr - case 6: - ptr := new([6]KeyValue) - copy((*ptr)[:], kvs) - return *ptr - case 7: - ptr := new([7]KeyValue) - copy((*ptr)[:], kvs) - return *ptr - case 8: - ptr := new([8]KeyValue) - copy((*ptr)[:], kvs) - return *ptr - case 9: - ptr := new([9]KeyValue) - copy((*ptr)[:], kvs) - return *ptr - case 10: - ptr := new([10]KeyValue) - copy((*ptr)[:], kvs) - return *ptr - default: - return nil - } -} - -// computeDistinctReflect computes a Distinct using reflection, works for any -// size input. -func computeDistinctReflect(kvs []KeyValue) interface{} { - at := reflect.New(reflect.ArrayOf(len(kvs), keyValueType)).Elem() - for i, keyValue := range kvs { - *(at.Index(i).Addr().Interface().(*KeyValue)) = keyValue - } - return at.Interface() -} - -// MarshalJSON returns the JSON encoding of the Set. -func (l *Set) MarshalJSON() ([]byte, error) { - return json.Marshal(l.equivalent.iface) -} - -// MarshalLog is the marshaling function used by the logging system to represent this exporter. 
-func (l Set) MarshalLog() interface{} { - kvs := make(map[string]string) - for _, kv := range l.ToSlice() { - kvs[string(kv.Key)] = kv.Value.Emit() - } - return kvs -} - -// Len implements sort.Interface. -func (l *Sortable) Len() int { - return len(*l) -} - -// Swap implements sort.Interface. -func (l *Sortable) Swap(i, j int) { - (*l)[i], (*l)[j] = (*l)[j], (*l)[i] -} - -// Less implements sort.Interface. -func (l *Sortable) Less(i, j int) bool { - return (*l)[i].Key < (*l)[j].Key -} diff --git a/vendor/go.opentelemetry.io/otel/attribute/type_string.go b/vendor/go.opentelemetry.io/otel/attribute/type_string.go deleted file mode 100644 index e584b247..00000000 --- a/vendor/go.opentelemetry.io/otel/attribute/type_string.go +++ /dev/null @@ -1,31 +0,0 @@ -// Code generated by "stringer -type=Type"; DO NOT EDIT. - -package attribute - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[INVALID-0] - _ = x[BOOL-1] - _ = x[INT64-2] - _ = x[FLOAT64-3] - _ = x[STRING-4] - _ = x[BOOLSLICE-5] - _ = x[INT64SLICE-6] - _ = x[FLOAT64SLICE-7] - _ = x[STRINGSLICE-8] -} - -const _Type_name = "INVALIDBOOLINT64FLOAT64STRINGBOOLSLICEINT64SLICEFLOAT64SLICESTRINGSLICE" - -var _Type_index = [...]uint8{0, 7, 11, 16, 23, 29, 38, 48, 60, 71} - -func (i Type) String() string { - if i < 0 || i >= Type(len(_Type_index)-1) { - return "Type(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _Type_name[_Type_index[i]:_Type_index[i+1]] -} diff --git a/vendor/go.opentelemetry.io/otel/attribute/value.go b/vendor/go.opentelemetry.io/otel/attribute/value.go deleted file mode 100644 index cb21dd5c..00000000 --- a/vendor/go.opentelemetry.io/otel/attribute/value.go +++ /dev/null @@ -1,270 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package attribute // import "go.opentelemetry.io/otel/attribute" - -import ( - "encoding/json" - "fmt" - "reflect" - "strconv" - - "go.opentelemetry.io/otel/internal" - "go.opentelemetry.io/otel/internal/attribute" -) - -//go:generate stringer -type=Type - -// Type describes the type of the data Value holds. -type Type int // nolint: revive // redefines builtin Type. - -// Value represents the value part in key-value pairs. 
-type Value struct { - vtype Type - numeric uint64 - stringly string - slice interface{} -} - -const ( - // INVALID is used for a Value with no value set. - INVALID Type = iota - // BOOL is a boolean Type Value. - BOOL - // INT64 is a 64-bit signed integral Type Value. - INT64 - // FLOAT64 is a 64-bit floating point Type Value. - FLOAT64 - // STRING is a string Type Value. - STRING - // BOOLSLICE is a slice of booleans Type Value. - BOOLSLICE - // INT64SLICE is a slice of 64-bit signed integral numbers Type Value. - INT64SLICE - // FLOAT64SLICE is a slice of 64-bit floating point numbers Type Value. - FLOAT64SLICE - // STRINGSLICE is a slice of strings Type Value. - STRINGSLICE -) - -// BoolValue creates a BOOL Value. -func BoolValue(v bool) Value { - return Value{ - vtype: BOOL, - numeric: internal.BoolToRaw(v), - } -} - -// BoolSliceValue creates a BOOLSLICE Value. -func BoolSliceValue(v []bool) Value { - return Value{vtype: BOOLSLICE, slice: attribute.BoolSliceValue(v)} -} - -// IntValue creates an INT64 Value. -func IntValue(v int) Value { - return Int64Value(int64(v)) -} - -// IntSliceValue creates an INTSLICE Value. -func IntSliceValue(v []int) Value { - var int64Val int64 - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(int64Val))) - for i, val := range v { - cp.Elem().Index(i).SetInt(int64(val)) - } - return Value{ - vtype: INT64SLICE, - slice: cp.Elem().Interface(), - } -} - -// Int64Value creates an INT64 Value. -func Int64Value(v int64) Value { - return Value{ - vtype: INT64, - numeric: internal.Int64ToRaw(v), - } -} - -// Int64SliceValue creates an INT64SLICE Value. -func Int64SliceValue(v []int64) Value { - return Value{vtype: INT64SLICE, slice: attribute.Int64SliceValue(v)} -} - -// Float64Value creates a FLOAT64 Value. -func Float64Value(v float64) Value { - return Value{ - vtype: FLOAT64, - numeric: internal.Float64ToRaw(v), - } -} - -// Float64SliceValue creates a FLOAT64SLICE Value. 
-func Float64SliceValue(v []float64) Value { - return Value{vtype: FLOAT64SLICE, slice: attribute.Float64SliceValue(v)} -} - -// StringValue creates a STRING Value. -func StringValue(v string) Value { - return Value{ - vtype: STRING, - stringly: v, - } -} - -// StringSliceValue creates a STRINGSLICE Value. -func StringSliceValue(v []string) Value { - return Value{vtype: STRINGSLICE, slice: attribute.StringSliceValue(v)} -} - -// Type returns a type of the Value. -func (v Value) Type() Type { - return v.vtype -} - -// AsBool returns the bool value. Make sure that the Value's type is -// BOOL. -func (v Value) AsBool() bool { - return internal.RawToBool(v.numeric) -} - -// AsBoolSlice returns the []bool value. Make sure that the Value's type is -// BOOLSLICE. -func (v Value) AsBoolSlice() []bool { - if v.vtype != BOOLSLICE { - return nil - } - return v.asBoolSlice() -} - -func (v Value) asBoolSlice() []bool { - return attribute.AsBoolSlice(v.slice) -} - -// AsInt64 returns the int64 value. Make sure that the Value's type is -// INT64. -func (v Value) AsInt64() int64 { - return internal.RawToInt64(v.numeric) -} - -// AsInt64Slice returns the []int64 value. Make sure that the Value's type is -// INT64SLICE. -func (v Value) AsInt64Slice() []int64 { - if v.vtype != INT64SLICE { - return nil - } - return v.asInt64Slice() -} - -func (v Value) asInt64Slice() []int64 { - return attribute.AsInt64Slice(v.slice) -} - -// AsFloat64 returns the float64 value. Make sure that the Value's -// type is FLOAT64. -func (v Value) AsFloat64() float64 { - return internal.RawToFloat64(v.numeric) -} - -// AsFloat64Slice returns the []float64 value. Make sure that the Value's type is -// FLOAT64SLICE. -func (v Value) AsFloat64Slice() []float64 { - if v.vtype != FLOAT64SLICE { - return nil - } - return v.asFloat64Slice() -} - -func (v Value) asFloat64Slice() []float64 { - return attribute.AsFloat64Slice(v.slice) -} - -// AsString returns the string value. 
Make sure that the Value's type -// is STRING. -func (v Value) AsString() string { - return v.stringly -} - -// AsStringSlice returns the []string value. Make sure that the Value's type is -// STRINGSLICE. -func (v Value) AsStringSlice() []string { - if v.vtype != STRINGSLICE { - return nil - } - return v.asStringSlice() -} - -func (v Value) asStringSlice() []string { - return attribute.AsStringSlice(v.slice) -} - -type unknownValueType struct{} - -// AsInterface returns Value's data as interface{}. -func (v Value) AsInterface() interface{} { - switch v.Type() { - case BOOL: - return v.AsBool() - case BOOLSLICE: - return v.asBoolSlice() - case INT64: - return v.AsInt64() - case INT64SLICE: - return v.asInt64Slice() - case FLOAT64: - return v.AsFloat64() - case FLOAT64SLICE: - return v.asFloat64Slice() - case STRING: - return v.stringly - case STRINGSLICE: - return v.asStringSlice() - } - return unknownValueType{} -} - -// Emit returns a string representation of Value's data. -func (v Value) Emit() string { - switch v.Type() { - case BOOLSLICE: - return fmt.Sprint(v.asBoolSlice()) - case BOOL: - return strconv.FormatBool(v.AsBool()) - case INT64SLICE: - return fmt.Sprint(v.asInt64Slice()) - case INT64: - return strconv.FormatInt(v.AsInt64(), 10) - case FLOAT64SLICE: - return fmt.Sprint(v.asFloat64Slice()) - case FLOAT64: - return fmt.Sprint(v.AsFloat64()) - case STRINGSLICE: - return fmt.Sprint(v.asStringSlice()) - case STRING: - return v.stringly - default: - return "unknown" - } -} - -// MarshalJSON returns the JSON encoding of the Value. 
-func (v Value) MarshalJSON() ([]byte, error) { - var jsonVal struct { - Type string - Value interface{} - } - jsonVal.Type = v.Type().String() - jsonVal.Value = v.AsInterface() - return json.Marshal(jsonVal) -} diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go deleted file mode 100644 index 46e523a8..00000000 --- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go +++ /dev/null @@ -1,562 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package baggage // import "go.opentelemetry.io/otel/baggage" - -import ( - "errors" - "fmt" - "net/url" - "regexp" - "strings" - - "go.opentelemetry.io/otel/internal/baggage" -) - -const ( - maxMembers = 180 - maxBytesPerMembers = 4096 - maxBytesPerBaggageString = 8192 - - listDelimiter = "," - keyValueDelimiter = "=" - propertyDelimiter = ";" - - keyDef = `([\x21\x23-\x27\x2A\x2B\x2D\x2E\x30-\x39\x41-\x5a\x5e-\x7a\x7c\x7e]+)` - valueDef = `([\x21\x23-\x2b\x2d-\x3a\x3c-\x5B\x5D-\x7e]*)` - keyValueDef = `\s*` + keyDef + `\s*` + keyValueDelimiter + `\s*` + valueDef + `\s*` -) - -var ( - keyRe = regexp.MustCompile(`^` + keyDef + `$`) - valueRe = regexp.MustCompile(`^` + valueDef + `$`) - propertyRe = regexp.MustCompile(`^(?:\s*` + keyDef + `\s*|` + keyValueDef + `)$`) -) - -var ( - errInvalidKey = errors.New("invalid key") - errInvalidValue = errors.New("invalid value") - errInvalidProperty = errors.New("invalid baggage list-member property") - errInvalidMember = errors.New("invalid baggage list-member") - errMemberNumber = errors.New("too many list-members in baggage-string") - errMemberBytes = errors.New("list-member too large") - errBaggageBytes = errors.New("baggage-string too large") -) - -// Property is an additional metadata entry for a baggage list-member. -type Property struct { - key, value string - - // hasValue indicates if a zero-value value means the property does not - // have a value or if it was the zero-value. - hasValue bool - - // hasData indicates whether the created property contains data or not. - // Properties that do not contain data are invalid with no other check - // required. - hasData bool -} - -// NewKeyProperty returns a new Property for key. -// -// If key is invalid, an error will be returned. 
-func NewKeyProperty(key string) (Property, error) { - if !keyRe.MatchString(key) { - return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) - } - - p := Property{key: key, hasData: true} - return p, nil -} - -// NewKeyValueProperty returns a new Property for key with value. -// -// If key or value are invalid, an error will be returned. -func NewKeyValueProperty(key, value string) (Property, error) { - if !keyRe.MatchString(key) { - return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) - } - if !valueRe.MatchString(value) { - return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value) - } - - p := Property{ - key: key, - value: value, - hasValue: true, - hasData: true, - } - return p, nil -} - -func newInvalidProperty() Property { - return Property{} -} - -// parseProperty attempts to decode a Property from the passed string. It -// returns an error if the input is invalid according to the W3C Baggage -// specification. -func parseProperty(property string) (Property, error) { - if property == "" { - return newInvalidProperty(), nil - } - - match := propertyRe.FindStringSubmatch(property) - if len(match) != 4 { - return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidProperty, property) - } - - p := Property{hasData: true} - if match[1] != "" { - p.key = match[1] - } else { - p.key = match[2] - p.value = match[3] - p.hasValue = true - } - - return p, nil -} - -// validate ensures p conforms to the W3C Baggage specification, returning an -// error otherwise. 
-func (p Property) validate() error { - errFunc := func(err error) error { - return fmt.Errorf("invalid property: %w", err) - } - - if !p.hasData { - return errFunc(fmt.Errorf("%w: %q", errInvalidProperty, p)) - } - - if !keyRe.MatchString(p.key) { - return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key)) - } - if p.hasValue && !valueRe.MatchString(p.value) { - return errFunc(fmt.Errorf("%w: %q", errInvalidValue, p.value)) - } - if !p.hasValue && p.value != "" { - return errFunc(errors.New("inconsistent value")) - } - return nil -} - -// Key returns the Property key. -func (p Property) Key() string { - return p.key -} - -// Value returns the Property value. Additionally, a boolean value is returned -// indicating if the returned value is the empty if the Property has a value -// that is empty or if the value is not set. -func (p Property) Value() (string, bool) { - return p.value, p.hasValue -} - -// String encodes Property into a string compliant with the W3C Baggage -// specification. 
-func (p Property) String() string { - if p.hasValue { - return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, p.value) - } - return p.key -} - -type properties []Property - -func fromInternalProperties(iProps []baggage.Property) properties { - if len(iProps) == 0 { - return nil - } - - props := make(properties, len(iProps)) - for i, p := range iProps { - props[i] = Property{ - key: p.Key, - value: p.Value, - hasValue: p.HasValue, - } - } - return props -} - -func (p properties) asInternal() []baggage.Property { - if len(p) == 0 { - return nil - } - - iProps := make([]baggage.Property, len(p)) - for i, prop := range p { - iProps[i] = baggage.Property{ - Key: prop.key, - Value: prop.value, - HasValue: prop.hasValue, - } - } - return iProps -} - -func (p properties) Copy() properties { - if len(p) == 0 { - return nil - } - - props := make(properties, len(p)) - copy(props, p) - return props -} - -// validate ensures each Property in p conforms to the W3C Baggage -// specification, returning an error otherwise. -func (p properties) validate() error { - for _, prop := range p { - if err := prop.validate(); err != nil { - return err - } - } - return nil -} - -// String encodes properties into a string compliant with the W3C Baggage -// specification. -func (p properties) String() string { - props := make([]string, len(p)) - for i, prop := range p { - props[i] = prop.String() - } - return strings.Join(props, propertyDelimiter) -} - -// Member is a list-member of a baggage-string as defined by the W3C Baggage -// specification. -type Member struct { - key, value string - properties properties - - // hasData indicates whether the created property contains data or not. - // Properties that do not contain data are invalid with no other check - // required. - hasData bool -} - -// NewMember returns a new Member from the passed arguments. The key will be -// used directly while the value will be url decoded after validation. 
An error -// is returned if the created Member would be invalid according to the W3C -// Baggage specification. -func NewMember(key, value string, props ...Property) (Member, error) { - m := Member{ - key: key, - value: value, - properties: properties(props).Copy(), - hasData: true, - } - if err := m.validate(); err != nil { - return newInvalidMember(), err - } - decodedValue, err := url.QueryUnescape(value) - if err != nil { - return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) - } - m.value = decodedValue - return m, nil -} - -func newInvalidMember() Member { - return Member{} -} - -// parseMember attempts to decode a Member from the passed string. It returns -// an error if the input is invalid according to the W3C Baggage -// specification. -func parseMember(member string) (Member, error) { - if n := len(member); n > maxBytesPerMembers { - return newInvalidMember(), fmt.Errorf("%w: %d", errMemberBytes, n) - } - - var ( - key, value string - props properties - ) - - keyValue, properties, found := strings.Cut(member, propertyDelimiter) - if found { - // Parse the member properties. - for _, pStr := range strings.Split(properties, propertyDelimiter) { - p, err := parseProperty(pStr) - if err != nil { - return newInvalidMember(), err - } - props = append(props, p) - } - } - // Parse the member key/value pair. - - // Take into account a value can contain equal signs (=). - k, v, found := strings.Cut(keyValue, keyValueDelimiter) - if !found { - return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidMember, member) - } - // "Leading and trailing whitespaces are allowed but MUST be trimmed - // when converting the header into a data structure." 
- key = strings.TrimSpace(k) - var err error - value, err = url.QueryUnescape(strings.TrimSpace(v)) - if err != nil { - return newInvalidMember(), fmt.Errorf("%w: %q", err, value) - } - if !keyRe.MatchString(key) { - return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key) - } - if !valueRe.MatchString(value) { - return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) - } - - return Member{key: key, value: value, properties: props, hasData: true}, nil -} - -// validate ensures m conforms to the W3C Baggage specification. -// A key is just an ASCII string, but a value must be URL encoded UTF-8, -// returning an error otherwise. -func (m Member) validate() error { - if !m.hasData { - return fmt.Errorf("%w: %q", errInvalidMember, m) - } - - if !keyRe.MatchString(m.key) { - return fmt.Errorf("%w: %q", errInvalidKey, m.key) - } - if !valueRe.MatchString(m.value) { - return fmt.Errorf("%w: %q", errInvalidValue, m.value) - } - return m.properties.validate() -} - -// Key returns the Member key. -func (m Member) Key() string { return m.key } - -// Value returns the Member value. -func (m Member) Value() string { return m.value } - -// Properties returns a copy of the Member properties. -func (m Member) Properties() []Property { return m.properties.Copy() } - -// String encodes Member into a string compliant with the W3C Baggage -// specification. -func (m Member) String() string { - // A key is just an ASCII string, but a value is URL encoded UTF-8. - s := fmt.Sprintf("%s%s%s", m.key, keyValueDelimiter, url.QueryEscape(m.value)) - if len(m.properties) > 0 { - s = fmt.Sprintf("%s%s%s", s, propertyDelimiter, m.properties.String()) - } - return s -} - -// Baggage is a list of baggage members representing the baggage-string as -// defined by the W3C Baggage specification. -type Baggage struct { //nolint:golint - list baggage.List -} - -// New returns a new valid Baggage. 
It returns an error if it results in a -// Baggage exceeding limits set in that specification. -// -// It expects all the provided members to have already been validated. -func New(members ...Member) (Baggage, error) { - if len(members) == 0 { - return Baggage{}, nil - } - - b := make(baggage.List) - for _, m := range members { - if !m.hasData { - return Baggage{}, errInvalidMember - } - - // OpenTelemetry resolves duplicates by last-one-wins. - b[m.key] = baggage.Item{ - Value: m.value, - Properties: m.properties.asInternal(), - } - } - - // Check member numbers after deduplication. - if len(b) > maxMembers { - return Baggage{}, errMemberNumber - } - - bag := Baggage{b} - if n := len(bag.String()); n > maxBytesPerBaggageString { - return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n) - } - - return bag, nil -} - -// Parse attempts to decode a baggage-string from the passed string. It -// returns an error if the input is invalid according to the W3C Baggage -// specification. -// -// If there are duplicate list-members contained in baggage, the last one -// defined (reading left-to-right) will be the only one kept. This diverges -// from the W3C Baggage specification which allows duplicate list-members, but -// conforms to the OpenTelemetry Baggage specification. -func Parse(bStr string) (Baggage, error) { - if bStr == "" { - return Baggage{}, nil - } - - if n := len(bStr); n > maxBytesPerBaggageString { - return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n) - } - - b := make(baggage.List) - for _, memberStr := range strings.Split(bStr, listDelimiter) { - m, err := parseMember(memberStr) - if err != nil { - return Baggage{}, err - } - // OpenTelemetry resolves duplicates by last-one-wins. - b[m.key] = baggage.Item{ - Value: m.value, - Properties: m.properties.asInternal(), - } - } - - // OpenTelemetry does not allow for duplicate list-members, but the W3C - // specification does. 
Now that we have deduplicated, ensure the baggage - // does not exceed list-member limits. - if len(b) > maxMembers { - return Baggage{}, errMemberNumber - } - - return Baggage{b}, nil -} - -// Member returns the baggage list-member identified by key. -// -// If there is no list-member matching the passed key the returned Member will -// be a zero-value Member. -// The returned member is not validated, as we assume the validation happened -// when it was added to the Baggage. -func (b Baggage) Member(key string) Member { - v, ok := b.list[key] - if !ok { - // We do not need to worry about distinguishing between the situation - // where a zero-valued Member is included in the Baggage because a - // zero-valued Member is invalid according to the W3C Baggage - // specification (it has an empty key). - return newInvalidMember() - } - - return Member{ - key: key, - value: v.Value, - properties: fromInternalProperties(v.Properties), - hasData: true, - } -} - -// Members returns all the baggage list-members. -// The order of the returned list-members does not have significance. -// -// The returned members are not validated, as we assume the validation happened -// when they were added to the Baggage. -func (b Baggage) Members() []Member { - if len(b.list) == 0 { - return nil - } - - members := make([]Member, 0, len(b.list)) - for k, v := range b.list { - members = append(members, Member{ - key: k, - value: v.Value, - properties: fromInternalProperties(v.Properties), - hasData: true, - }) - } - return members -} - -// SetMember returns a copy the Baggage with the member included. If the -// baggage contains a Member with the same key the existing Member is -// replaced. -// -// If member is invalid according to the W3C Baggage specification, an error -// is returned with the original Baggage. 
-func (b Baggage) SetMember(member Member) (Baggage, error) { - if !member.hasData { - return b, errInvalidMember - } - - n := len(b.list) - if _, ok := b.list[member.key]; !ok { - n++ - } - list := make(baggage.List, n) - - for k, v := range b.list { - // Do not copy if we are just going to overwrite. - if k == member.key { - continue - } - list[k] = v - } - - list[member.key] = baggage.Item{ - Value: member.value, - Properties: member.properties.asInternal(), - } - - return Baggage{list: list}, nil -} - -// DeleteMember returns a copy of the Baggage with the list-member identified -// by key removed. -func (b Baggage) DeleteMember(key string) Baggage { - n := len(b.list) - if _, ok := b.list[key]; ok { - n-- - } - list := make(baggage.List, n) - - for k, v := range b.list { - if k == key { - continue - } - list[k] = v - } - - return Baggage{list: list} -} - -// Len returns the number of list-members in the Baggage. -func (b Baggage) Len() int { - return len(b.list) -} - -// String encodes Baggage into a string compliant with the W3C Baggage -// specification. The returned string will be invalid if the Baggage contains -// any invalid list-members. -func (b Baggage) String() string { - members := make([]string, 0, len(b.list)) - for k, v := range b.list { - members = append(members, Member{ - key: k, - value: v.Value, - properties: fromInternalProperties(v.Properties), - }.String()) - } - return strings.Join(members, listDelimiter) -} diff --git a/vendor/go.opentelemetry.io/otel/baggage/context.go b/vendor/go.opentelemetry.io/otel/baggage/context.go deleted file mode 100644 index 24b34b75..00000000 --- a/vendor/go.opentelemetry.io/otel/baggage/context.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package baggage // import "go.opentelemetry.io/otel/baggage" - -import ( - "context" - - "go.opentelemetry.io/otel/internal/baggage" -) - -// ContextWithBaggage returns a copy of parent with baggage. -func ContextWithBaggage(parent context.Context, b Baggage) context.Context { - // Delegate so any hooks for the OpenTracing bridge are handled. - return baggage.ContextWithList(parent, b.list) -} - -// ContextWithoutBaggage returns a copy of parent with no baggage. -func ContextWithoutBaggage(parent context.Context) context.Context { - // Delegate so any hooks for the OpenTracing bridge are handled. - return baggage.ContextWithList(parent, nil) -} - -// FromContext returns the baggage contained in ctx. -func FromContext(ctx context.Context) Baggage { - // Delegate so any hooks for the OpenTracing bridge are handled. - return Baggage{list: baggage.ListFromContext(ctx)} -} diff --git a/vendor/go.opentelemetry.io/otel/baggage/doc.go b/vendor/go.opentelemetry.io/otel/baggage/doc.go deleted file mode 100644 index 4545100d..00000000 --- a/vendor/go.opentelemetry.io/otel/baggage/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package baggage provides functionality for storing and retrieving -baggage items in Go context. For propagating the baggage, see the -go.opentelemetry.io/otel/propagation package. -*/ -package baggage // import "go.opentelemetry.io/otel/baggage" diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go deleted file mode 100644 index 587ebae4..00000000 --- a/vendor/go.opentelemetry.io/otel/codes/codes.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package codes // import "go.opentelemetry.io/otel/codes" - -import ( - "encoding/json" - "fmt" - "strconv" -) - -const ( - // Unset is the default status code. - Unset Code = 0 - - // Error indicates the operation contains an error. - // - // NOTE: The error code in OTLP is 2. - // The value of this enum is only relevant to the internals - // of the Go SDK. 
- Error Code = 1 - - // Ok indicates operation has been validated by an Application developers - // or Operator to have completed successfully, or contain no error. - // - // NOTE: The Ok code in OTLP is 1. - // The value of this enum is only relevant to the internals - // of the Go SDK. - Ok Code = 2 - - maxCode = 3 -) - -// Code is an 32-bit representation of a status state. -type Code uint32 - -var codeToStr = map[Code]string{ - Unset: "Unset", - Error: "Error", - Ok: "Ok", -} - -var strToCode = map[string]Code{ - `"Unset"`: Unset, - `"Error"`: Error, - `"Ok"`: Ok, -} - -// String returns the Code as a string. -func (c Code) String() string { - return codeToStr[c] -} - -// UnmarshalJSON unmarshals b into the Code. -// -// This is based on the functionality in the gRPC codes package: -// https://github.com/grpc/grpc-go/blob/bb64fee312b46ebee26be43364a7a966033521b1/codes/codes.go#L218-L244 -func (c *Code) UnmarshalJSON(b []byte) error { - // From json.Unmarshaler: By convention, to approximate the behavior of - // Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as - // a no-op. - if string(b) == "null" { - return nil - } - if c == nil { - return fmt.Errorf("nil receiver passed to UnmarshalJSON") - } - - var x interface{} - if err := json.Unmarshal(b, &x); err != nil { - return err - } - switch x.(type) { - case string: - if jc, ok := strToCode[string(b)]; ok { - *c = jc - return nil - } - return fmt.Errorf("invalid code: %q", string(b)) - case float64: - if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil { - if ci >= maxCode { - return fmt.Errorf("invalid code: %q", ci) - } - - *c = Code(ci) - return nil - } - return fmt.Errorf("invalid code: %q", string(b)) - default: - return fmt.Errorf("invalid code: %q", string(b)) - } -} - -// MarshalJSON returns c as the JSON encoding of c. 
-func (c *Code) MarshalJSON() ([]byte, error) { - if c == nil { - return []byte("null"), nil - } - str, ok := codeToStr[*c] - if !ok { - return nil, fmt.Errorf("invalid code: %d", *c) - } - return []byte(fmt.Sprintf("%q", str)), nil -} diff --git a/vendor/go.opentelemetry.io/otel/codes/doc.go b/vendor/go.opentelemetry.io/otel/codes/doc.go deleted file mode 100644 index 4e328fbb..00000000 --- a/vendor/go.opentelemetry.io/otel/codes/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package codes defines the canonical error codes used by OpenTelemetry. - -It conforms to [the OpenTelemetry -specification](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/trace/api.md#set-status). -*/ -package codes // import "go.opentelemetry.io/otel/codes" diff --git a/vendor/go.opentelemetry.io/otel/doc.go b/vendor/go.opentelemetry.io/otel/doc.go deleted file mode 100644 index daa36c89..00000000 --- a/vendor/go.opentelemetry.io/otel/doc.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package otel provides global access to the OpenTelemetry API. The subpackages of -the otel package provide an implementation of the OpenTelemetry API. - -The provided API is used to instrument code and measure data about that code's -performance and operation. The measured data, by default, is not processed or -transmitted anywhere. An implementation of the OpenTelemetry SDK, like the -default SDK implementation (go.opentelemetry.io/otel/sdk), and associated -exporters are used to process and transport this data. - -To read the getting started guide, see https://opentelemetry.io/docs/go/getting-started/. - -To read more about tracing, see go.opentelemetry.io/otel/trace. - -To read more about metrics, see go.opentelemetry.io/otel/metric. - -To read more about propagation, see go.opentelemetry.io/otel/propagation and -go.opentelemetry.io/otel/baggage. -*/ -package otel // import "go.opentelemetry.io/otel" diff --git a/vendor/go.opentelemetry.io/otel/error_handler.go b/vendor/go.opentelemetry.io/otel/error_handler.go deleted file mode 100644 index 72fad854..00000000 --- a/vendor/go.opentelemetry.io/otel/error_handler.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package otel // import "go.opentelemetry.io/otel" - -// ErrorHandler handles irremediable events. -type ErrorHandler interface { - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. - - // Handle handles any error deemed irremediable by an OpenTelemetry - // component. - Handle(error) - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. -} - -// ErrorHandlerFunc is a convenience adapter to allow the use of a function -// as an ErrorHandler. -type ErrorHandlerFunc func(error) - -var _ ErrorHandler = ErrorHandlerFunc(nil) - -// Handle handles the irremediable error by calling the ErrorHandlerFunc itself. -func (f ErrorHandlerFunc) Handle(err error) { - f(err) -} diff --git a/vendor/go.opentelemetry.io/otel/handler.go b/vendor/go.opentelemetry.io/otel/handler.go deleted file mode 100644 index 4115fe3b..00000000 --- a/vendor/go.opentelemetry.io/otel/handler.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package otel // import "go.opentelemetry.io/otel" - -import ( - "go.opentelemetry.io/otel/internal/global" -) - -var ( - // Compile-time check global.ErrDelegator implements ErrorHandler. - _ ErrorHandler = (*global.ErrDelegator)(nil) - // Compile-time check global.ErrLogger implements ErrorHandler. - _ ErrorHandler = (*global.ErrLogger)(nil) -) - -// GetErrorHandler returns the global ErrorHandler instance. -// -// The default ErrorHandler instance returned will log all errors to STDERR -// until an override ErrorHandler is set with SetErrorHandler. All -// ErrorHandler returned prior to this will automatically forward errors to -// the set instance instead of logging. -// -// Subsequent calls to SetErrorHandler after the first will not forward errors -// to the new ErrorHandler for prior returned instances. -func GetErrorHandler() ErrorHandler { return global.GetErrorHandler() } - -// SetErrorHandler sets the global ErrorHandler to h. -// -// The first time this is called all ErrorHandler previously returned from -// GetErrorHandler will send errors to h instead of the default logging -// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not -// delegate errors to h. -func SetErrorHandler(h ErrorHandler) { global.SetErrorHandler(h) } - -// Handle is a convenience function for ErrorHandler().Handle(err). -func Handle(err error) { global.Handle(err) } diff --git a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go deleted file mode 100644 index 622c3ee3..00000000 --- a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package attribute provide several helper functions for some commonly used -logic of processing attributes. -*/ -package attribute // import "go.opentelemetry.io/otel/internal/attribute" - -import ( - "reflect" -) - -// BoolSliceValue converts a bool slice into an array with same elements as slice. -func BoolSliceValue(v []bool) interface{} { - var zero bool - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) - copy(cp.Elem().Slice(0, len(v)).Interface().([]bool), v) - return cp.Elem().Interface() -} - -// Int64SliceValue converts an int64 slice into an array with same elements as slice. -func Int64SliceValue(v []int64) interface{} { - var zero int64 - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) - copy(cp.Elem().Slice(0, len(v)).Interface().([]int64), v) - return cp.Elem().Interface() -} - -// Float64SliceValue converts a float64 slice into an array with same elements as slice. -func Float64SliceValue(v []float64) interface{} { - var zero float64 - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) - copy(cp.Elem().Slice(0, len(v)).Interface().([]float64), v) - return cp.Elem().Interface() -} - -// StringSliceValue converts a string slice into an array with same elements as slice. -func StringSliceValue(v []string) interface{} { - var zero string - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) - copy(cp.Elem().Slice(0, len(v)).Interface().([]string), v) - return cp.Elem().Interface() -} - -// AsBoolSlice converts a bool array into a slice into with same elements as array. 
-func AsBoolSlice(v interface{}) []bool { - rv := reflect.ValueOf(v) - if rv.Type().Kind() != reflect.Array { - return nil - } - var zero bool - correctLen := rv.Len() - correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) - cpy := reflect.New(correctType) - _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]bool) -} - -// AsInt64Slice converts an int64 array into a slice into with same elements as array. -func AsInt64Slice(v interface{}) []int64 { - rv := reflect.ValueOf(v) - if rv.Type().Kind() != reflect.Array { - return nil - } - var zero int64 - correctLen := rv.Len() - correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) - cpy := reflect.New(correctType) - _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]int64) -} - -// AsFloat64Slice converts a float64 array into a slice into with same elements as array. -func AsFloat64Slice(v interface{}) []float64 { - rv := reflect.ValueOf(v) - if rv.Type().Kind() != reflect.Array { - return nil - } - var zero float64 - correctLen := rv.Len() - correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) - cpy := reflect.New(correctType) - _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]float64) -} - -// AsStringSlice converts a string array into a slice into with same elements as array. 
-func AsStringSlice(v interface{}) []string { - rv := reflect.ValueOf(v) - if rv.Type().Kind() != reflect.Array { - return nil - } - var zero string - correctLen := rv.Len() - correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) - cpy := reflect.New(correctType) - _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]string) -} diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go deleted file mode 100644 index b96e5408..00000000 --- a/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package baggage provides base types and functionality to store and retrieve -baggage in Go context. This package exists because the OpenTracing bridge to -OpenTelemetry needs to synchronize state whenever baggage for a context is -modified and that context contains an OpenTracing span. If it were not for -this need this package would not need to exist and the -`go.opentelemetry.io/otel/baggage` package would be the singular place where -W3C baggage is handled. -*/ -package baggage // import "go.opentelemetry.io/otel/internal/baggage" - -// List is the collection of baggage members. The W3C allows for duplicates, -// but OpenTelemetry does not, therefore, this is represented as a map. 
-type List map[string]Item - -// Item is the value and metadata properties part of a list-member. -type Item struct { - Value string - Properties []Property -} - -// Property is a metadata entry for a list-member. -type Property struct { - Key, Value string - - // HasValue indicates if a zero-value value means the property does not - // have a value or if it was the zero-value. - HasValue bool -} diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/context.go b/vendor/go.opentelemetry.io/otel/internal/baggage/context.go deleted file mode 100644 index 4469700d..00000000 --- a/vendor/go.opentelemetry.io/otel/internal/baggage/context.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package baggage // import "go.opentelemetry.io/otel/internal/baggage" - -import "context" - -type baggageContextKeyType int - -const baggageKey baggageContextKeyType = iota - -// SetHookFunc is a callback called when storing baggage in the context. -type SetHookFunc func(context.Context, List) context.Context - -// GetHookFunc is a callback called when getting baggage from the context. -type GetHookFunc func(context.Context, List) List - -type baggageState struct { - list List - - setHook SetHookFunc - getHook GetHookFunc -} - -// ContextWithSetHook returns a copy of parent with hook configured to be -// invoked every time ContextWithBaggage is called. 
-// -// Passing nil SetHookFunc creates a context with no set hook to call. -func ContextWithSetHook(parent context.Context, hook SetHookFunc) context.Context { - var s baggageState - if v, ok := parent.Value(baggageKey).(baggageState); ok { - s = v - } - - s.setHook = hook - return context.WithValue(parent, baggageKey, s) -} - -// ContextWithGetHook returns a copy of parent with hook configured to be -// invoked every time FromContext is called. -// -// Passing nil GetHookFunc creates a context with no get hook to call. -func ContextWithGetHook(parent context.Context, hook GetHookFunc) context.Context { - var s baggageState - if v, ok := parent.Value(baggageKey).(baggageState); ok { - s = v - } - - s.getHook = hook - return context.WithValue(parent, baggageKey, s) -} - -// ContextWithList returns a copy of parent with baggage. Passing nil list -// returns a context without any baggage. -func ContextWithList(parent context.Context, list List) context.Context { - var s baggageState - if v, ok := parent.Value(baggageKey).(baggageState); ok { - s = v - } - - s.list = list - ctx := context.WithValue(parent, baggageKey, s) - if s.setHook != nil { - ctx = s.setHook(ctx, list) - } - - return ctx -} - -// ListFromContext returns the baggage contained in ctx. -func ListFromContext(ctx context.Context) List { - switch v := ctx.Value(baggageKey).(type) { - case baggageState: - if v.getHook != nil { - return v.getHook(ctx, v.list) - } - return v.list - default: - return nil - } -} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/handler.go b/vendor/go.opentelemetry.io/otel/internal/global/handler.go deleted file mode 100644 index 3dcd1caa..00000000 --- a/vendor/go.opentelemetry.io/otel/internal/global/handler.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package global // import "go.opentelemetry.io/otel/internal/global" - -import ( - "log" - "os" - "sync/atomic" - "unsafe" -) - -var ( - // GlobalErrorHandler provides an ErrorHandler that can be used - // throughout an OpenTelemetry instrumented project. When a user - // specified ErrorHandler is registered (`SetErrorHandler`) all calls to - // `Handle` and will be delegated to the registered ErrorHandler. - GlobalErrorHandler = defaultErrorHandler() - - // Compile-time check that delegator implements ErrorHandler. - _ ErrorHandler = (*ErrDelegator)(nil) - // Compile-time check that errLogger implements ErrorHandler. - _ ErrorHandler = (*ErrLogger)(nil) -) - -// ErrorHandler handles irremediable events. -type ErrorHandler interface { - // Handle handles any error deemed irremediable by an OpenTelemetry - // component. - Handle(error) -} - -type ErrDelegator struct { - delegate unsafe.Pointer -} - -func (d *ErrDelegator) Handle(err error) { - d.getDelegate().Handle(err) -} - -func (d *ErrDelegator) getDelegate() ErrorHandler { - return *(*ErrorHandler)(atomic.LoadPointer(&d.delegate)) -} - -// setDelegate sets the ErrorHandler delegate. -func (d *ErrDelegator) setDelegate(eh ErrorHandler) { - atomic.StorePointer(&d.delegate, unsafe.Pointer(&eh)) -} - -func defaultErrorHandler() *ErrDelegator { - d := &ErrDelegator{} - d.setDelegate(&ErrLogger{l: log.New(os.Stderr, "", log.LstdFlags)}) - return d -} - -// ErrLogger logs errors if no delegate is set, otherwise they are delegated. 
-type ErrLogger struct { - l *log.Logger -} - -// Handle logs err if no delegate is set, otherwise it is delegated. -func (h *ErrLogger) Handle(err error) { - h.l.Print(err) -} - -// GetErrorHandler returns the global ErrorHandler instance. -// -// The default ErrorHandler instance returned will log all errors to STDERR -// until an override ErrorHandler is set with SetErrorHandler. All -// ErrorHandler returned prior to this will automatically forward errors to -// the set instance instead of logging. -// -// Subsequent calls to SetErrorHandler after the first will not forward errors -// to the new ErrorHandler for prior returned instances. -func GetErrorHandler() ErrorHandler { - return GlobalErrorHandler -} - -// SetErrorHandler sets the global ErrorHandler to h. -// -// The first time this is called all ErrorHandler previously returned from -// GetErrorHandler will send errors to h instead of the default logging -// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not -// delegate errors to h. -func SetErrorHandler(h ErrorHandler) { - GlobalErrorHandler.setDelegate(h) -} - -// Handle is a convenience function for ErrorHandler().Handle(err). -func Handle(err error) { - GetErrorHandler().Handle(err) -} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go deleted file mode 100644 index a33eded8..00000000 --- a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go +++ /dev/null @@ -1,359 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package global // import "go.opentelemetry.io/otel/internal/global" - -import ( - "context" - "sync/atomic" - - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/metric/embedded" -) - -// unwrapper unwraps to return the underlying instrument implementation. -type unwrapper interface { - Unwrap() metric.Observable -} - -type afCounter struct { - embedded.Float64ObservableCounter - metric.Float64Observable - - name string - opts []metric.Float64ObservableCounterOption - - delegate atomic.Value //metric.Float64ObservableCounter -} - -var _ unwrapper = (*afCounter)(nil) -var _ metric.Float64ObservableCounter = (*afCounter)(nil) - -func (i *afCounter) setDelegate(m metric.Meter) { - ctr, err := m.Float64ObservableCounter(i.name, i.opts...) - if err != nil { - GetErrorHandler().Handle(err) - return - } - i.delegate.Store(ctr) -} - -func (i *afCounter) Unwrap() metric.Observable { - if ctr := i.delegate.Load(); ctr != nil { - return ctr.(metric.Float64ObservableCounter) - } - return nil -} - -type afUpDownCounter struct { - embedded.Float64ObservableUpDownCounter - metric.Float64Observable - - name string - opts []metric.Float64ObservableUpDownCounterOption - - delegate atomic.Value //metric.Float64ObservableUpDownCounter -} - -var _ unwrapper = (*afUpDownCounter)(nil) -var _ metric.Float64ObservableUpDownCounter = (*afUpDownCounter)(nil) - -func (i *afUpDownCounter) setDelegate(m metric.Meter) { - ctr, err := m.Float64ObservableUpDownCounter(i.name, i.opts...) 
- if err != nil { - GetErrorHandler().Handle(err) - return - } - i.delegate.Store(ctr) -} - -func (i *afUpDownCounter) Unwrap() metric.Observable { - if ctr := i.delegate.Load(); ctr != nil { - return ctr.(metric.Float64ObservableUpDownCounter) - } - return nil -} - -type afGauge struct { - embedded.Float64ObservableGauge - metric.Float64Observable - - name string - opts []metric.Float64ObservableGaugeOption - - delegate atomic.Value //metric.Float64ObservableGauge -} - -var _ unwrapper = (*afGauge)(nil) -var _ metric.Float64ObservableGauge = (*afGauge)(nil) - -func (i *afGauge) setDelegate(m metric.Meter) { - ctr, err := m.Float64ObservableGauge(i.name, i.opts...) - if err != nil { - GetErrorHandler().Handle(err) - return - } - i.delegate.Store(ctr) -} - -func (i *afGauge) Unwrap() metric.Observable { - if ctr := i.delegate.Load(); ctr != nil { - return ctr.(metric.Float64ObservableGauge) - } - return nil -} - -type aiCounter struct { - embedded.Int64ObservableCounter - metric.Int64Observable - - name string - opts []metric.Int64ObservableCounterOption - - delegate atomic.Value //metric.Int64ObservableCounter -} - -var _ unwrapper = (*aiCounter)(nil) -var _ metric.Int64ObservableCounter = (*aiCounter)(nil) - -func (i *aiCounter) setDelegate(m metric.Meter) { - ctr, err := m.Int64ObservableCounter(i.name, i.opts...) 
- if err != nil { - GetErrorHandler().Handle(err) - return - } - i.delegate.Store(ctr) -} - -func (i *aiCounter) Unwrap() metric.Observable { - if ctr := i.delegate.Load(); ctr != nil { - return ctr.(metric.Int64ObservableCounter) - } - return nil -} - -type aiUpDownCounter struct { - embedded.Int64ObservableUpDownCounter - metric.Int64Observable - - name string - opts []metric.Int64ObservableUpDownCounterOption - - delegate atomic.Value //metric.Int64ObservableUpDownCounter -} - -var _ unwrapper = (*aiUpDownCounter)(nil) -var _ metric.Int64ObservableUpDownCounter = (*aiUpDownCounter)(nil) - -func (i *aiUpDownCounter) setDelegate(m metric.Meter) { - ctr, err := m.Int64ObservableUpDownCounter(i.name, i.opts...) - if err != nil { - GetErrorHandler().Handle(err) - return - } - i.delegate.Store(ctr) -} - -func (i *aiUpDownCounter) Unwrap() metric.Observable { - if ctr := i.delegate.Load(); ctr != nil { - return ctr.(metric.Int64ObservableUpDownCounter) - } - return nil -} - -type aiGauge struct { - embedded.Int64ObservableGauge - metric.Int64Observable - - name string - opts []metric.Int64ObservableGaugeOption - - delegate atomic.Value //metric.Int64ObservableGauge -} - -var _ unwrapper = (*aiGauge)(nil) -var _ metric.Int64ObservableGauge = (*aiGauge)(nil) - -func (i *aiGauge) setDelegate(m metric.Meter) { - ctr, err := m.Int64ObservableGauge(i.name, i.opts...) - if err != nil { - GetErrorHandler().Handle(err) - return - } - i.delegate.Store(ctr) -} - -func (i *aiGauge) Unwrap() metric.Observable { - if ctr := i.delegate.Load(); ctr != nil { - return ctr.(metric.Int64ObservableGauge) - } - return nil -} - -// Sync Instruments. -type sfCounter struct { - embedded.Float64Counter - - name string - opts []metric.Float64CounterOption - - delegate atomic.Value //metric.Float64Counter -} - -var _ metric.Float64Counter = (*sfCounter)(nil) - -func (i *sfCounter) setDelegate(m metric.Meter) { - ctr, err := m.Float64Counter(i.name, i.opts...) 
- if err != nil { - GetErrorHandler().Handle(err) - return - } - i.delegate.Store(ctr) -} - -func (i *sfCounter) Add(ctx context.Context, incr float64, opts ...metric.AddOption) { - if ctr := i.delegate.Load(); ctr != nil { - ctr.(metric.Float64Counter).Add(ctx, incr, opts...) - } -} - -type sfUpDownCounter struct { - embedded.Float64UpDownCounter - - name string - opts []metric.Float64UpDownCounterOption - - delegate atomic.Value //metric.Float64UpDownCounter -} - -var _ metric.Float64UpDownCounter = (*sfUpDownCounter)(nil) - -func (i *sfUpDownCounter) setDelegate(m metric.Meter) { - ctr, err := m.Float64UpDownCounter(i.name, i.opts...) - if err != nil { - GetErrorHandler().Handle(err) - return - } - i.delegate.Store(ctr) -} - -func (i *sfUpDownCounter) Add(ctx context.Context, incr float64, opts ...metric.AddOption) { - if ctr := i.delegate.Load(); ctr != nil { - ctr.(metric.Float64UpDownCounter).Add(ctx, incr, opts...) - } -} - -type sfHistogram struct { - embedded.Float64Histogram - - name string - opts []metric.Float64HistogramOption - - delegate atomic.Value //metric.Float64Histogram -} - -var _ metric.Float64Histogram = (*sfHistogram)(nil) - -func (i *sfHistogram) setDelegate(m metric.Meter) { - ctr, err := m.Float64Histogram(i.name, i.opts...) - if err != nil { - GetErrorHandler().Handle(err) - return - } - i.delegate.Store(ctr) -} - -func (i *sfHistogram) Record(ctx context.Context, x float64, opts ...metric.RecordOption) { - if ctr := i.delegate.Load(); ctr != nil { - ctr.(metric.Float64Histogram).Record(ctx, x, opts...) - } -} - -type siCounter struct { - embedded.Int64Counter - - name string - opts []metric.Int64CounterOption - - delegate atomic.Value //metric.Int64Counter -} - -var _ metric.Int64Counter = (*siCounter)(nil) - -func (i *siCounter) setDelegate(m metric.Meter) { - ctr, err := m.Int64Counter(i.name, i.opts...) 
- if err != nil { - GetErrorHandler().Handle(err) - return - } - i.delegate.Store(ctr) -} - -func (i *siCounter) Add(ctx context.Context, x int64, opts ...metric.AddOption) { - if ctr := i.delegate.Load(); ctr != nil { - ctr.(metric.Int64Counter).Add(ctx, x, opts...) - } -} - -type siUpDownCounter struct { - embedded.Int64UpDownCounter - - name string - opts []metric.Int64UpDownCounterOption - - delegate atomic.Value //metric.Int64UpDownCounter -} - -var _ metric.Int64UpDownCounter = (*siUpDownCounter)(nil) - -func (i *siUpDownCounter) setDelegate(m metric.Meter) { - ctr, err := m.Int64UpDownCounter(i.name, i.opts...) - if err != nil { - GetErrorHandler().Handle(err) - return - } - i.delegate.Store(ctr) -} - -func (i *siUpDownCounter) Add(ctx context.Context, x int64, opts ...metric.AddOption) { - if ctr := i.delegate.Load(); ctr != nil { - ctr.(metric.Int64UpDownCounter).Add(ctx, x, opts...) - } -} - -type siHistogram struct { - embedded.Int64Histogram - - name string - opts []metric.Int64HistogramOption - - delegate atomic.Value //metric.Int64Histogram -} - -var _ metric.Int64Histogram = (*siHistogram)(nil) - -func (i *siHistogram) setDelegate(m metric.Meter) { - ctr, err := m.Int64Histogram(i.name, i.opts...) - if err != nil { - GetErrorHandler().Handle(err) - return - } - i.delegate.Store(ctr) -} - -func (i *siHistogram) Record(ctx context.Context, x int64, opts ...metric.RecordOption) { - if ctr := i.delegate.Load(); ctr != nil { - ctr.(metric.Int64Histogram).Record(ctx, x, opts...) 
- } -} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go deleted file mode 100644 index 5951fd06..00000000 --- a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package global // import "go.opentelemetry.io/otel/internal/global" - -import ( - "log" - "os" - "sync/atomic" - "unsafe" - - "github.com/go-logr/logr" - "github.com/go-logr/stdr" -) - -// globalLogger is the logging interface used within the otel api and sdk provide details of the internals. -// -// The default logger uses stdr which is backed by the standard `log.Logger` -// interface. This logger will only show messages at the Error Level. -var globalLogger unsafe.Pointer - -func init() { - SetLogger(stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))) -} - -// SetLogger overrides the globalLogger with l. -// -// To see Warn messages use a logger with `l.V(1).Enabled() == true` -// To see Info messages use a logger with `l.V(4).Enabled() == true` -// To see Debug messages use a logger with `l.V(8).Enabled() == true`. 
-func SetLogger(l logr.Logger) { - atomic.StorePointer(&globalLogger, unsafe.Pointer(&l)) -} - -func getLogger() logr.Logger { - return *(*logr.Logger)(atomic.LoadPointer(&globalLogger)) -} - -// Info prints messages about the general state of the API or SDK. -// This should usually be less than 5 messages a minute. -func Info(msg string, keysAndValues ...interface{}) { - getLogger().V(4).Info(msg, keysAndValues...) -} - -// Error prints messages about exceptional states of the API or SDK. -func Error(err error, msg string, keysAndValues ...interface{}) { - getLogger().Error(err, msg, keysAndValues...) -} - -// Debug prints messages about all internal changes in the API or SDK. -func Debug(msg string, keysAndValues ...interface{}) { - getLogger().V(8).Info(msg, keysAndValues...) -} - -// Warn prints messages about warnings in the API or SDK. -// Not an error but is likely more important than an informational event. -func Warn(msg string, keysAndValues ...interface{}) { - getLogger().V(1).Info(msg, keysAndValues...) -} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go deleted file mode 100644 index 0097db47..00000000 --- a/vendor/go.opentelemetry.io/otel/internal/global/meter.go +++ /dev/null @@ -1,354 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package global // import "go.opentelemetry.io/otel/internal/global" - -import ( - "container/list" - "sync" - "sync/atomic" - - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/metric/embedded" -) - -// meterProvider is a placeholder for a configured SDK MeterProvider. -// -// All MeterProvider functionality is forwarded to a delegate once -// configured. -type meterProvider struct { - embedded.MeterProvider - - mtx sync.Mutex - meters map[il]*meter - - delegate metric.MeterProvider -} - -// setDelegate configures p to delegate all MeterProvider functionality to -// provider. -// -// All Meters provided prior to this function call are switched out to be -// Meters provided by provider. All instruments and callbacks are recreated and -// delegated. -// -// It is guaranteed by the caller that this happens only once. -func (p *meterProvider) setDelegate(provider metric.MeterProvider) { - p.mtx.Lock() - defer p.mtx.Unlock() - - p.delegate = provider - - if len(p.meters) == 0 { - return - } - - for _, meter := range p.meters { - meter.setDelegate(provider) - } - - p.meters = nil -} - -// Meter implements MeterProvider. -func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Meter { - p.mtx.Lock() - defer p.mtx.Unlock() - - if p.delegate != nil { - return p.delegate.Meter(name, opts...) - } - - // At this moment it is guaranteed that no sdk is installed, save the meter in the meters map. - - c := metric.NewMeterConfig(opts...) - key := il{ - name: name, - version: c.InstrumentationVersion(), - } - - if p.meters == nil { - p.meters = make(map[il]*meter) - } - - if val, ok := p.meters[key]; ok { - return val - } - - t := &meter{name: name, opts: opts} - p.meters[key] = t - return t -} - -// meter is a placeholder for a metric.Meter. -// -// All Meter functionality is forwarded to a delegate once configured. -// Otherwise, all functionality is forwarded to a NoopMeter. 
-type meter struct { - embedded.Meter - - name string - opts []metric.MeterOption - - mtx sync.Mutex - instruments []delegatedInstrument - - registry list.List - - delegate atomic.Value // metric.Meter -} - -type delegatedInstrument interface { - setDelegate(metric.Meter) -} - -// setDelegate configures m to delegate all Meter functionality to Meters -// created by provider. -// -// All subsequent calls to the Meter methods will be passed to the delegate. -// -// It is guaranteed by the caller that this happens only once. -func (m *meter) setDelegate(provider metric.MeterProvider) { - meter := provider.Meter(m.name, m.opts...) - m.delegate.Store(meter) - - m.mtx.Lock() - defer m.mtx.Unlock() - - for _, inst := range m.instruments { - inst.setDelegate(meter) - } - - for e := m.registry.Front(); e != nil; e = e.Next() { - r := e.Value.(*registration) - r.setDelegate(meter) - m.registry.Remove(e) - } - - m.instruments = nil - m.registry.Init() -} - -func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) (metric.Int64Counter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64Counter(name, options...) - } - m.mtx.Lock() - defer m.mtx.Unlock() - i := &siCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) - return i, nil -} - -func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64UpDownCounter(name, options...) - } - m.mtx.Lock() - defer m.mtx.Unlock() - i := &siUpDownCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) - return i, nil -} - -func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOption) (metric.Int64Histogram, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64Histogram(name, options...) 
- } - m.mtx.Lock() - defer m.mtx.Unlock() - i := &siHistogram{name: name, opts: options} - m.instruments = append(m.instruments, i) - return i, nil -} - -func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64ObservableCounter(name, options...) - } - m.mtx.Lock() - defer m.mtx.Unlock() - i := &aiCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) - return i, nil -} - -func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64ObservableUpDownCounter(name, options...) - } - m.mtx.Lock() - defer m.mtx.Unlock() - i := &aiUpDownCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) - return i, nil -} - -func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64ObservableGauge(name, options...) - } - m.mtx.Lock() - defer m.mtx.Unlock() - i := &aiGauge{name: name, opts: options} - m.instruments = append(m.instruments, i) - return i, nil -} - -func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOption) (metric.Float64Counter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64Counter(name, options...) - } - m.mtx.Lock() - defer m.mtx.Unlock() - i := &sfCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) - return i, nil -} - -func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64UpDownCounter(name, options...) 
- } - m.mtx.Lock() - defer m.mtx.Unlock() - i := &sfUpDownCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) - return i, nil -} - -func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64Histogram(name, options...) - } - m.mtx.Lock() - defer m.mtx.Unlock() - i := &sfHistogram{name: name, opts: options} - m.instruments = append(m.instruments, i) - return i, nil -} - -func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64ObservableCounter(name, options...) - } - m.mtx.Lock() - defer m.mtx.Unlock() - i := &afCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) - return i, nil -} - -func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64ObservableUpDownCounter(name, options...) - } - m.mtx.Lock() - defer m.mtx.Unlock() - i := &afUpDownCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) - return i, nil -} - -func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64ObservableGauge(name, options...) - } - m.mtx.Lock() - defer m.mtx.Unlock() - i := &afGauge{name: name, opts: options} - m.instruments = append(m.instruments, i) - return i, nil -} - -// RegisterCallback captures the function that will be called during Collect. 
-func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) (metric.Registration, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - insts = unwrapInstruments(insts) - return del.RegisterCallback(f, insts...) - } - - m.mtx.Lock() - defer m.mtx.Unlock() - - reg := ®istration{instruments: insts, function: f} - e := m.registry.PushBack(reg) - reg.unreg = func() error { - m.mtx.Lock() - _ = m.registry.Remove(e) - m.mtx.Unlock() - return nil - } - return reg, nil -} - -type wrapped interface { - unwrap() metric.Observable -} - -func unwrapInstruments(instruments []metric.Observable) []metric.Observable { - out := make([]metric.Observable, 0, len(instruments)) - - for _, inst := range instruments { - if in, ok := inst.(wrapped); ok { - out = append(out, in.unwrap()) - } else { - out = append(out, inst) - } - } - - return out -} - -type registration struct { - embedded.Registration - - instruments []metric.Observable - function metric.Callback - - unreg func() error - unregMu sync.Mutex -} - -func (c *registration) setDelegate(m metric.Meter) { - insts := unwrapInstruments(c.instruments) - - c.unregMu.Lock() - defer c.unregMu.Unlock() - - if c.unreg == nil { - // Unregister already called. - return - } - - reg, err := m.RegisterCallback(c.function, insts...) - if err != nil { - GetErrorHandler().Handle(err) - } - - c.unreg = reg.Unregister -} - -func (c *registration) Unregister() error { - c.unregMu.Lock() - defer c.unregMu.Unlock() - if c.unreg == nil { - // Unregister already called. 
- return nil - } - - var err error - err, c.unreg = c.unreg(), nil - return err -} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/propagator.go b/vendor/go.opentelemetry.io/otel/internal/global/propagator.go deleted file mode 100644 index 06bac35c..00000000 --- a/vendor/go.opentelemetry.io/otel/internal/global/propagator.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package global // import "go.opentelemetry.io/otel/internal/global" - -import ( - "context" - "sync" - - "go.opentelemetry.io/otel/propagation" -) - -// textMapPropagator is a default TextMapPropagator that delegates calls to a -// registered delegate if one is set, otherwise it defaults to delegating the -// calls to a the default no-op propagation.TextMapPropagator. -type textMapPropagator struct { - mtx sync.Mutex - once sync.Once - delegate propagation.TextMapPropagator - noop propagation.TextMapPropagator -} - -// Compile-time guarantee that textMapPropagator implements the -// propagation.TextMapPropagator interface. -var _ propagation.TextMapPropagator = (*textMapPropagator)(nil) - -func newTextMapPropagator() *textMapPropagator { - return &textMapPropagator{ - noop: propagation.NewCompositeTextMapPropagator(), - } -} - -// SetDelegate sets a delegate propagation.TextMapPropagator that all calls are -// forwarded to. 
Delegation can only be performed once, all subsequent calls -// perform no delegation. -func (p *textMapPropagator) SetDelegate(delegate propagation.TextMapPropagator) { - if delegate == nil { - return - } - - p.mtx.Lock() - p.once.Do(func() { p.delegate = delegate }) - p.mtx.Unlock() -} - -// effectiveDelegate returns the current delegate of p if one is set, -// otherwise the default noop TextMapPropagator is returned. This method -// can be called concurrently. -func (p *textMapPropagator) effectiveDelegate() propagation.TextMapPropagator { - p.mtx.Lock() - defer p.mtx.Unlock() - if p.delegate != nil { - return p.delegate - } - return p.noop -} - -// Inject set cross-cutting concerns from the Context into the carrier. -func (p *textMapPropagator) Inject(ctx context.Context, carrier propagation.TextMapCarrier) { - p.effectiveDelegate().Inject(ctx, carrier) -} - -// Extract reads cross-cutting concerns from the carrier into a Context. -func (p *textMapPropagator) Extract(ctx context.Context, carrier propagation.TextMapCarrier) context.Context { - return p.effectiveDelegate().Extract(ctx, carrier) -} - -// Fields returns the keys whose values are set with Inject. -func (p *textMapPropagator) Fields() []string { - return p.effectiveDelegate().Fields() -} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/state.go b/vendor/go.opentelemetry.io/otel/internal/global/state.go deleted file mode 100644 index 7985005b..00000000 --- a/vendor/go.opentelemetry.io/otel/internal/global/state.go +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package global // import "go.opentelemetry.io/otel/internal/global" - -import ( - "errors" - "sync" - "sync/atomic" - - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/propagation" - "go.opentelemetry.io/otel/trace" -) - -type ( - tracerProviderHolder struct { - tp trace.TracerProvider - } - - propagatorsHolder struct { - tm propagation.TextMapPropagator - } - - meterProviderHolder struct { - mp metric.MeterProvider - } -) - -var ( - globalTracer = defaultTracerValue() - globalPropagators = defaultPropagatorsValue() - globalMeterProvider = defaultMeterProvider() - - delegateTraceOnce sync.Once - delegateTextMapPropagatorOnce sync.Once - delegateMeterOnce sync.Once -) - -// TracerProvider is the internal implementation for global.TracerProvider. -func TracerProvider() trace.TracerProvider { - return globalTracer.Load().(tracerProviderHolder).tp -} - -// SetTracerProvider is the internal implementation for global.SetTracerProvider. -func SetTracerProvider(tp trace.TracerProvider) { - current := TracerProvider() - - if _, cOk := current.(*tracerProvider); cOk { - if _, tpOk := tp.(*tracerProvider); tpOk && current == tp { - // Do not assign the default delegating TracerProvider to delegate - // to itself. - Error( - errors.New("no delegate configured in tracer provider"), - "Setting tracer provider to it's current value. 
No delegate will be configured", - ) - return - } - } - - delegateTraceOnce.Do(func() { - if def, ok := current.(*tracerProvider); ok { - def.setDelegate(tp) - } - }) - globalTracer.Store(tracerProviderHolder{tp: tp}) -} - -// TextMapPropagator is the internal implementation for global.TextMapPropagator. -func TextMapPropagator() propagation.TextMapPropagator { - return globalPropagators.Load().(propagatorsHolder).tm -} - -// SetTextMapPropagator is the internal implementation for global.SetTextMapPropagator. -func SetTextMapPropagator(p propagation.TextMapPropagator) { - current := TextMapPropagator() - - if _, cOk := current.(*textMapPropagator); cOk { - if _, pOk := p.(*textMapPropagator); pOk && current == p { - // Do not assign the default delegating TextMapPropagator to - // delegate to itself. - Error( - errors.New("no delegate configured in text map propagator"), - "Setting text map propagator to it's current value. No delegate will be configured", - ) - return - } - } - - // For the textMapPropagator already returned by TextMapPropagator - // delegate to p. - delegateTextMapPropagatorOnce.Do(func() { - if def, ok := current.(*textMapPropagator); ok { - def.SetDelegate(p) - } - }) - // Return p when subsequent calls to TextMapPropagator are made. - globalPropagators.Store(propagatorsHolder{tm: p}) -} - -// MeterProvider is the internal implementation for global.MeterProvider. -func MeterProvider() metric.MeterProvider { - return globalMeterProvider.Load().(meterProviderHolder).mp -} - -// SetMeterProvider is the internal implementation for global.SetMeterProvider. -func SetMeterProvider(mp metric.MeterProvider) { - current := MeterProvider() - if _, cOk := current.(*meterProvider); cOk { - if _, mpOk := mp.(*meterProvider); mpOk && current == mp { - // Do not assign the default delegating MeterProvider to delegate - // to itself. - Error( - errors.New("no delegate configured in meter provider"), - "Setting meter provider to it's current value. 
No delegate will be configured", - ) - return - } - } - - delegateMeterOnce.Do(func() { - if def, ok := current.(*meterProvider); ok { - def.setDelegate(mp) - } - }) - globalMeterProvider.Store(meterProviderHolder{mp: mp}) -} - -func defaultTracerValue() *atomic.Value { - v := &atomic.Value{} - v.Store(tracerProviderHolder{tp: &tracerProvider{}}) - return v -} - -func defaultPropagatorsValue() *atomic.Value { - v := &atomic.Value{} - v.Store(propagatorsHolder{tm: newTextMapPropagator()}) - return v -} - -func defaultMeterProvider() *atomic.Value { - v := &atomic.Value{} - v.Store(meterProviderHolder{mp: &meterProvider{}}) - return v -} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go deleted file mode 100644 index 5f008d09..00000000 --- a/vendor/go.opentelemetry.io/otel/internal/global/trace.go +++ /dev/null @@ -1,192 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package global // import "go.opentelemetry.io/otel/internal/global" - -/* -This file contains the forwarding implementation of the TracerProvider used as -the default global instance. Prior to initialization of an SDK, Tracers -returned by the global TracerProvider will provide no-op functionality. This -means that all Span created prior to initialization are no-op Spans. 
- -Once an SDK has been initialized, all provided no-op Tracers are swapped for -Tracers provided by the SDK defined TracerProvider. However, any Span started -prior to this initialization does not change its behavior. Meaning, the Span -remains a no-op Span. - -The implementation to track and swap Tracers locks all new Tracer creation -until the swap is complete. This assumes that this operation is not -performance-critical. If that assumption is incorrect, be sure to configure an -SDK prior to any Tracer creation. -*/ - -import ( - "context" - "sync" - "sync/atomic" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/trace" -) - -// tracerProvider is a placeholder for a configured SDK TracerProvider. -// -// All TracerProvider functionality is forwarded to a delegate once -// configured. -type tracerProvider struct { - mtx sync.Mutex - tracers map[il]*tracer - delegate trace.TracerProvider -} - -// Compile-time guarantee that tracerProvider implements the TracerProvider -// interface. -var _ trace.TracerProvider = &tracerProvider{} - -// setDelegate configures p to delegate all TracerProvider functionality to -// provider. -// -// All Tracers provided prior to this function call are switched out to be -// Tracers provided by provider. -// -// It is guaranteed by the caller that this happens only once. -func (p *tracerProvider) setDelegate(provider trace.TracerProvider) { - p.mtx.Lock() - defer p.mtx.Unlock() - - p.delegate = provider - - if len(p.tracers) == 0 { - return - } - - for _, t := range p.tracers { - t.setDelegate(provider) - } - - p.tracers = nil -} - -// Tracer implements TracerProvider. -func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer { - p.mtx.Lock() - defer p.mtx.Unlock() - - if p.delegate != nil { - return p.delegate.Tracer(name, opts...) - } - - // At this moment it is guaranteed that no sdk is installed, save the tracer in the tracers map. 
- - c := trace.NewTracerConfig(opts...) - key := il{ - name: name, - version: c.InstrumentationVersion(), - } - - if p.tracers == nil { - p.tracers = make(map[il]*tracer) - } - - if val, ok := p.tracers[key]; ok { - return val - } - - t := &tracer{name: name, opts: opts, provider: p} - p.tracers[key] = t - return t -} - -type il struct { - name string - version string -} - -// tracer is a placeholder for a trace.Tracer. -// -// All Tracer functionality is forwarded to a delegate once configured. -// Otherwise, all functionality is forwarded to a NoopTracer. -type tracer struct { - name string - opts []trace.TracerOption - provider *tracerProvider - - delegate atomic.Value -} - -// Compile-time guarantee that tracer implements the trace.Tracer interface. -var _ trace.Tracer = &tracer{} - -// setDelegate configures t to delegate all Tracer functionality to Tracers -// created by provider. -// -// All subsequent calls to the Tracer methods will be passed to the delegate. -// -// It is guaranteed by the caller that this happens only once. -func (t *tracer) setDelegate(provider trace.TracerProvider) { - t.delegate.Store(provider.Tracer(t.name, t.opts...)) -} - -// Start implements trace.Tracer by forwarding the call to t.delegate if -// set, otherwise it forwards the call to a NoopTracer. -func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { - delegate := t.delegate.Load() - if delegate != nil { - return delegate.(trace.Tracer).Start(ctx, name, opts...) - } - - s := nonRecordingSpan{sc: trace.SpanContextFromContext(ctx), tracer: t} - ctx = trace.ContextWithSpan(ctx, s) - return ctx, s -} - -// nonRecordingSpan is a minimal implementation of a Span that wraps a -// SpanContext. It performs no operations other than to return the wrapped -// SpanContext. 
-type nonRecordingSpan struct { - sc trace.SpanContext - tracer *tracer -} - -var _ trace.Span = nonRecordingSpan{} - -// SpanContext returns the wrapped SpanContext. -func (s nonRecordingSpan) SpanContext() trace.SpanContext { return s.sc } - -// IsRecording always returns false. -func (nonRecordingSpan) IsRecording() bool { return false } - -// SetStatus does nothing. -func (nonRecordingSpan) SetStatus(codes.Code, string) {} - -// SetError does nothing. -func (nonRecordingSpan) SetError(bool) {} - -// SetAttributes does nothing. -func (nonRecordingSpan) SetAttributes(...attribute.KeyValue) {} - -// End does nothing. -func (nonRecordingSpan) End(...trace.SpanEndOption) {} - -// RecordError does nothing. -func (nonRecordingSpan) RecordError(error, ...trace.EventOption) {} - -// AddEvent does nothing. -func (nonRecordingSpan) AddEvent(string, ...trace.EventOption) {} - -// SetName does nothing. -func (nonRecordingSpan) SetName(string) {} - -func (s nonRecordingSpan) TracerProvider() trace.TracerProvider { return s.tracer.provider } diff --git a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go deleted file mode 100644 index e07e7940..00000000 --- a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package internal // import "go.opentelemetry.io/otel/internal" - -import ( - "math" - "unsafe" -) - -func BoolToRaw(b bool) uint64 { // nolint:revive // b is not a control flag. - if b { - return 1 - } - return 0 -} - -func RawToBool(r uint64) bool { - return r != 0 -} - -func Int64ToRaw(i int64) uint64 { - return uint64(i) -} - -func RawToInt64(r uint64) int64 { - return int64(r) -} - -func Float64ToRaw(f float64) uint64 { - return math.Float64bits(f) -} - -func RawToFloat64(r uint64) float64 { - return math.Float64frombits(r) -} - -func RawPtrToFloat64Ptr(r *uint64) *float64 { - return (*float64)(unsafe.Pointer(r)) -} - -func RawPtrToInt64Ptr(r *uint64) *int64 { - return (*int64)(unsafe.Pointer(r)) -} diff --git a/vendor/go.opentelemetry.io/otel/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal_logging.go deleted file mode 100644 index c4f8acd5..00000000 --- a/vendor/go.opentelemetry.io/otel/internal_logging.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package otel // import "go.opentelemetry.io/otel" - -import ( - "github.com/go-logr/logr" - - "go.opentelemetry.io/otel/internal/global" -) - -// SetLogger configures the logger used internally to opentelemetry. 
-func SetLogger(logger logr.Logger) { - global.SetLogger(logger) -} diff --git a/vendor/go.opentelemetry.io/otel/metric.go b/vendor/go.opentelemetry.io/otel/metric.go deleted file mode 100644 index f9551719..00000000 --- a/vendor/go.opentelemetry.io/otel/metric.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package otel // import "go.opentelemetry.io/otel" - -import ( - "go.opentelemetry.io/otel/internal/global" - "go.opentelemetry.io/otel/metric" -) - -// Meter returns a Meter from the global MeterProvider. The name must be the -// name of the library providing instrumentation. This name may be the same as -// the instrumented code only if that code provides built-in instrumentation. -// If the name is empty, then a implementation defined default name will be -// used instead. -// -// If this is called before a global MeterProvider is registered the returned -// Meter will be a No-op implementation of a Meter. When a global MeterProvider -// is registered for the first time, the returned Meter, and all the -// instruments it has created or will create, are recreated automatically from -// the new MeterProvider. -// -// This is short for GetMeterProvider().Meter(name). -func Meter(name string, opts ...metric.MeterOption) metric.Meter { - return GetMeterProvider().Meter(name, opts...) -} - -// GetMeterProvider returns the registered global meter provider. 
-// -// If no global GetMeterProvider has been registered, a No-op GetMeterProvider -// implementation is returned. When a global GetMeterProvider is registered for -// the first time, the returned GetMeterProvider, and all the Meters it has -// created or will create, are recreated automatically from the new -// GetMeterProvider. -func GetMeterProvider() metric.MeterProvider { - return global.MeterProvider() -} - -// SetMeterProvider registers mp as the global MeterProvider. -func SetMeterProvider(mp metric.MeterProvider) { - global.SetMeterProvider(mp) -} diff --git a/vendor/go.opentelemetry.io/otel/metric/LICENSE b/vendor/go.opentelemetry.io/otel/metric/LICENSE deleted file mode 100644 index 261eeb9e..00000000 --- a/vendor/go.opentelemetry.io/otel/metric/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go deleted file mode 100644 index 072baa8e..00000000 --- a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go +++ /dev/null @@ -1,271 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metric // import "go.opentelemetry.io/otel/metric" - -import ( - "context" - - "go.opentelemetry.io/otel/metric/embedded" -) - -// Float64Observable describes a set of instruments used asynchronously to -// record float64 measurements once per collection cycle. Observations of -// these instruments are only made within a callback. -// -// Warning: Methods may be added to this interface in minor releases. 
-type Float64Observable interface { - Observable - - float64Observable() -} - -// Float64ObservableCounter is an instrument used to asynchronously record -// increasing float64 measurements once per collection cycle. Observations are -// only made within a callback for this instrument. The value observed is -// assumed the to be the cumulative sum of the count. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for -// unimplemented methods. -type Float64ObservableCounter interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Float64ObservableCounter - - Float64Observable -} - -// Float64ObservableCounterConfig contains options for asynchronous counter -// instruments that record int64 values. -type Float64ObservableCounterConfig struct { - description string - unit string - callbacks []Float64Callback -} - -// NewFloat64ObservableCounterConfig returns a new -// [Float64ObservableCounterConfig] with all opts applied. -func NewFloat64ObservableCounterConfig(opts ...Float64ObservableCounterOption) Float64ObservableCounterConfig { - var config Float64ObservableCounterConfig - for _, o := range opts { - config = o.applyFloat64ObservableCounter(config) - } - return config -} - -// Description returns the configured description. -func (c Float64ObservableCounterConfig) Description() string { - return c.description -} - -// Unit returns the configured unit. -func (c Float64ObservableCounterConfig) Unit() string { - return c.unit -} - -// Callbacks returns the configured callbacks. 
-func (c Float64ObservableCounterConfig) Callbacks() []Float64Callback { - return c.callbacks -} - -// Float64ObservableCounterOption applies options to a -// [Float64ObservableCounterConfig]. See [Float64ObservableOption] and -// [InstrumentOption] for other options that can be used as a -// Float64ObservableCounterOption. -type Float64ObservableCounterOption interface { - applyFloat64ObservableCounter(Float64ObservableCounterConfig) Float64ObservableCounterConfig -} - -// Float64ObservableUpDownCounter is an instrument used to asynchronously -// record float64 measurements once per collection cycle. Observations are only -// made within a callback for this instrument. The value observed is assumed -// the to be the cumulative sum of the count. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Float64ObservableUpDownCounter interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Float64ObservableUpDownCounter - - Float64Observable -} - -// Float64ObservableUpDownCounterConfig contains options for asynchronous -// counter instruments that record int64 values. -type Float64ObservableUpDownCounterConfig struct { - description string - unit string - callbacks []Float64Callback -} - -// NewFloat64ObservableUpDownCounterConfig returns a new -// [Float64ObservableUpDownCounterConfig] with all opts applied. 
-func NewFloat64ObservableUpDownCounterConfig(opts ...Float64ObservableUpDownCounterOption) Float64ObservableUpDownCounterConfig { - var config Float64ObservableUpDownCounterConfig - for _, o := range opts { - config = o.applyFloat64ObservableUpDownCounter(config) - } - return config -} - -// Description returns the configured description. -func (c Float64ObservableUpDownCounterConfig) Description() string { - return c.description -} - -// Unit returns the configured unit. -func (c Float64ObservableUpDownCounterConfig) Unit() string { - return c.unit -} - -// Callbacks returns the configured callbacks. -func (c Float64ObservableUpDownCounterConfig) Callbacks() []Float64Callback { - return c.callbacks -} - -// Float64ObservableUpDownCounterOption applies options to a -// [Float64ObservableUpDownCounterConfig]. See [Float64ObservableOption] and -// [InstrumentOption] for other options that can be used as a -// Float64ObservableUpDownCounterOption. -type Float64ObservableUpDownCounterOption interface { - applyFloat64ObservableUpDownCounter(Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig -} - -// Float64ObservableGauge is an instrument used to asynchronously record -// instantaneous float64 measurements once per collection cycle. Observations -// are only made within a callback for this instrument. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Float64ObservableGauge interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Float64ObservableGauge - - Float64Observable -} - -// Float64ObservableGaugeConfig contains options for asynchronous counter -// instruments that record int64 values. 
-type Float64ObservableGaugeConfig struct { - description string - unit string - callbacks []Float64Callback -} - -// NewFloat64ObservableGaugeConfig returns a new [Float64ObservableGaugeConfig] -// with all opts applied. -func NewFloat64ObservableGaugeConfig(opts ...Float64ObservableGaugeOption) Float64ObservableGaugeConfig { - var config Float64ObservableGaugeConfig - for _, o := range opts { - config = o.applyFloat64ObservableGauge(config) - } - return config -} - -// Description returns the configured description. -func (c Float64ObservableGaugeConfig) Description() string { - return c.description -} - -// Unit returns the configured unit. -func (c Float64ObservableGaugeConfig) Unit() string { - return c.unit -} - -// Callbacks returns the configured callbacks. -func (c Float64ObservableGaugeConfig) Callbacks() []Float64Callback { - return c.callbacks -} - -// Float64ObservableGaugeOption applies options to a -// [Float64ObservableGaugeConfig]. See [Float64ObservableOption] and -// [InstrumentOption] for other options that can be used as a -// Float64ObservableGaugeOption. -type Float64ObservableGaugeOption interface { - applyFloat64ObservableGauge(Float64ObservableGaugeConfig) Float64ObservableGaugeConfig -} - -// Float64Observer is a recorder of float64 measurements. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Float64Observer interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Float64Observer - - // Observe records the float64 value. - // - // Use the WithAttributeSet (or, if performance is not a concern, - // the WithAttributes) option to include measurement attributes. 
- Observe(value float64, options ...ObserveOption) -} - -// Float64Callback is a function registered with a Meter that makes -// observations for a Float64Observerable instrument it is registered with. -// Calls to the Float64Observer record measurement values for the -// Float64Observable. -// -// The function needs to complete in a finite amount of time and the deadline -// of the passed context is expected to be honored. -// -// The function needs to make unique observations across all registered -// Float64Callbacks. Meaning, it should not report measurements with the same -// attributes as another Float64Callbacks also registered for the same -// instrument. -// -// The function needs to be concurrent safe. -type Float64Callback func(context.Context, Float64Observer) error - -// Float64ObservableOption applies options to float64 Observer instruments. -type Float64ObservableOption interface { - Float64ObservableCounterOption - Float64ObservableUpDownCounterOption - Float64ObservableGaugeOption -} - -type float64CallbackOpt struct { - cback Float64Callback -} - -func (o float64CallbackOpt) applyFloat64ObservableCounter(cfg Float64ObservableCounterConfig) Float64ObservableCounterConfig { - cfg.callbacks = append(cfg.callbacks, o.cback) - return cfg -} - -func (o float64CallbackOpt) applyFloat64ObservableUpDownCounter(cfg Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig { - cfg.callbacks = append(cfg.callbacks, o.cback) - return cfg -} - -func (o float64CallbackOpt) applyFloat64ObservableGauge(cfg Float64ObservableGaugeConfig) Float64ObservableGaugeConfig { - cfg.callbacks = append(cfg.callbacks, o.cback) - return cfg -} - -// WithFloat64Callback adds callback to be called for an instrument. 
-func WithFloat64Callback(callback Float64Callback) Float64ObservableOption { - return float64CallbackOpt{callback} -} diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go deleted file mode 100644 index 9bd6ebf0..00000000 --- a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go +++ /dev/null @@ -1,269 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metric // import "go.opentelemetry.io/otel/metric" - -import ( - "context" - - "go.opentelemetry.io/otel/metric/embedded" -) - -// Int64Observable describes a set of instruments used asynchronously to record -// int64 measurements once per collection cycle. Observations of these -// instruments are only made within a callback. -// -// Warning: Methods may be added to this interface in minor releases. -type Int64Observable interface { - Observable - - int64Observable() -} - -// Int64ObservableCounter is an instrument used to asynchronously record -// increasing int64 measurements once per collection cycle. Observations are -// only made within a callback for this instrument. The value observed is -// assumed the to be the cumulative sum of the count. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. 
-type Int64ObservableCounter interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Int64ObservableCounter - - Int64Observable -} - -// Int64ObservableCounterConfig contains options for asynchronous counter -// instruments that record int64 values. -type Int64ObservableCounterConfig struct { - description string - unit string - callbacks []Int64Callback -} - -// NewInt64ObservableCounterConfig returns a new [Int64ObservableCounterConfig] -// with all opts applied. -func NewInt64ObservableCounterConfig(opts ...Int64ObservableCounterOption) Int64ObservableCounterConfig { - var config Int64ObservableCounterConfig - for _, o := range opts { - config = o.applyInt64ObservableCounter(config) - } - return config -} - -// Description returns the configured description. -func (c Int64ObservableCounterConfig) Description() string { - return c.description -} - -// Unit returns the configured unit. -func (c Int64ObservableCounterConfig) Unit() string { - return c.unit -} - -// Callbacks returns the configured callbacks. -func (c Int64ObservableCounterConfig) Callbacks() []Int64Callback { - return c.callbacks -} - -// Int64ObservableCounterOption applies options to a -// [Int64ObservableCounterConfig]. See [Int64ObservableOption] and -// [InstrumentOption] for other options that can be used as an -// Int64ObservableCounterOption. -type Int64ObservableCounterOption interface { - applyInt64ObservableCounter(Int64ObservableCounterConfig) Int64ObservableCounterConfig -} - -// Int64ObservableUpDownCounter is an instrument used to asynchronously record -// int64 measurements once per collection cycle. Observations are only made -// within a callback for this instrument. The value observed is assumed the to -// be the cumulative sum of the count. 
-// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Int64ObservableUpDownCounter interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Int64ObservableUpDownCounter - - Int64Observable -} - -// Int64ObservableUpDownCounterConfig contains options for asynchronous counter -// instruments that record int64 values. -type Int64ObservableUpDownCounterConfig struct { - description string - unit string - callbacks []Int64Callback -} - -// NewInt64ObservableUpDownCounterConfig returns a new -// [Int64ObservableUpDownCounterConfig] with all opts applied. -func NewInt64ObservableUpDownCounterConfig(opts ...Int64ObservableUpDownCounterOption) Int64ObservableUpDownCounterConfig { - var config Int64ObservableUpDownCounterConfig - for _, o := range opts { - config = o.applyInt64ObservableUpDownCounter(config) - } - return config -} - -// Description returns the configured description. -func (c Int64ObservableUpDownCounterConfig) Description() string { - return c.description -} - -// Unit returns the configured unit. -func (c Int64ObservableUpDownCounterConfig) Unit() string { - return c.unit -} - -// Callbacks returns the configured callbacks. -func (c Int64ObservableUpDownCounterConfig) Callbacks() []Int64Callback { - return c.callbacks -} - -// Int64ObservableUpDownCounterOption applies options to a -// [Int64ObservableUpDownCounterConfig]. See [Int64ObservableOption] and -// [InstrumentOption] for other options that can be used as an -// Int64ObservableUpDownCounterOption. 
-type Int64ObservableUpDownCounterOption interface { - applyInt64ObservableUpDownCounter(Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig -} - -// Int64ObservableGauge is an instrument used to asynchronously record -// instantaneous int64 measurements once per collection cycle. Observations are -// only made within a callback for this instrument. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Int64ObservableGauge interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Int64ObservableGauge - - Int64Observable -} - -// Int64ObservableGaugeConfig contains options for asynchronous counter -// instruments that record int64 values. -type Int64ObservableGaugeConfig struct { - description string - unit string - callbacks []Int64Callback -} - -// NewInt64ObservableGaugeConfig returns a new [Int64ObservableGaugeConfig] -// with all opts applied. -func NewInt64ObservableGaugeConfig(opts ...Int64ObservableGaugeOption) Int64ObservableGaugeConfig { - var config Int64ObservableGaugeConfig - for _, o := range opts { - config = o.applyInt64ObservableGauge(config) - } - return config -} - -// Description returns the configured description. -func (c Int64ObservableGaugeConfig) Description() string { - return c.description -} - -// Unit returns the configured unit. -func (c Int64ObservableGaugeConfig) Unit() string { - return c.unit -} - -// Callbacks returns the configured callbacks. -func (c Int64ObservableGaugeConfig) Callbacks() []Int64Callback { - return c.callbacks -} - -// Int64ObservableGaugeOption applies options to a -// [Int64ObservableGaugeConfig]. 
See [Int64ObservableOption] and -// [InstrumentOption] for other options that can be used as an -// Int64ObservableGaugeOption. -type Int64ObservableGaugeOption interface { - applyInt64ObservableGauge(Int64ObservableGaugeConfig) Int64ObservableGaugeConfig -} - -// Int64Observer is a recorder of int64 measurements. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Int64Observer interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Int64Observer - - // Observe records the int64 value. - // - // Use the WithAttributeSet (or, if performance is not a concern, - // the WithAttributes) option to include measurement attributes. - Observe(value int64, options ...ObserveOption) -} - -// Int64Callback is a function registered with a Meter that makes observations -// for an Int64Observerable instrument it is registered with. Calls to the -// Int64Observer record measurement values for the Int64Observable. -// -// The function needs to complete in a finite amount of time and the deadline -// of the passed context is expected to be honored. -// -// The function needs to make unique observations across all registered -// Int64Callbacks. Meaning, it should not report measurements with the same -// attributes as another Int64Callbacks also registered for the same -// instrument. -// -// The function needs to be concurrent safe. -type Int64Callback func(context.Context, Int64Observer) error - -// Int64ObservableOption applies options to int64 Observer instruments. 
-type Int64ObservableOption interface { - Int64ObservableCounterOption - Int64ObservableUpDownCounterOption - Int64ObservableGaugeOption -} - -type int64CallbackOpt struct { - cback Int64Callback -} - -func (o int64CallbackOpt) applyInt64ObservableCounter(cfg Int64ObservableCounterConfig) Int64ObservableCounterConfig { - cfg.callbacks = append(cfg.callbacks, o.cback) - return cfg -} - -func (o int64CallbackOpt) applyInt64ObservableUpDownCounter(cfg Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig { - cfg.callbacks = append(cfg.callbacks, o.cback) - return cfg -} - -func (o int64CallbackOpt) applyInt64ObservableGauge(cfg Int64ObservableGaugeConfig) Int64ObservableGaugeConfig { - cfg.callbacks = append(cfg.callbacks, o.cback) - return cfg -} - -// WithInt64Callback adds callback to be called for an instrument. -func WithInt64Callback(callback Int64Callback) Int64ObservableOption { - return int64CallbackOpt{callback} -} diff --git a/vendor/go.opentelemetry.io/otel/metric/config.go b/vendor/go.opentelemetry.io/otel/metric/config.go deleted file mode 100644 index 778ad2d7..00000000 --- a/vendor/go.opentelemetry.io/otel/metric/config.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metric // import "go.opentelemetry.io/otel/metric" - -import "go.opentelemetry.io/otel/attribute" - -// MeterConfig contains options for Meters. 
-type MeterConfig struct { - instrumentationVersion string - schemaURL string - attrs attribute.Set - - // Ensure forward compatibility by explicitly making this not comparable. - noCmp [0]func() //nolint: unused // This is indeed used. -} - -// InstrumentationVersion returns the version of the library providing -// instrumentation. -func (cfg MeterConfig) InstrumentationVersion() string { - return cfg.instrumentationVersion -} - -// InstrumentationAttributes returns the attributes associated with the library -// providing instrumentation. -func (cfg MeterConfig) InstrumentationAttributes() attribute.Set { - return cfg.attrs -} - -// SchemaURL is the schema_url of the library providing instrumentation. -func (cfg MeterConfig) SchemaURL() string { - return cfg.schemaURL -} - -// MeterOption is an interface for applying Meter options. -type MeterOption interface { - // applyMeter is used to set a MeterOption value of a MeterConfig. - applyMeter(MeterConfig) MeterConfig -} - -// NewMeterConfig creates a new MeterConfig and applies -// all the given options. -func NewMeterConfig(opts ...MeterOption) MeterConfig { - var config MeterConfig - for _, o := range opts { - config = o.applyMeter(config) - } - return config -} - -type meterOptionFunc func(MeterConfig) MeterConfig - -func (fn meterOptionFunc) applyMeter(cfg MeterConfig) MeterConfig { - return fn(cfg) -} - -// WithInstrumentationVersion sets the instrumentation version. -func WithInstrumentationVersion(version string) MeterOption { - return meterOptionFunc(func(config MeterConfig) MeterConfig { - config.instrumentationVersion = version - return config - }) -} - -// WithInstrumentationAttributes sets the instrumentation attributes. -// -// The passed attributes will be de-duplicated. -func WithInstrumentationAttributes(attr ...attribute.KeyValue) MeterOption { - return meterOptionFunc(func(config MeterConfig) MeterConfig { - config.attrs = attribute.NewSet(attr...) 
- return config - }) -} - -// WithSchemaURL sets the schema URL. -func WithSchemaURL(schemaURL string) MeterOption { - return meterOptionFunc(func(config MeterConfig) MeterConfig { - config.schemaURL = schemaURL - return config - }) -} diff --git a/vendor/go.opentelemetry.io/otel/metric/doc.go b/vendor/go.opentelemetry.io/otel/metric/doc.go deleted file mode 100644 index ae24e448..00000000 --- a/vendor/go.opentelemetry.io/otel/metric/doc.go +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package metric provides the OpenTelemetry API used to measure metrics about -source code operation. - -This API is separate from its implementation so the instrumentation built from -it is reusable. See [go.opentelemetry.io/otel/sdk/metric] for the official -OpenTelemetry implementation of this API. - -All measurements made with this package are made via instruments. These -instruments are created by a [Meter] which itself is created by a -[MeterProvider]. Applications need to accept a [MeterProvider] implementation -as a starting point when instrumenting. This can be done directly, or by using -the OpenTelemetry global MeterProvider via [GetMeterProvider]. Using an -appropriately named [Meter] from the accepted [MeterProvider], instrumentation -can then be built from the [Meter]'s instruments. - -# Instruments - -Each instrument is designed to make measurements of a particular type. 
Broadly, -all instruments fall into two overlapping logical categories: asynchronous or -synchronous, and int64 or float64. - -All synchronous instruments ([Int64Counter], [Int64UpDownCounter], -[Int64Histogram], [Float64Counter], [Float64UpDownCounter], and -[Float64Histogram]) are used to measure the operation and performance of source -code during the source code execution. These instruments only make measurements -when the source code they instrument is run. - -All asynchronous instruments ([Int64ObservableCounter], -[Int64ObservableUpDownCounter], [Int64ObservableGauge], -[Float64ObservableCounter], [Float64ObservableUpDownCounter], and -[Float64ObservableGauge]) are used to measure metrics outside of the execution -of source code. They are said to make "observations" via a callback function -called once every measurement collection cycle. - -Each instrument is also grouped by the value type it measures. Either int64 or -float64. The value being measured will dictate which instrument in these -categories to use. - -Outside of these two broad categories, instruments are described by the -function they are designed to serve. All Counters ([Int64Counter], -[Float64Counter], [Int64ObservableCounter], and [Float64ObservableCounter]) are -designed to measure values that never decrease in value, but instead only -incrementally increase in value. UpDownCounters ([Int64UpDownCounter], -[Float64UpDownCounter], [Int64ObservableUpDownCounter], and -[Float64ObservableUpDownCounter]) on the other hand, are designed to measure -values that can increase and decrease. When more information needs to be -conveyed about all the synchronous measurements made during a collection cycle, -a Histogram ([Int64Histogram] and [Float64Histogram]) should be used. Finally, -when just the most recent measurement needs to be conveyed about an -asynchronous measurement, a Gauge ([Int64ObservableGauge] and -[Float64ObservableGauge]) should be used. 
- -See the [OpenTelemetry documentation] for more information about instruments -and their intended use. - -# Measurements - -Measurements are made by recording values and information about the values with -an instrument. How these measurements are recorded depends on the instrument. - -Measurements for synchronous instruments ([Int64Counter], [Int64UpDownCounter], -[Int64Histogram], [Float64Counter], [Float64UpDownCounter], and -[Float64Histogram]) are recorded using the instrument methods directly. All -counter instruments have an Add method that is used to measure an increment -value, and all histogram instruments have a Record method to measure a data -point. - -Asynchronous instruments ([Int64ObservableCounter], -[Int64ObservableUpDownCounter], [Int64ObservableGauge], -[Float64ObservableCounter], [Float64ObservableUpDownCounter], and -[Float64ObservableGauge]) record measurements within a callback function. The -callback is registered with the Meter which ensures the callback is called once -per collection cycle. A callback can be registered two ways: during the -instrument's creation using an option, or later using the RegisterCallback -method of the [Meter] that created the instrument. - -If the following criteria are met, an option ([WithInt64Callback] or -[WithFloat64Callback]) can be used during the asynchronous instrument's -creation to register a callback ([Int64Callback] or [Float64Callback], -respectively): - - - The measurement process is known when the instrument is created - - Only that instrument will make a measurement within the callback - - The callback never needs to be unregistered - -If the criteria are not met, use the RegisterCallback method of the [Meter] that -created the instrument to register a [Callback]. - -# API Implementations - -This package does not conform to the standard Go versioning policy, all of its -interfaces may have methods added to them without a package major version bump. 
-This non-standard API evolution could surprise an uninformed implementation -author. They could unknowingly build their implementation in a way that would -result in a runtime panic for their users that update to the new API. - -The API is designed to help inform an instrumentation author about this -non-standard API evolution. It requires them to choose a default behavior for -unimplemented interface methods. There are three behavior choices they can -make: - - - Compilation failure - - Panic - - Default to another implementation - -All interfaces in this API embed a corresponding interface from -[go.opentelemetry.io/otel/metric/embedded]. If an author wants the default -behavior of their implementations to be a compilation failure, signaling to -their users they need to update to the latest version of that implementation, -they need to embed the corresponding interface from -[go.opentelemetry.io/otel/metric/embedded] in their implementation. For -example, - - import "go.opentelemetry.io/otel/metric/embedded" - - type MeterProvider struct { - embedded.MeterProvider - // ... - } - -If an author wants the default behavior of their implementations to a panic, -they need to embed the API interface directly. - - import "go.opentelemetry.io/otel/metric" - - type MeterProvider struct { - metric.MeterProvider - // ... - } - -This is not a recommended behavior as it could lead to publishing packages that -contain runtime panics when users update other package that use newer versions -of [go.opentelemetry.io/otel/metric]. - -Finally, an author can embed another implementation in theirs. The embedded -implementation will be used for methods not defined by the author. For example, -an author who want to default to silently dropping the call can use -[go.opentelemetry.io/otel/metric/noop]: - - import "go.opentelemetry.io/otel/metric/noop" - - type MeterProvider struct { - noop.MeterProvider - // ... 
- } - -It is strongly recommended that authors only embed -[go.opentelemetry.io/otel/metric/noop] if they choose this default behavior. -That implementation is the only one OpenTelemetry authors can guarantee will -fully implement all the API interfaces when a user updates their API. - -[OpenTelemetry documentation]: https://opentelemetry.io/docs/concepts/signals/metrics/ -[GetMeterProvider]: https://pkg.go.dev/go.opentelemetry.io/otel#GetMeterProvider -*/ -package metric // import "go.opentelemetry.io/otel/metric" diff --git a/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go deleted file mode 100644 index ae0bdbd2..00000000 --- a/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go +++ /dev/null @@ -1,234 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package embedded provides interfaces embedded within the [OpenTelemetry -// metric API]. -// -// Implementers of the [OpenTelemetry metric API] can embed the relevant type -// from this package into their implementation directly. Doing so will result -// in a compilation error for users when the [OpenTelemetry metric API] is -// extended (which is something that can happen without a major version bump of -// the API package). 
-// -// [OpenTelemetry metric API]: https://pkg.go.dev/go.opentelemetry.io/otel/metric -package embedded // import "go.opentelemetry.io/otel/metric/embedded" - -// MeterProvider is embedded in -// [go.opentelemetry.io/otel/metric.MeterProvider]. -// -// Embed this interface in your implementation of the -// [go.opentelemetry.io/otel/metric.MeterProvider] if you want users to -// experience a compilation error, signaling they need to update to your latest -// implementation, when the [go.opentelemetry.io/otel/metric.MeterProvider] -// interface is extended (which is something that can happen without a major -// version bump of the API package). -type MeterProvider interface{ meterProvider() } - -// Meter is embedded in [go.opentelemetry.io/otel/metric.Meter]. -// -// Embed this interface in your implementation of the -// [go.opentelemetry.io/otel/metric.Meter] if you want users to experience a -// compilation error, signaling they need to update to your latest -// implementation, when the [go.opentelemetry.io/otel/metric.Meter] interface -// is extended (which is something that can happen without a major version bump -// of the API package). -type Meter interface{ meter() } - -// Float64Observer is embedded in -// [go.opentelemetry.io/otel/metric.Float64Observer]. -// -// Embed this interface in your implementation of the -// [go.opentelemetry.io/otel/metric.Float64Observer] if you want -// users to experience a compilation error, signaling they need to update to -// your latest implementation, when the -// [go.opentelemetry.io/otel/metric.Float64Observer] interface is -// extended (which is something that can happen without a major version bump of -// the API package). -type Float64Observer interface{ float64Observer() } - -// Int64Observer is embedded in -// [go.opentelemetry.io/otel/metric.Int64Observer]. 
-// -// Embed this interface in your implementation of the -// [go.opentelemetry.io/otel/metric.Int64Observer] if you want users -// to experience a compilation error, signaling they need to update to your -// latest implementation, when the -// [go.opentelemetry.io/otel/metric.Int64Observer] interface is -// extended (which is something that can happen without a major version bump of -// the API package). -type Int64Observer interface{ int64Observer() } - -// Observer is embedded in [go.opentelemetry.io/otel/metric.Observer]. -// -// Embed this interface in your implementation of the -// [go.opentelemetry.io/otel/metric.Observer] if you want users to experience a -// compilation error, signaling they need to update to your latest -// implementation, when the [go.opentelemetry.io/otel/metric.Observer] -// interface is extended (which is something that can happen without a major -// version bump of the API package). -type Observer interface{ observer() } - -// Registration is embedded in [go.opentelemetry.io/otel/metric.Registration]. -// -// Embed this interface in your implementation of the -// [go.opentelemetry.io/otel/metric.Registration] if you want users to -// experience a compilation error, signaling they need to update to your latest -// implementation, when the [go.opentelemetry.io/otel/metric.Registration] -// interface is extended (which is something that can happen without a major -// version bump of the API package). -type Registration interface{ registration() } - -// Float64Counter is embedded in -// [go.opentelemetry.io/otel/metric.Float64Counter]. 
-// -// Embed this interface in your implementation of the -// [go.opentelemetry.io/otel/metric.Float64Counter] if you want -// users to experience a compilation error, signaling they need to update to -// your latest implementation, when the -// [go.opentelemetry.io/otel/metric.Float64Counter] interface is -// extended (which is something that can happen without a major version bump of -// the API package). -type Float64Counter interface{ float64Counter() } - -// Float64Histogram is embedded in -// [go.opentelemetry.io/otel/metric.Float64Histogram]. -// -// Embed this interface in your implementation of the -// [go.opentelemetry.io/otel/metric.Float64Histogram] if you want -// users to experience a compilation error, signaling they need to update to -// your latest implementation, when the -// [go.opentelemetry.io/otel/metric.Float64Histogram] interface is -// extended (which is something that can happen without a major version bump of -// the API package). -type Float64Histogram interface{ float64Histogram() } - -// Float64ObservableCounter is embedded in -// [go.opentelemetry.io/otel/metric.Float64ObservableCounter]. -// -// Embed this interface in your implementation of the -// [go.opentelemetry.io/otel/metric.Float64ObservableCounter] if you -// want users to experience a compilation error, signaling they need to update -// to your latest implementation, when the -// [go.opentelemetry.io/otel/metric.Float64ObservableCounter] -// interface is extended (which is something that can happen without a major -// version bump of the API package). -type Float64ObservableCounter interface{ float64ObservableCounter() } - -// Float64ObservableGauge is embedded in -// [go.opentelemetry.io/otel/metric.Float64ObservableGauge]. 
-// -// Embed this interface in your implementation of the -// [go.opentelemetry.io/otel/metric.Float64ObservableGauge] if you -// want users to experience a compilation error, signaling they need to update -// to your latest implementation, when the -// [go.opentelemetry.io/otel/metric.Float64ObservableGauge] -// interface is extended (which is something that can happen without a major -// version bump of the API package). -type Float64ObservableGauge interface{ float64ObservableGauge() } - -// Float64ObservableUpDownCounter is embedded in -// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter]. -// -// Embed this interface in your implementation of the -// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter] -// if you want users to experience a compilation error, signaling they need to -// update to your latest implementation, when the -// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter] -// interface is extended (which is something that can happen without a major -// version bump of the API package). -type Float64ObservableUpDownCounter interface{ float64ObservableUpDownCounter() } - -// Float64UpDownCounter is embedded in -// [go.opentelemetry.io/otel/metric.Float64UpDownCounter]. -// -// Embed this interface in your implementation of the -// [go.opentelemetry.io/otel/metric.Float64UpDownCounter] if you -// want users to experience a compilation error, signaling they need to update -// to your latest implementation, when the -// [go.opentelemetry.io/otel/metric.Float64UpDownCounter] interface -// is extended (which is something that can happen without a major version bump -// of the API package). -type Float64UpDownCounter interface{ float64UpDownCounter() } - -// Int64Counter is embedded in -// [go.opentelemetry.io/otel/metric.Int64Counter]. 
-// -// Embed this interface in your implementation of the -// [go.opentelemetry.io/otel/metric.Int64Counter] if you want users -// to experience a compilation error, signaling they need to update to your -// latest implementation, when the -// [go.opentelemetry.io/otel/metric.Int64Counter] interface is -// extended (which is something that can happen without a major version bump of -// the API package). -type Int64Counter interface{ int64Counter() } - -// Int64Histogram is embedded in -// [go.opentelemetry.io/otel/metric.Int64Histogram]. -// -// Embed this interface in your implementation of the -// [go.opentelemetry.io/otel/metric.Int64Histogram] if you want -// users to experience a compilation error, signaling they need to update to -// your latest implementation, when the -// [go.opentelemetry.io/otel/metric.Int64Histogram] interface is -// extended (which is something that can happen without a major version bump of -// the API package). -type Int64Histogram interface{ int64Histogram() } - -// Int64ObservableCounter is embedded in -// [go.opentelemetry.io/otel/metric.Int64ObservableCounter]. -// -// Embed this interface in your implementation of the -// [go.opentelemetry.io/otel/metric.Int64ObservableCounter] if you -// want users to experience a compilation error, signaling they need to update -// to your latest implementation, when the -// [go.opentelemetry.io/otel/metric.Int64ObservableCounter] -// interface is extended (which is something that can happen without a major -// version bump of the API package). -type Int64ObservableCounter interface{ int64ObservableCounter() } - -// Int64ObservableGauge is embedded in -// [go.opentelemetry.io/otel/metric.Int64ObservableGauge]. 
-// -// Embed this interface in your implementation of the -// [go.opentelemetry.io/otel/metric.Int64ObservableGauge] if you -// want users to experience a compilation error, signaling they need to update -// to your latest implementation, when the -// [go.opentelemetry.io/otel/metric.Int64ObservableGauge] interface -// is extended (which is something that can happen without a major version bump -// of the API package). -type Int64ObservableGauge interface{ int64ObservableGauge() } - -// Int64ObservableUpDownCounter is embedded in -// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter]. -// -// Embed this interface in your implementation of the -// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter] if -// you want users to experience a compilation error, signaling they need to -// update to your latest implementation, when the -// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter] -// interface is extended (which is something that can happen without a major -// version bump of the API package). -type Int64ObservableUpDownCounter interface{ int64ObservableUpDownCounter() } - -// Int64UpDownCounter is embedded in -// [go.opentelemetry.io/otel/metric.Int64UpDownCounter]. -// -// Embed this interface in your implementation of the -// [go.opentelemetry.io/otel/metric.Int64UpDownCounter] if you want -// users to experience a compilation error, signaling they need to update to -// your latest implementation, when the -// [go.opentelemetry.io/otel/metric.Int64UpDownCounter] interface is -// extended (which is something that can happen without a major version bump of -// the API package). 
-type Int64UpDownCounter interface{ int64UpDownCounter() } diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument.go b/vendor/go.opentelemetry.io/otel/metric/instrument.go deleted file mode 100644 index 0033c1e1..00000000 --- a/vendor/go.opentelemetry.io/otel/metric/instrument.go +++ /dev/null @@ -1,332 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metric // import "go.opentelemetry.io/otel/metric" - -import "go.opentelemetry.io/otel/attribute" - -// Observable is used as a grouping mechanism for all instruments that are -// updated within a Callback. -type Observable interface { - observable() -} - -// InstrumentOption applies options to all instruments. 
-type InstrumentOption interface { - Int64CounterOption - Int64UpDownCounterOption - Int64HistogramOption - Int64ObservableCounterOption - Int64ObservableUpDownCounterOption - Int64ObservableGaugeOption - - Float64CounterOption - Float64UpDownCounterOption - Float64HistogramOption - Float64ObservableCounterOption - Float64ObservableUpDownCounterOption - Float64ObservableGaugeOption -} - -type descOpt string - -func (o descOpt) applyFloat64Counter(c Float64CounterConfig) Float64CounterConfig { - c.description = string(o) - return c -} - -func (o descOpt) applyFloat64UpDownCounter(c Float64UpDownCounterConfig) Float64UpDownCounterConfig { - c.description = string(o) - return c -} - -func (o descOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig { - c.description = string(o) - return c -} - -func (o descOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig { - c.description = string(o) - return c -} - -func (o descOpt) applyFloat64ObservableUpDownCounter(c Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig { - c.description = string(o) - return c -} - -func (o descOpt) applyFloat64ObservableGauge(c Float64ObservableGaugeConfig) Float64ObservableGaugeConfig { - c.description = string(o) - return c -} - -func (o descOpt) applyInt64Counter(c Int64CounterConfig) Int64CounterConfig { - c.description = string(o) - return c -} - -func (o descOpt) applyInt64UpDownCounter(c Int64UpDownCounterConfig) Int64UpDownCounterConfig { - c.description = string(o) - return c -} - -func (o descOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig { - c.description = string(o) - return c -} - -func (o descOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig { - c.description = string(o) - return c -} - -func (o descOpt) applyInt64ObservableUpDownCounter(c Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig { - 
c.description = string(o) - return c -} - -func (o descOpt) applyInt64ObservableGauge(c Int64ObservableGaugeConfig) Int64ObservableGaugeConfig { - c.description = string(o) - return c -} - -// WithDescription sets the instrument description. -func WithDescription(desc string) InstrumentOption { return descOpt(desc) } - -type unitOpt string - -func (o unitOpt) applyFloat64Counter(c Float64CounterConfig) Float64CounterConfig { - c.unit = string(o) - return c -} - -func (o unitOpt) applyFloat64UpDownCounter(c Float64UpDownCounterConfig) Float64UpDownCounterConfig { - c.unit = string(o) - return c -} - -func (o unitOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig { - c.unit = string(o) - return c -} - -func (o unitOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig { - c.unit = string(o) - return c -} - -func (o unitOpt) applyFloat64ObservableUpDownCounter(c Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig { - c.unit = string(o) - return c -} - -func (o unitOpt) applyFloat64ObservableGauge(c Float64ObservableGaugeConfig) Float64ObservableGaugeConfig { - c.unit = string(o) - return c -} - -func (o unitOpt) applyInt64Counter(c Int64CounterConfig) Int64CounterConfig { - c.unit = string(o) - return c -} - -func (o unitOpt) applyInt64UpDownCounter(c Int64UpDownCounterConfig) Int64UpDownCounterConfig { - c.unit = string(o) - return c -} - -func (o unitOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig { - c.unit = string(o) - return c -} - -func (o unitOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig { - c.unit = string(o) - return c -} - -func (o unitOpt) applyInt64ObservableUpDownCounter(c Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig { - c.unit = string(o) - return c -} - -func (o unitOpt) applyInt64ObservableGauge(c Int64ObservableGaugeConfig) Int64ObservableGaugeConfig { - c.unit = 
string(o) - return c -} - -// WithUnit sets the instrument unit. -func WithUnit(u string) InstrumentOption { return unitOpt(u) } - -// AddOption applies options to an addition measurement. See -// [MeasurementOption] for other options that can be used as an AddOption. -type AddOption interface { - applyAdd(AddConfig) AddConfig -} - -// AddConfig contains options for an addition measurement. -type AddConfig struct { - attrs attribute.Set -} - -// NewAddConfig returns a new [AddConfig] with all opts applied. -func NewAddConfig(opts []AddOption) AddConfig { - config := AddConfig{attrs: *attribute.EmptySet()} - for _, o := range opts { - config = o.applyAdd(config) - } - return config -} - -// Attributes returns the configured attribute set. -func (c AddConfig) Attributes() attribute.Set { - return c.attrs -} - -// RecordOption applies options to an addition measurement. See -// [MeasurementOption] for other options that can be used as a RecordOption. -type RecordOption interface { - applyRecord(RecordConfig) RecordConfig -} - -// RecordConfig contains options for a recorded measurement. -type RecordConfig struct { - attrs attribute.Set -} - -// NewRecordConfig returns a new [RecordConfig] with all opts applied. -func NewRecordConfig(opts []RecordOption) RecordConfig { - config := RecordConfig{attrs: *attribute.EmptySet()} - for _, o := range opts { - config = o.applyRecord(config) - } - return config -} - -// Attributes returns the configured attribute set. -func (c RecordConfig) Attributes() attribute.Set { - return c.attrs -} - -// ObserveOption applies options to an addition measurement. See -// [MeasurementOption] for other options that can be used as a ObserveOption. -type ObserveOption interface { - applyObserve(ObserveConfig) ObserveConfig -} - -// ObserveConfig contains options for an observed measurement. -type ObserveConfig struct { - attrs attribute.Set -} - -// NewObserveConfig returns a new [ObserveConfig] with all opts applied. 
-func NewObserveConfig(opts []ObserveOption) ObserveConfig { - config := ObserveConfig{attrs: *attribute.EmptySet()} - for _, o := range opts { - config = o.applyObserve(config) - } - return config -} - -// Attributes returns the configured attribute set. -func (c ObserveConfig) Attributes() attribute.Set { - return c.attrs -} - -// MeasurementOption applies options to all instrument measurement. -type MeasurementOption interface { - AddOption - RecordOption - ObserveOption -} - -type attrOpt struct { - set attribute.Set -} - -// mergeSets returns the union of keys between a and b. Any duplicate keys will -// use the value associated with b. -func mergeSets(a, b attribute.Set) attribute.Set { - // NewMergeIterator uses the first value for any duplicates. - iter := attribute.NewMergeIterator(&b, &a) - merged := make([]attribute.KeyValue, 0, a.Len()+b.Len()) - for iter.Next() { - merged = append(merged, iter.Attribute()) - } - return attribute.NewSet(merged...) -} - -func (o attrOpt) applyAdd(c AddConfig) AddConfig { - switch { - case o.set.Len() == 0: - case c.attrs.Len() == 0: - c.attrs = o.set - default: - c.attrs = mergeSets(c.attrs, o.set) - } - return c -} - -func (o attrOpt) applyRecord(c RecordConfig) RecordConfig { - switch { - case o.set.Len() == 0: - case c.attrs.Len() == 0: - c.attrs = o.set - default: - c.attrs = mergeSets(c.attrs, o.set) - } - return c -} - -func (o attrOpt) applyObserve(c ObserveConfig) ObserveConfig { - switch { - case o.set.Len() == 0: - case c.attrs.Len() == 0: - c.attrs = o.set - default: - c.attrs = mergeSets(c.attrs, o.set) - } - return c -} - -// WithAttributeSet sets the attribute Set associated with a measurement is -// made with. -// -// If multiple WithAttributeSet or WithAttributes options are passed the -// attributes will be merged together in the order they are passed. Attributes -// with duplicate keys will use the last value passed. 
-func WithAttributeSet(attributes attribute.Set) MeasurementOption { - return attrOpt{set: attributes} -} - -// WithAttributes converts attributes into an attribute Set and sets the Set to -// be associated with a measurement. This is shorthand for: -// -// cp := make([]attribute.KeyValue, len(attributes)) -// copy(cp, attributes) -// WithAttributes(attribute.NewSet(cp...)) -// -// [attribute.NewSet] may modify the passed attributes so this will make a copy -// of attributes before creating a set in order to ensure this function is -// concurrent safe. This makes this option function less optimized in -// comparison to [WithAttributeSet]. Therefore, [WithAttributeSet] should be -// preferred for performance sensitive code. -// -// See [WithAttributeSet] for information about how multiple WithAttributes are -// merged. -func WithAttributes(attributes ...attribute.KeyValue) MeasurementOption { - cp := make([]attribute.KeyValue, len(attributes)) - copy(cp, attributes) - return attrOpt{set: attribute.NewSet(cp...)} -} diff --git a/vendor/go.opentelemetry.io/otel/metric/meter.go b/vendor/go.opentelemetry.io/otel/metric/meter.go deleted file mode 100644 index 8e1917c3..00000000 --- a/vendor/go.opentelemetry.io/otel/metric/meter.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package metric // import "go.opentelemetry.io/otel/metric" - -import ( - "context" - - "go.opentelemetry.io/otel/metric/embedded" -) - -// MeterProvider provides access to named Meter instances, for instrumenting -// an application or package. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type MeterProvider interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.MeterProvider - - // Meter returns a new Meter with the provided name and configuration. - // - // A Meter should be scoped at most to a single package. The name needs to - // be unique so it does not collide with other names used by - // an application, nor other applications. To achieve this, the import path - // of the instrumentation package is recommended to be used as name. - // - // If the name is empty, then an implementation defined default name will - // be used instead. - Meter(name string, opts ...MeterOption) Meter -} - -// Meter provides access to instrument instances for recording metrics. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Meter interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Meter - - // Int64Counter returns a new Int64Counter instrument identified by name - // and configured with options. 
The instrument is used to synchronously - // record increasing int64 measurements during a computational operation. - Int64Counter(name string, options ...Int64CounterOption) (Int64Counter, error) - // Int64UpDownCounter returns a new Int64UpDownCounter instrument - // identified by name and configured with options. The instrument is used - // to synchronously record int64 measurements during a computational - // operation. - Int64UpDownCounter(name string, options ...Int64UpDownCounterOption) (Int64UpDownCounter, error) - // Int64Histogram returns a new Int64Histogram instrument identified by - // name and configured with options. The instrument is used to - // synchronously record the distribution of int64 measurements during a - // computational operation. - Int64Histogram(name string, options ...Int64HistogramOption) (Int64Histogram, error) - // Int64ObservableCounter returns a new Int64ObservableCounter identified - // by name and configured with options. The instrument is used to - // asynchronously record increasing int64 measurements once per a - // measurement collection cycle. - // - // Measurements for the returned instrument are made via a callback. Use - // the WithInt64Callback option to register the callback here, or use the - // RegisterCallback method of this Meter to register one later. See the - // Measurements section of the package documentation for more information. - Int64ObservableCounter(name string, options ...Int64ObservableCounterOption) (Int64ObservableCounter, error) - // Int64ObservableUpDownCounter returns a new Int64ObservableUpDownCounter - // instrument identified by name and configured with options. The - // instrument is used to asynchronously record int64 measurements once per - // a measurement collection cycle. - // - // Measurements for the returned instrument are made via a callback. 
Use - // the WithInt64Callback option to register the callback here, or use the - // RegisterCallback method of this Meter to register one later. See the - // Measurements section of the package documentation for more information. - Int64ObservableUpDownCounter(name string, options ...Int64ObservableUpDownCounterOption) (Int64ObservableUpDownCounter, error) - // Int64ObservableGauge returns a new Int64ObservableGauge instrument - // identified by name and configured with options. The instrument is used - // to asynchronously record instantaneous int64 measurements once per a - // measurement collection cycle. - // - // Measurements for the returned instrument are made via a callback. Use - // the WithInt64Callback option to register the callback here, or use the - // RegisterCallback method of this Meter to register one later. See the - // Measurements section of the package documentation for more information. - Int64ObservableGauge(name string, options ...Int64ObservableGaugeOption) (Int64ObservableGauge, error) - - // Float64Counter returns a new Float64Counter instrument identified by - // name and configured with options. The instrument is used to - // synchronously record increasing float64 measurements during a - // computational operation. - Float64Counter(name string, options ...Float64CounterOption) (Float64Counter, error) - // Float64UpDownCounter returns a new Float64UpDownCounter instrument - // identified by name and configured with options. The instrument is used - // to synchronously record float64 measurements during a computational - // operation. - Float64UpDownCounter(name string, options ...Float64UpDownCounterOption) (Float64UpDownCounter, error) - // Float64Histogram returns a new Float64Histogram instrument identified by - // name and configured with options. The instrument is used to - // synchronously record the distribution of float64 measurements during a - // computational operation. 
- Float64Histogram(name string, options ...Float64HistogramOption) (Float64Histogram, error) - // Float64ObservableCounter returns a new Float64ObservableCounter - // instrument identified by name and configured with options. The - // instrument is used to asynchronously record increasing float64 - // measurements once per a measurement collection cycle. - // - // Measurements for the returned instrument are made via a callback. Use - // the WithFloat64Callback option to register the callback here, or use the - // RegisterCallback method of this Meter to register one later. See the - // Measurements section of the package documentation for more information. - Float64ObservableCounter(name string, options ...Float64ObservableCounterOption) (Float64ObservableCounter, error) - // Float64ObservableUpDownCounter returns a new - // Float64ObservableUpDownCounter instrument identified by name and - // configured with options. The instrument is used to asynchronously record - // float64 measurements once per a measurement collection cycle. - // - // Measurements for the returned instrument are made via a callback. Use - // the WithFloat64Callback option to register the callback here, or use the - // RegisterCallback method of this Meter to register one later. See the - // Measurements section of the package documentation for more information. - Float64ObservableUpDownCounter(name string, options ...Float64ObservableUpDownCounterOption) (Float64ObservableUpDownCounter, error) - // Float64ObservableGauge returns a new Float64ObservableGauge instrument - // identified by name and configured with options. The instrument is used - // to asynchronously record instantaneous float64 measurements once per a - // measurement collection cycle. - // - // Measurements for the returned instrument are made via a callback. Use - // the WithFloat64Callback option to register the callback here, or use the - // RegisterCallback method of this Meter to register one later. 
See the - // Measurements section of the package documentation for more information. - Float64ObservableGauge(name string, options ...Float64ObservableGaugeOption) (Float64ObservableGauge, error) - - // RegisterCallback registers f to be called during the collection of a - // measurement cycle. - // - // If Unregister of the returned Registration is called, f needs to be - // unregistered and not called during collection. - // - // The instruments f is registered with are the only instruments that f may - // observe values for. - // - // If no instruments are passed, f should not be registered nor called - // during collection. - RegisterCallback(f Callback, instruments ...Observable) (Registration, error) -} - -// Callback is a function registered with a Meter that makes observations for -// the set of instruments it is registered with. The Observer parameter is used -// to record measurement observations for these instruments. -// -// The function needs to complete in a finite amount of time and the deadline -// of the passed context is expected to be honored. -// -// The function needs to make unique observations across all registered -// Callbacks. Meaning, it should not report measurements for an instrument with -// the same attributes as another Callback will report. -// -// The function needs to be concurrent safe. -type Callback func(context.Context, Observer) error - -// Observer records measurements for multiple instruments in a Callback. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Observer interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Observer - - // ObserveFloat64 records the float64 value for obsrv. 
- ObserveFloat64(obsrv Float64Observable, value float64, opts ...ObserveOption) - // ObserveInt64 records the int64 value for obsrv. - ObserveInt64(obsrv Int64Observable, value int64, opts ...ObserveOption) -} - -// Registration is an token representing the unique registration of a callback -// for a set of instruments with a Meter. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Registration interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Registration - - // Unregister removes the callback registration from a Meter. - // - // This method needs to be idempotent and concurrent safe. - Unregister() error -} diff --git a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go deleted file mode 100644 index f0b06372..00000000 --- a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package metric // import "go.opentelemetry.io/otel/metric" - -import ( - "context" - - "go.opentelemetry.io/otel/metric/embedded" -) - -// Float64Counter is an instrument that records increasing float64 values. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Float64Counter interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Float64Counter - - // Add records a change to the counter. - // - // Use the WithAttributeSet (or, if performance is not a concern, - // the WithAttributes) option to include measurement attributes. - Add(ctx context.Context, incr float64, options ...AddOption) -} - -// Float64CounterConfig contains options for synchronous counter instruments that -// record int64 values. -type Float64CounterConfig struct { - description string - unit string -} - -// NewFloat64CounterConfig returns a new [Float64CounterConfig] with all opts -// applied. -func NewFloat64CounterConfig(opts ...Float64CounterOption) Float64CounterConfig { - var config Float64CounterConfig - for _, o := range opts { - config = o.applyFloat64Counter(config) - } - return config -} - -// Description returns the configured description. -func (c Float64CounterConfig) Description() string { - return c.description -} - -// Unit returns the configured unit. -func (c Float64CounterConfig) Unit() string { - return c.unit -} - -// Float64CounterOption applies options to a [Float64CounterConfig]. See -// [InstrumentOption] for other options that can be used as a -// Float64CounterOption. 
-type Float64CounterOption interface { - applyFloat64Counter(Float64CounterConfig) Float64CounterConfig -} - -// Float64UpDownCounter is an instrument that records increasing or decreasing -// float64 values. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Float64UpDownCounter interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Float64UpDownCounter - - // Add records a change to the counter. - // - // Use the WithAttributeSet (or, if performance is not a concern, - // the WithAttributes) option to include measurement attributes. - Add(ctx context.Context, incr float64, options ...AddOption) -} - -// Float64UpDownCounterConfig contains options for synchronous counter -// instruments that record int64 values. -type Float64UpDownCounterConfig struct { - description string - unit string -} - -// NewFloat64UpDownCounterConfig returns a new [Float64UpDownCounterConfig] -// with all opts applied. -func NewFloat64UpDownCounterConfig(opts ...Float64UpDownCounterOption) Float64UpDownCounterConfig { - var config Float64UpDownCounterConfig - for _, o := range opts { - config = o.applyFloat64UpDownCounter(config) - } - return config -} - -// Description returns the configured description. -func (c Float64UpDownCounterConfig) Description() string { - return c.description -} - -// Unit returns the configured unit. -func (c Float64UpDownCounterConfig) Unit() string { - return c.unit -} - -// Float64UpDownCounterOption applies options to a -// [Float64UpDownCounterConfig]. See [InstrumentOption] for other options that -// can be used as a Float64UpDownCounterOption. 
-type Float64UpDownCounterOption interface { - applyFloat64UpDownCounter(Float64UpDownCounterConfig) Float64UpDownCounterConfig -} - -// Float64Histogram is an instrument that records a distribution of float64 -// values. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Float64Histogram interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Float64Histogram - - // Record adds an additional value to the distribution. - // - // Use the WithAttributeSet (or, if performance is not a concern, - // the WithAttributes) option to include measurement attributes. - Record(ctx context.Context, incr float64, options ...RecordOption) -} - -// Float64HistogramConfig contains options for synchronous counter instruments -// that record int64 values. -type Float64HistogramConfig struct { - description string - unit string -} - -// NewFloat64HistogramConfig returns a new [Float64HistogramConfig] with all -// opts applied. -func NewFloat64HistogramConfig(opts ...Float64HistogramOption) Float64HistogramConfig { - var config Float64HistogramConfig - for _, o := range opts { - config = o.applyFloat64Histogram(config) - } - return config -} - -// Description returns the configured description. -func (c Float64HistogramConfig) Description() string { - return c.description -} - -// Unit returns the configured unit. -func (c Float64HistogramConfig) Unit() string { - return c.unit -} - -// Float64HistogramOption applies options to a [Float64HistogramConfig]. See -// [InstrumentOption] for other options that can be used as a -// Float64HistogramOption. 
-type Float64HistogramOption interface { - applyFloat64Histogram(Float64HistogramConfig) Float64HistogramConfig -} diff --git a/vendor/go.opentelemetry.io/otel/metric/syncint64.go b/vendor/go.opentelemetry.io/otel/metric/syncint64.go deleted file mode 100644 index 6f508eb6..00000000 --- a/vendor/go.opentelemetry.io/otel/metric/syncint64.go +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metric // import "go.opentelemetry.io/otel/metric" - -import ( - "context" - - "go.opentelemetry.io/otel/metric/embedded" -) - -// Int64Counter is an instrument that records increasing int64 values. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Int64Counter interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Int64Counter - - // Add records a change to the counter. - // - // Use the WithAttributeSet (or, if performance is not a concern, - // the WithAttributes) option to include measurement attributes. 
- Add(ctx context.Context, incr int64, options ...AddOption) -} - -// Int64CounterConfig contains options for synchronous counter instruments that -// record int64 values. -type Int64CounterConfig struct { - description string - unit string -} - -// NewInt64CounterConfig returns a new [Int64CounterConfig] with all opts -// applied. -func NewInt64CounterConfig(opts ...Int64CounterOption) Int64CounterConfig { - var config Int64CounterConfig - for _, o := range opts { - config = o.applyInt64Counter(config) - } - return config -} - -// Description returns the configured description. -func (c Int64CounterConfig) Description() string { - return c.description -} - -// Unit returns the configured unit. -func (c Int64CounterConfig) Unit() string { - return c.unit -} - -// Int64CounterOption applies options to a [Int64CounterConfig]. See -// [InstrumentOption] for other options that can be used as an -// Int64CounterOption. -type Int64CounterOption interface { - applyInt64Counter(Int64CounterConfig) Int64CounterConfig -} - -// Int64UpDownCounter is an instrument that records increasing or decreasing -// int64 values. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Int64UpDownCounter interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Int64UpDownCounter - - // Add records a change to the counter. - // - // Use the WithAttributeSet (or, if performance is not a concern, - // the WithAttributes) option to include measurement attributes. - Add(ctx context.Context, incr int64, options ...AddOption) -} - -// Int64UpDownCounterConfig contains options for synchronous counter -// instruments that record int64 values. 
-type Int64UpDownCounterConfig struct { - description string - unit string -} - -// NewInt64UpDownCounterConfig returns a new [Int64UpDownCounterConfig] with -// all opts applied. -func NewInt64UpDownCounterConfig(opts ...Int64UpDownCounterOption) Int64UpDownCounterConfig { - var config Int64UpDownCounterConfig - for _, o := range opts { - config = o.applyInt64UpDownCounter(config) - } - return config -} - -// Description returns the configured description. -func (c Int64UpDownCounterConfig) Description() string { - return c.description -} - -// Unit returns the configured unit. -func (c Int64UpDownCounterConfig) Unit() string { - return c.unit -} - -// Int64UpDownCounterOption applies options to a [Int64UpDownCounterConfig]. -// See [InstrumentOption] for other options that can be used as an -// Int64UpDownCounterOption. -type Int64UpDownCounterOption interface { - applyInt64UpDownCounter(Int64UpDownCounterConfig) Int64UpDownCounterConfig -} - -// Int64Histogram is an instrument that records a distribution of int64 -// values. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Int64Histogram interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Int64Histogram - - // Record adds an additional value to the distribution. - // - // Use the WithAttributeSet (or, if performance is not a concern, - // the WithAttributes) option to include measurement attributes. - Record(ctx context.Context, incr int64, options ...RecordOption) -} - -// Int64HistogramConfig contains options for synchronous counter instruments -// that record int64 values. 
-type Int64HistogramConfig struct { - description string - unit string -} - -// NewInt64HistogramConfig returns a new [Int64HistogramConfig] with all opts -// applied. -func NewInt64HistogramConfig(opts ...Int64HistogramOption) Int64HistogramConfig { - var config Int64HistogramConfig - for _, o := range opts { - config = o.applyInt64Histogram(config) - } - return config -} - -// Description returns the configured description. -func (c Int64HistogramConfig) Description() string { - return c.description -} - -// Unit returns the configured unit. -func (c Int64HistogramConfig) Unit() string { - return c.unit -} - -// Int64HistogramOption applies options to a [Int64HistogramConfig]. See -// [InstrumentOption] for other options that can be used as an -// Int64HistogramOption. -type Int64HistogramOption interface { - applyInt64Histogram(Int64HistogramConfig) Int64HistogramConfig -} diff --git a/vendor/go.opentelemetry.io/otel/propagation.go b/vendor/go.opentelemetry.io/otel/propagation.go deleted file mode 100644 index d29aaa32..00000000 --- a/vendor/go.opentelemetry.io/otel/propagation.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package otel // import "go.opentelemetry.io/otel" - -import ( - "go.opentelemetry.io/otel/internal/global" - "go.opentelemetry.io/otel/propagation" -) - -// GetTextMapPropagator returns the global TextMapPropagator. 
If none has been -// set, a No-Op TextMapPropagator is returned. -func GetTextMapPropagator() propagation.TextMapPropagator { - return global.TextMapPropagator() -} - -// SetTextMapPropagator sets propagator as the global TextMapPropagator. -func SetTextMapPropagator(propagator propagation.TextMapPropagator) { - global.SetTextMapPropagator(propagator) -} diff --git a/vendor/go.opentelemetry.io/otel/propagation/baggage.go b/vendor/go.opentelemetry.io/otel/propagation/baggage.go deleted file mode 100644 index 303cdf1c..00000000 --- a/vendor/go.opentelemetry.io/otel/propagation/baggage.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package propagation // import "go.opentelemetry.io/otel/propagation" - -import ( - "context" - - "go.opentelemetry.io/otel/baggage" -) - -const baggageHeader = "baggage" - -// Baggage is a propagator that supports the W3C Baggage format. -// -// This propagates user-defined baggage associated with a trace. The complete -// specification is defined at https://www.w3.org/TR/baggage/. -type Baggage struct{} - -var _ TextMapPropagator = Baggage{} - -// Inject sets baggage key-values from ctx into the carrier. -func (b Baggage) Inject(ctx context.Context, carrier TextMapCarrier) { - bStr := baggage.FromContext(ctx).String() - if bStr != "" { - carrier.Set(baggageHeader, bStr) - } -} - -// Extract returns a copy of parent with the baggage from the carrier added. 
-func (b Baggage) Extract(parent context.Context, carrier TextMapCarrier) context.Context { - bStr := carrier.Get(baggageHeader) - if bStr == "" { - return parent - } - - bag, err := baggage.Parse(bStr) - if err != nil { - return parent - } - return baggage.ContextWithBaggage(parent, bag) -} - -// Fields returns the keys who's values are set with Inject. -func (b Baggage) Fields() []string { - return []string{baggageHeader} -} diff --git a/vendor/go.opentelemetry.io/otel/propagation/doc.go b/vendor/go.opentelemetry.io/otel/propagation/doc.go deleted file mode 100644 index c119eb28..00000000 --- a/vendor/go.opentelemetry.io/otel/propagation/doc.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package propagation contains OpenTelemetry context propagators. - -OpenTelemetry propagators are used to extract and inject context data from and -into messages exchanged by applications. The propagator supported by this -package is the W3C Trace Context encoding -(https://www.w3.org/TR/trace-context/), and W3C Baggage -(https://www.w3.org/TR/baggage/). 
-*/ -package propagation // import "go.opentelemetry.io/otel/propagation" diff --git a/vendor/go.opentelemetry.io/otel/propagation/propagation.go b/vendor/go.opentelemetry.io/otel/propagation/propagation.go deleted file mode 100644 index c94438f7..00000000 --- a/vendor/go.opentelemetry.io/otel/propagation/propagation.go +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package propagation // import "go.opentelemetry.io/otel/propagation" - -import ( - "context" - "net/http" -) - -// TextMapCarrier is the storage medium used by a TextMapPropagator. -type TextMapCarrier interface { - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. - - // Get returns the value associated with the passed key. - Get(key string) string - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. - - // Set stores the key-value pair. - Set(key string, value string) - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. - - // Keys lists the keys stored in this carrier. - Keys() []string - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. 
-} - -// MapCarrier is a TextMapCarrier that uses a map held in memory as a storage -// medium for propagated key-value pairs. -type MapCarrier map[string]string - -// Compile time check that MapCarrier implements the TextMapCarrier. -var _ TextMapCarrier = MapCarrier{} - -// Get returns the value associated with the passed key. -func (c MapCarrier) Get(key string) string { - return c[key] -} - -// Set stores the key-value pair. -func (c MapCarrier) Set(key, value string) { - c[key] = value -} - -// Keys lists the keys stored in this carrier. -func (c MapCarrier) Keys() []string { - keys := make([]string, 0, len(c)) - for k := range c { - keys = append(keys, k) - } - return keys -} - -// HeaderCarrier adapts http.Header to satisfy the TextMapCarrier interface. -type HeaderCarrier http.Header - -// Get returns the value associated with the passed key. -func (hc HeaderCarrier) Get(key string) string { - return http.Header(hc).Get(key) -} - -// Set stores the key-value pair. -func (hc HeaderCarrier) Set(key string, value string) { - http.Header(hc).Set(key, value) -} - -// Keys lists the keys stored in this carrier. -func (hc HeaderCarrier) Keys() []string { - keys := make([]string, 0, len(hc)) - for k := range hc { - keys = append(keys, k) - } - return keys -} - -// TextMapPropagator propagates cross-cutting concerns as key-value text -// pairs within a carrier that travels in-band across process boundaries. -type TextMapPropagator interface { - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. - - // Inject set cross-cutting concerns from the Context into the carrier. - Inject(ctx context.Context, carrier TextMapCarrier) - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. - - // Extract reads cross-cutting concerns from the carrier into a Context. 
- Extract(ctx context.Context, carrier TextMapCarrier) context.Context - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. - - // Fields returns the keys whose values are set with Inject. - Fields() []string - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. -} - -type compositeTextMapPropagator []TextMapPropagator - -func (p compositeTextMapPropagator) Inject(ctx context.Context, carrier TextMapCarrier) { - for _, i := range p { - i.Inject(ctx, carrier) - } -} - -func (p compositeTextMapPropagator) Extract(ctx context.Context, carrier TextMapCarrier) context.Context { - for _, i := range p { - ctx = i.Extract(ctx, carrier) - } - return ctx -} - -func (p compositeTextMapPropagator) Fields() []string { - unique := make(map[string]struct{}) - for _, i := range p { - for _, k := range i.Fields() { - unique[k] = struct{}{} - } - } - - fields := make([]string, 0, len(unique)) - for k := range unique { - fields = append(fields, k) - } - return fields -} - -// NewCompositeTextMapPropagator returns a unified TextMapPropagator from the -// group of passed TextMapPropagator. This allows different cross-cutting -// concerns to be propagates in a unified manner. -// -// The returned TextMapPropagator will inject and extract cross-cutting -// concerns in the order the TextMapPropagators were provided. Additionally, -// the Fields method will return a de-duplicated slice of the keys that are -// set with the Inject method. 
-func NewCompositeTextMapPropagator(p ...TextMapPropagator) TextMapPropagator { - return compositeTextMapPropagator(p) -} diff --git a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go deleted file mode 100644 index 902692da..00000000 --- a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package propagation // import "go.opentelemetry.io/otel/propagation" - -import ( - "context" - "encoding/hex" - "fmt" - "regexp" - - "go.opentelemetry.io/otel/trace" -) - -const ( - supportedVersion = 0 - maxVersion = 254 - traceparentHeader = "traceparent" - tracestateHeader = "tracestate" -) - -// TraceContext is a propagator that supports the W3C Trace Context format -// (https://www.w3.org/TR/trace-context/) -// -// This propagator will propagate the traceparent and tracestate headers to -// guarantee traces are not broken. It is up to the users of this propagator -// to choose if they want to participate in a trace by modifying the -// traceparent header and relevant parts of the tracestate header containing -// their proprietary information. 
-type TraceContext struct{} - -var _ TextMapPropagator = TraceContext{} -var traceCtxRegExp = regexp.MustCompile("^(?P[0-9a-f]{2})-(?P[a-f0-9]{32})-(?P[a-f0-9]{16})-(?P[a-f0-9]{2})(?:-.*)?$") - -// Inject set tracecontext from the Context into the carrier. -func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) { - sc := trace.SpanContextFromContext(ctx) - if !sc.IsValid() { - return - } - - if ts := sc.TraceState().String(); ts != "" { - carrier.Set(tracestateHeader, ts) - } - - // Clear all flags other than the trace-context supported sampling bit. - flags := sc.TraceFlags() & trace.FlagsSampled - - h := fmt.Sprintf("%.2x-%s-%s-%s", - supportedVersion, - sc.TraceID(), - sc.SpanID(), - flags) - carrier.Set(traceparentHeader, h) -} - -// Extract reads tracecontext from the carrier into a returned Context. -// -// The returned Context will be a copy of ctx and contain the extracted -// tracecontext as the remote SpanContext. If the extracted tracecontext is -// invalid, the passed ctx will be returned directly instead. 
-func (tc TraceContext) Extract(ctx context.Context, carrier TextMapCarrier) context.Context { - sc := tc.extract(carrier) - if !sc.IsValid() { - return ctx - } - return trace.ContextWithRemoteSpanContext(ctx, sc) -} - -func (tc TraceContext) extract(carrier TextMapCarrier) trace.SpanContext { - h := carrier.Get(traceparentHeader) - if h == "" { - return trace.SpanContext{} - } - - matches := traceCtxRegExp.FindStringSubmatch(h) - - if len(matches) == 0 { - return trace.SpanContext{} - } - - if len(matches) < 5 { // four subgroups plus the overall match - return trace.SpanContext{} - } - - if len(matches[1]) != 2 { - return trace.SpanContext{} - } - ver, err := hex.DecodeString(matches[1]) - if err != nil { - return trace.SpanContext{} - } - version := int(ver[0]) - if version > maxVersion { - return trace.SpanContext{} - } - - if version == 0 && len(matches) != 5 { // four subgroups plus the overall match - return trace.SpanContext{} - } - - if len(matches[2]) != 32 { - return trace.SpanContext{} - } - - var scc trace.SpanContextConfig - - scc.TraceID, err = trace.TraceIDFromHex(matches[2][:32]) - if err != nil { - return trace.SpanContext{} - } - - if len(matches[3]) != 16 { - return trace.SpanContext{} - } - scc.SpanID, err = trace.SpanIDFromHex(matches[3]) - if err != nil { - return trace.SpanContext{} - } - - if len(matches[4]) != 2 { - return trace.SpanContext{} - } - opts, err := hex.DecodeString(matches[4]) - if err != nil || len(opts) < 1 || (version == 0 && opts[0] > 2) { - return trace.SpanContext{} - } - // Clear all flags other than the trace-context supported sampling bit. - scc.TraceFlags = trace.TraceFlags(opts[0]) & trace.FlagsSampled - - // Ignore the error returned here. Failure to parse tracestate MUST NOT - // affect the parsing of traceparent according to the W3C tracecontext - // specification. 
- scc.TraceState, _ = trace.ParseTraceState(carrier.Get(tracestateHeader)) - scc.Remote = true - - sc := trace.NewSpanContext(scc) - if !sc.IsValid() { - return trace.SpanContext{} - } - - return sc -} - -// Fields returns the keys who's values are set with Inject. -func (tc TraceContext) Fields() []string { - return []string{traceparentHeader, tracestateHeader} -} diff --git a/vendor/go.opentelemetry.io/otel/requirements.txt b/vendor/go.opentelemetry.io/otel/requirements.txt deleted file mode 100644 index 407f1748..00000000 --- a/vendor/go.opentelemetry.io/otel/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -codespell==2.2.4 diff --git a/vendor/go.opentelemetry.io/otel/sdk/LICENSE b/vendor/go.opentelemetry.io/otel/sdk/LICENSE deleted file mode 100644 index 261eeb9e..00000000 --- a/vendor/go.opentelemetry.io/otel/sdk/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go deleted file mode 100644 index 6e923aca..00000000 --- a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package instrumentation provides types to represent the code libraries that -// provide OpenTelemetry instrumentation. These types are used in the -// OpenTelemetry signal pipelines to identify the source of telemetry. -// -// See -// https://github.com/open-telemetry/oteps/blob/d226b677d73a785523fe9b9701be13225ebc528d/text/0083-component.md -// and -// https://github.com/open-telemetry/oteps/blob/d226b677d73a785523fe9b9701be13225ebc528d/text/0201-scope-attributes.md -// for more information. 
-package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go deleted file mode 100644 index 39f025a1..00000000 --- a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" - -// Library represents the instrumentation library. -// Deprecated: please use Scope instead. -type Library = Scope diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go deleted file mode 100644 index 09c6d93f..00000000 --- a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" - -// Scope represents the instrumentation scope. -type Scope struct { - // Name is the name of the instrumentation scope. This should be the - // Go package name of that scope. - Name string - // Version is the version of the instrumentation scope. - Version string - // SchemaURL of the telemetry emitted by the scope. - SchemaURL string -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go b/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go deleted file mode 100644 index 59dcfab2..00000000 --- a/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go +++ /dev/null @@ -1,177 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package env // import "go.opentelemetry.io/otel/sdk/internal/env" - -import ( - "os" - "strconv" - - "go.opentelemetry.io/otel/internal/global" -) - -// Environment variable names. -const ( - // BatchSpanProcessorScheduleDelayKey is the delay interval between two - // consecutive exports (i.e. 5000). - BatchSpanProcessorScheduleDelayKey = "OTEL_BSP_SCHEDULE_DELAY" - // BatchSpanProcessorExportTimeoutKey is the maximum allowed time to - // export data (i.e. 3000). 
- BatchSpanProcessorExportTimeoutKey = "OTEL_BSP_EXPORT_TIMEOUT" - // BatchSpanProcessorMaxQueueSizeKey is the maximum queue size (i.e. 2048). - BatchSpanProcessorMaxQueueSizeKey = "OTEL_BSP_MAX_QUEUE_SIZE" - // BatchSpanProcessorMaxExportBatchSizeKey is the maximum batch size (i.e. - // 512). Note: it must be less than or equal to - // EnvBatchSpanProcessorMaxQueueSize. - BatchSpanProcessorMaxExportBatchSizeKey = "OTEL_BSP_MAX_EXPORT_BATCH_SIZE" - - // AttributeValueLengthKey is the maximum allowed attribute value size. - AttributeValueLengthKey = "OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT" - - // AttributeCountKey is the maximum allowed span attribute count. - AttributeCountKey = "OTEL_ATTRIBUTE_COUNT_LIMIT" - - // SpanAttributeValueLengthKey is the maximum allowed attribute value size - // for a span. - SpanAttributeValueLengthKey = "OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT" - - // SpanAttributeCountKey is the maximum allowed span attribute count for a - // span. - SpanAttributeCountKey = "OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT" - - // SpanEventCountKey is the maximum allowed span event count. - SpanEventCountKey = "OTEL_SPAN_EVENT_COUNT_LIMIT" - - // SpanEventAttributeCountKey is the maximum allowed attribute per span - // event count. - SpanEventAttributeCountKey = "OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT" - - // SpanLinkCountKey is the maximum allowed span link count. - SpanLinkCountKey = "OTEL_SPAN_LINK_COUNT_LIMIT" - - // SpanLinkAttributeCountKey is the maximum allowed attribute per span - // link count. - SpanLinkAttributeCountKey = "OTEL_LINK_ATTRIBUTE_COUNT_LIMIT" -) - -// firstInt returns the value of the first matching environment variable from -// keys. If the value is not an integer or no match is found, defaultValue is -// returned. 
-func firstInt(defaultValue int, keys ...string) int { - for _, key := range keys { - value := os.Getenv(key) - if value == "" { - continue - } - - intValue, err := strconv.Atoi(value) - if err != nil { - global.Info("Got invalid value, number value expected.", key, value) - return defaultValue - } - - return intValue - } - - return defaultValue -} - -// IntEnvOr returns the int value of the environment variable with name key if -// it exists, it is not empty, and the value is an int. Otherwise, defaultValue is returned. -func IntEnvOr(key string, defaultValue int) int { - value := os.Getenv(key) - if value == "" { - return defaultValue - } - - intValue, err := strconv.Atoi(value) - if err != nil { - global.Info("Got invalid value, number value expected.", key, value) - return defaultValue - } - - return intValue -} - -// BatchSpanProcessorScheduleDelay returns the environment variable value for -// the OTEL_BSP_SCHEDULE_DELAY key if it exists, otherwise defaultValue is -// returned. -func BatchSpanProcessorScheduleDelay(defaultValue int) int { - return IntEnvOr(BatchSpanProcessorScheduleDelayKey, defaultValue) -} - -// BatchSpanProcessorExportTimeout returns the environment variable value for -// the OTEL_BSP_EXPORT_TIMEOUT key if it exists, otherwise defaultValue is -// returned. -func BatchSpanProcessorExportTimeout(defaultValue int) int { - return IntEnvOr(BatchSpanProcessorExportTimeoutKey, defaultValue) -} - -// BatchSpanProcessorMaxQueueSize returns the environment variable value for -// the OTEL_BSP_MAX_QUEUE_SIZE key if it exists, otherwise defaultValue is -// returned. -func BatchSpanProcessorMaxQueueSize(defaultValue int) int { - return IntEnvOr(BatchSpanProcessorMaxQueueSizeKey, defaultValue) -} - -// BatchSpanProcessorMaxExportBatchSize returns the environment variable value for -// the OTEL_BSP_MAX_EXPORT_BATCH_SIZE key if it exists, otherwise defaultValue -// is returned. 
-func BatchSpanProcessorMaxExportBatchSize(defaultValue int) int { - return IntEnvOr(BatchSpanProcessorMaxExportBatchSizeKey, defaultValue) -} - -// SpanAttributeValueLength returns the environment variable value for the -// OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT key if it exists. Otherwise, the -// environment variable value for OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT is -// returned or defaultValue if that is not set. -func SpanAttributeValueLength(defaultValue int) int { - return firstInt(defaultValue, SpanAttributeValueLengthKey, AttributeValueLengthKey) -} - -// SpanAttributeCount returns the environment variable value for the -// OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT key if it exists. Otherwise, the -// environment variable value for OTEL_ATTRIBUTE_COUNT_LIMIT is returned or -// defaultValue if that is not set. -func SpanAttributeCount(defaultValue int) int { - return firstInt(defaultValue, SpanAttributeCountKey, AttributeCountKey) -} - -// SpanEventCount returns the environment variable value for the -// OTEL_SPAN_EVENT_COUNT_LIMIT key if it exists, otherwise defaultValue is -// returned. -func SpanEventCount(defaultValue int) int { - return IntEnvOr(SpanEventCountKey, defaultValue) -} - -// SpanEventAttributeCount returns the environment variable value for the -// OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT key if it exists, otherwise defaultValue -// is returned. -func SpanEventAttributeCount(defaultValue int) int { - return IntEnvOr(SpanEventAttributeCountKey, defaultValue) -} - -// SpanLinkCount returns the environment variable value for the -// OTEL_SPAN_LINK_COUNT_LIMIT key if it exists, otherwise defaultValue is -// returned. -func SpanLinkCount(defaultValue int) int { - return IntEnvOr(SpanLinkCountKey, defaultValue) -} - -// SpanLinkAttributeCount returns the environment variable value for the -// OTEL_LINK_ATTRIBUTE_COUNT_LIMIT key if it exists, otherwise defaultValue is -// returned. 
-func SpanLinkAttributeCount(defaultValue int) int { - return IntEnvOr(SpanLinkAttributeCountKey, defaultValue) -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/internal.go b/vendor/go.opentelemetry.io/otel/sdk/internal/internal.go deleted file mode 100644 index dfeaaa8c..00000000 --- a/vendor/go.opentelemetry.io/otel/sdk/internal/internal.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal // import "go.opentelemetry.io/otel/sdk/internal" - -import "time" - -// MonotonicEndTime returns the end time at present -// but offset from start, monotonically. -// -// The monotonic clock is used in subtractions hence -// the duration since start added back to start gives -// end as a monotonic time. -// See https://golang.org/pkg/time/#hdr-Monotonic_Clocks -func MonotonicEndTime(start time.Time) time.Time { - return start.Add(time.Since(start)) -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go deleted file mode 100644 index 324dd4ba..00000000 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package resource // import "go.opentelemetry.io/otel/sdk/resource" - -import ( - "context" - "errors" - "fmt" - "strings" -) - -var ( - // ErrPartialResource is returned by a detector when complete source - // information for a Resource is unavailable or the source information - // contains invalid values that are omitted from the returned Resource. - ErrPartialResource = errors.New("partial resource") -) - -// Detector detects OpenTelemetry resource information. -type Detector interface { - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. - - // Detect returns an initialized Resource based on gathered information. - // If the source information to construct a Resource contains invalid - // values, a Resource is returned with the valid parts of the source - // information used for initialization along with an appropriately - // wrapped ErrPartialResource error. - Detect(ctx context.Context) (*Resource, error) - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. -} - -// Detect calls all input detectors sequentially and merges each result with the previous one. -// It returns the merged error too. -func Detect(ctx context.Context, detectors ...Detector) (*Resource, error) { - r := new(Resource) - return r, detect(ctx, r, detectors) -} - -// detect runs all detectors using ctx and merges the result into res. This -// assumes res is allocated and not nil, it will panic otherwise. 
-func detect(ctx context.Context, res *Resource, detectors []Detector) error { - var ( - r *Resource - errs detectErrs - err error - ) - - for _, detector := range detectors { - if detector == nil { - continue - } - r, err = detector.Detect(ctx) - if err != nil { - errs = append(errs, err) - if !errors.Is(err, ErrPartialResource) { - continue - } - } - r, err = Merge(res, r) - if err != nil { - errs = append(errs, err) - } - *res = *r - } - - if len(errs) == 0 { - return nil - } - return errs -} - -type detectErrs []error - -func (e detectErrs) Error() string { - errStr := make([]string, len(e)) - for i, err := range e { - errStr[i] = fmt.Sprintf("* %s", err) - } - - format := "%d errors occurred detecting resource:\n\t%s" - return fmt.Sprintf(format, len(e), strings.Join(errStr, "\n\t")) -} - -func (e detectErrs) Unwrap() error { - switch len(e) { - case 0: - return nil - case 1: - return e[0] - } - return e[1:] -} - -func (e detectErrs) Is(target error) bool { - return len(e) != 0 && errors.Is(e[0], target) -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go deleted file mode 100644 index 72320ca5..00000000 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package resource // import "go.opentelemetry.io/otel/sdk/resource" - -import ( - "context" - "fmt" - "os" - "path/filepath" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/sdk" - semconv "go.opentelemetry.io/otel/semconv/v1.17.0" -) - -type ( - // telemetrySDK is a Detector that provides information about - // the OpenTelemetry SDK used. This Detector is included as a - // builtin. If these resource attributes are not wanted, use - // the WithTelemetrySDK(nil) or WithoutBuiltin() options to - // explicitly disable them. - telemetrySDK struct{} - - // host is a Detector that provides information about the host - // being run on. This Detector is included as a builtin. If - // these resource attributes are not wanted, use the - // WithHost(nil) or WithoutBuiltin() options to explicitly - // disable them. - host struct{} - - stringDetector struct { - schemaURL string - K attribute.Key - F func() (string, error) - } - - defaultServiceNameDetector struct{} -) - -var ( - _ Detector = telemetrySDK{} - _ Detector = host{} - _ Detector = stringDetector{} - _ Detector = defaultServiceNameDetector{} -) - -// Detect returns a *Resource that describes the OpenTelemetry SDK used. -func (telemetrySDK) Detect(context.Context) (*Resource, error) { - return NewWithAttributes( - semconv.SchemaURL, - semconv.TelemetrySDKName("opentelemetry"), - semconv.TelemetrySDKLanguageGo, - semconv.TelemetrySDKVersion(sdk.Version()), - ), nil -} - -// Detect returns a *Resource that describes the host being run on. -func (host) Detect(ctx context.Context) (*Resource, error) { - return StringDetector(semconv.SchemaURL, semconv.HostNameKey, os.Hostname).Detect(ctx) -} - -// StringDetector returns a Detector that will produce a *Resource -// containing the string as a value corresponding to k. The resulting Resource -// will have the specified schemaURL. 
-func StringDetector(schemaURL string, k attribute.Key, f func() (string, error)) Detector { - return stringDetector{schemaURL: schemaURL, K: k, F: f} -} - -// Detect returns a *Resource that describes the string as a value -// corresponding to attribute.Key as well as the specific schemaURL. -func (sd stringDetector) Detect(ctx context.Context) (*Resource, error) { - value, err := sd.F() - if err != nil { - return nil, fmt.Errorf("%s: %w", string(sd.K), err) - } - a := sd.K.String(value) - if !a.Valid() { - return nil, fmt.Errorf("invalid attribute: %q -> %q", a.Key, a.Value.Emit()) - } - return NewWithAttributes(sd.schemaURL, sd.K.String(value)), nil -} - -// Detect implements Detector. -func (defaultServiceNameDetector) Detect(ctx context.Context) (*Resource, error) { - return StringDetector( - semconv.SchemaURL, - semconv.ServiceNameKey, - func() (string, error) { - executable, err := os.Executable() - if err != nil { - return "unknown_service:go", nil - } - return "unknown_service:" + filepath.Base(executable), nil - }, - ).Detect(ctx) -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/config.go b/vendor/go.opentelemetry.io/otel/sdk/resource/config.go deleted file mode 100644 index f263919f..00000000 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/config.go +++ /dev/null @@ -1,206 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package resource // import "go.opentelemetry.io/otel/sdk/resource" - -import ( - "context" - - "go.opentelemetry.io/otel/attribute" -) - -// config contains configuration for Resource creation. -type config struct { - // detectors that will be evaluated. - detectors []Detector - // SchemaURL to associate with the Resource. - schemaURL string -} - -// Option is the interface that applies a configuration option. -type Option interface { - // apply sets the Option value of a config. - apply(config) config -} - -// WithAttributes adds attributes to the configured Resource. -func WithAttributes(attributes ...attribute.KeyValue) Option { - return WithDetectors(detectAttributes{attributes}) -} - -type detectAttributes struct { - attributes []attribute.KeyValue -} - -func (d detectAttributes) Detect(context.Context) (*Resource, error) { - return NewSchemaless(d.attributes...), nil -} - -// WithDetectors adds detectors to be evaluated for the configured resource. -func WithDetectors(detectors ...Detector) Option { - return detectorsOption{detectors: detectors} -} - -type detectorsOption struct { - detectors []Detector -} - -func (o detectorsOption) apply(cfg config) config { - cfg.detectors = append(cfg.detectors, o.detectors...) - return cfg -} - -// WithFromEnv adds attributes from environment variables to the configured resource. -func WithFromEnv() Option { - return WithDetectors(fromEnv{}) -} - -// WithHost adds attributes from the host to the configured resource. -func WithHost() Option { - return WithDetectors(host{}) -} - -// WithHostID adds host ID information to the configured resource. -func WithHostID() Option { - return WithDetectors(hostIDDetector{}) -} - -// WithTelemetrySDK adds TelemetrySDK version info to the configured resource. -func WithTelemetrySDK() Option { - return WithDetectors(telemetrySDK{}) -} - -// WithSchemaURL sets the schema URL for the configured resource. 
-func WithSchemaURL(schemaURL string) Option { - return schemaURLOption(schemaURL) -} - -type schemaURLOption string - -func (o schemaURLOption) apply(cfg config) config { - cfg.schemaURL = string(o) - return cfg -} - -// WithOS adds all the OS attributes to the configured Resource. -// See individual WithOS* functions to configure specific attributes. -func WithOS() Option { - return WithDetectors( - osTypeDetector{}, - osDescriptionDetector{}, - ) -} - -// WithOSType adds an attribute with the operating system type to the configured Resource. -func WithOSType() Option { - return WithDetectors(osTypeDetector{}) -} - -// WithOSDescription adds an attribute with the operating system description to the -// configured Resource. The formatted string is equivalent to the output of the -// `uname -snrvm` command. -func WithOSDescription() Option { - return WithDetectors(osDescriptionDetector{}) -} - -// WithProcess adds all the Process attributes to the configured Resource. -// -// Warning! This option will include process command line arguments. If these -// contain sensitive information it will be included in the exported resource. -// -// This option is equivalent to calling WithProcessPID, -// WithProcessExecutableName, WithProcessExecutablePath, -// WithProcessCommandArgs, WithProcessOwner, WithProcessRuntimeName, -// WithProcessRuntimeVersion, and WithProcessRuntimeDescription. See each -// option function for information about what resource attributes each -// includes. -func WithProcess() Option { - return WithDetectors( - processPIDDetector{}, - processExecutableNameDetector{}, - processExecutablePathDetector{}, - processCommandArgsDetector{}, - processOwnerDetector{}, - processRuntimeNameDetector{}, - processRuntimeVersionDetector{}, - processRuntimeDescriptionDetector{}, - ) -} - -// WithProcessPID adds an attribute with the process identifier (PID) to the -// configured Resource. 
-func WithProcessPID() Option { - return WithDetectors(processPIDDetector{}) -} - -// WithProcessExecutableName adds an attribute with the name of the process -// executable to the configured Resource. -func WithProcessExecutableName() Option { - return WithDetectors(processExecutableNameDetector{}) -} - -// WithProcessExecutablePath adds an attribute with the full path to the process -// executable to the configured Resource. -func WithProcessExecutablePath() Option { - return WithDetectors(processExecutablePathDetector{}) -} - -// WithProcessCommandArgs adds an attribute with all the command arguments (including -// the command/executable itself) as received by the process to the configured -// Resource. -// -// Warning! This option will include process command line arguments. If these -// contain sensitive information it will be included in the exported resource. -func WithProcessCommandArgs() Option { - return WithDetectors(processCommandArgsDetector{}) -} - -// WithProcessOwner adds an attribute with the username of the user that owns the process -// to the configured Resource. -func WithProcessOwner() Option { - return WithDetectors(processOwnerDetector{}) -} - -// WithProcessRuntimeName adds an attribute with the name of the runtime of this -// process to the configured Resource. -func WithProcessRuntimeName() Option { - return WithDetectors(processRuntimeNameDetector{}) -} - -// WithProcessRuntimeVersion adds an attribute with the version of the runtime of -// this process to the configured Resource. -func WithProcessRuntimeVersion() Option { - return WithDetectors(processRuntimeVersionDetector{}) -} - -// WithProcessRuntimeDescription adds an attribute with an additional description -// about the runtime of the process to the configured Resource. -func WithProcessRuntimeDescription() Option { - return WithDetectors(processRuntimeDescriptionDetector{}) -} - -// WithContainer adds all the Container attributes to the configured Resource. 
-// See individual WithContainer* functions to configure specific attributes. -func WithContainer() Option { - return WithDetectors( - cgroupContainerIDDetector{}, - ) -} - -// WithContainerID adds an attribute with the id of the container to the configured Resource. -// Note: WithContainerID will not extract the correct container ID in an ECS environment. -// Please use the ECS resource detector instead (https://pkg.go.dev/go.opentelemetry.io/contrib/detectors/aws/ecs). -func WithContainerID() Option { - return WithDetectors(cgroupContainerIDDetector{}) -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go deleted file mode 100644 index 318dcf82..00000000 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package resource // import "go.opentelemetry.io/otel/sdk/resource" - -import ( - "bufio" - "context" - "errors" - "io" - "os" - "regexp" - - semconv "go.opentelemetry.io/otel/semconv/v1.17.0" -) - -type containerIDProvider func() (string, error) - -var ( - containerID containerIDProvider = getContainerIDFromCGroup - cgroupContainerIDRe = regexp.MustCompile(`^.*/(?:.*-)?([0-9a-f]+)(?:\.|\s*$)`) -) - -type cgroupContainerIDDetector struct{} - -const cgroupPath = "/proc/self/cgroup" - -// Detect returns a *Resource that describes the id of the container. -// If no container id found, an empty resource will be returned. -func (cgroupContainerIDDetector) Detect(ctx context.Context) (*Resource, error) { - containerID, err := containerID() - if err != nil { - return nil, err - } - - if containerID == "" { - return Empty(), nil - } - return NewWithAttributes(semconv.SchemaURL, semconv.ContainerID(containerID)), nil -} - -var ( - defaultOSStat = os.Stat - osStat = defaultOSStat - - defaultOSOpen = func(name string) (io.ReadCloser, error) { - return os.Open(name) - } - osOpen = defaultOSOpen -) - -// getContainerIDFromCGroup returns the id of the container from the cgroup file. -// If no container id found, an empty string will be returned. -func getContainerIDFromCGroup() (string, error) { - if _, err := osStat(cgroupPath); errors.Is(err, os.ErrNotExist) { - // File does not exist, skip - return "", nil - } - - file, err := osOpen(cgroupPath) - if err != nil { - return "", err - } - defer file.Close() - - return getContainerIDFromReader(file), nil -} - -// getContainerIDFromReader returns the id of the container from reader. -func getContainerIDFromReader(reader io.Reader) string { - scanner := bufio.NewScanner(reader) - for scanner.Scan() { - line := scanner.Text() - - if id := getContainerIDFromLine(line); id != "" { - return id - } - } - return "" -} - -// getContainerIDFromLine returns the id of the container from one string line. 
-func getContainerIDFromLine(line string) string { - matches := cgroupContainerIDRe.FindStringSubmatch(line) - if len(matches) <= 1 { - return "" - } - return matches[1] -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go b/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go deleted file mode 100644 index d55a50b0..00000000 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package resource provides detecting and representing resources. -// -// The fundamental struct is a Resource which holds identifying information -// about the entities for which telemetry is exported. -// -// To automatically construct Resources from an environment a Detector -// interface is defined. Implementations of this interface can be passed to -// the Detect function to generate a Resource from the merged information. -// -// To load a user defined Resource from the environment variable -// OTEL_RESOURCE_ATTRIBUTES the FromEnv Detector can be used. It will interpret -// the value as a list of comma delimited key/value pairs -// (e.g. `=,=,...`). -// -// While this package provides a stable API, -// the attributes added by resource detectors may change. 
-package resource // import "go.opentelemetry.io/otel/sdk/resource" diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go deleted file mode 100644 index f09a7819..00000000 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package resource // import "go.opentelemetry.io/otel/sdk/resource" - -import ( - "context" - "fmt" - "net/url" - "os" - "strings" - - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.17.0" -) - -const ( - // resourceAttrKey is the environment variable name OpenTelemetry Resource information will be read from. - resourceAttrKey = "OTEL_RESOURCE_ATTRIBUTES" - - // svcNameKey is the environment variable name that Service Name information will be read from. - svcNameKey = "OTEL_SERVICE_NAME" -) - -var ( - // errMissingValue is returned when a resource value is missing. - errMissingValue = fmt.Errorf("%w: missing value", ErrPartialResource) -) - -// fromEnv is a Detector that implements the Detector and collects -// resources from environment. This Detector is included as a -// builtin. -type fromEnv struct{} - -// compile time assertion that FromEnv implements Detector interface. -var _ Detector = fromEnv{} - -// Detect collects resources from environment. 
-func (fromEnv) Detect(context.Context) (*Resource, error) { - attrs := strings.TrimSpace(os.Getenv(resourceAttrKey)) - svcName := strings.TrimSpace(os.Getenv(svcNameKey)) - - if attrs == "" && svcName == "" { - return Empty(), nil - } - - var res *Resource - - if svcName != "" { - res = NewSchemaless(semconv.ServiceName(svcName)) - } - - r2, err := constructOTResources(attrs) - - // Ensure that the resource with the service name from OTEL_SERVICE_NAME - // takes precedence, if it was defined. - res, err2 := Merge(r2, res) - - if err == nil { - err = err2 - } else if err2 != nil { - err = fmt.Errorf("detecting resources: %s", []string{err.Error(), err2.Error()}) - } - - return res, err -} - -func constructOTResources(s string) (*Resource, error) { - if s == "" { - return Empty(), nil - } - pairs := strings.Split(s, ",") - var attrs []attribute.KeyValue - var invalid []string - for _, p := range pairs { - k, v, found := strings.Cut(p, "=") - if !found { - invalid = append(invalid, p) - continue - } - key := strings.TrimSpace(k) - val, err := url.QueryUnescape(strings.TrimSpace(v)) - if err != nil { - // Retain original value if decoding fails, otherwise it will be - // an empty string. - val = v - otel.Handle(err) - } - attrs = append(attrs, attribute.String(key, val)) - } - var err error - if len(invalid) > 0 { - err = fmt.Errorf("%w: %v", errMissingValue, invalid) - } - return NewSchemaless(attrs...), err -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go deleted file mode 100644 index b8e934d4..00000000 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package resource // import "go.opentelemetry.io/otel/sdk/resource" - -import ( - "context" - "errors" - "strings" - - semconv "go.opentelemetry.io/otel/semconv/v1.17.0" -) - -type hostIDProvider func() (string, error) - -var defaultHostIDProvider hostIDProvider = platformHostIDReader.read - -var hostID = defaultHostIDProvider - -type hostIDReader interface { - read() (string, error) -} - -type fileReader func(string) (string, error) - -type commandExecutor func(string, ...string) (string, error) - -// hostIDReaderBSD implements hostIDReader. -type hostIDReaderBSD struct { - execCommand commandExecutor - readFile fileReader -} - -// read attempts to read the machine-id from /etc/hostid. If not found it will -// execute `kenv -q smbios.system.uuid`. If neither location yields an id an -// error will be returned. -func (r *hostIDReaderBSD) read() (string, error) { - if result, err := r.readFile("/etc/hostid"); err == nil { - return strings.TrimSpace(result), nil - } - - if result, err := r.execCommand("kenv", "-q", "smbios.system.uuid"); err == nil { - return strings.TrimSpace(result), nil - } - - return "", errors.New("host id not found in: /etc/hostid or kenv") -} - -// hostIDReaderDarwin implements hostIDReader. -type hostIDReaderDarwin struct { - execCommand commandExecutor -} - -// read executes `ioreg -rd1 -c "IOPlatformExpertDevice"` and parses host id -// from the IOPlatformUUID line. If the command fails or the uuid cannot be -// parsed an error will be returned. 
-func (r *hostIDReaderDarwin) read() (string, error) { - result, err := r.execCommand("ioreg", "-rd1", "-c", "IOPlatformExpertDevice") - if err != nil { - return "", err - } - - lines := strings.Split(result, "\n") - for _, line := range lines { - if strings.Contains(line, "IOPlatformUUID") { - parts := strings.Split(line, " = ") - if len(parts) == 2 { - return strings.Trim(parts[1], "\""), nil - } - break - } - } - - return "", errors.New("could not parse IOPlatformUUID") -} - -type hostIDReaderLinux struct { - readFile fileReader -} - -// read attempts to read the machine-id from /etc/machine-id followed by -// /var/lib/dbus/machine-id. If neither location yields an ID an error will -// be returned. -func (r *hostIDReaderLinux) read() (string, error) { - if result, err := r.readFile("/etc/machine-id"); err == nil { - return strings.TrimSpace(result), nil - } - - if result, err := r.readFile("/var/lib/dbus/machine-id"); err == nil { - return strings.TrimSpace(result), nil - } - - return "", errors.New("host id not found in: /etc/machine-id or /var/lib/dbus/machine-id") -} - -type hostIDDetector struct{} - -// Detect returns a *Resource containing the platform specific host id. -func (hostIDDetector) Detect(ctx context.Context) (*Resource, error) { - hostID, err := hostID() - if err != nil { - return nil, err - } - - return NewWithAttributes( - semconv.SchemaURL, - semconv.HostID(hostID), - ), nil -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go deleted file mode 100644 index 1778bbac..00000000 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build dragonfly || freebsd || netbsd || openbsd || solaris -// +build dragonfly freebsd netbsd openbsd solaris - -package resource // import "go.opentelemetry.io/otel/sdk/resource" - -var platformHostIDReader hostIDReader = &hostIDReaderBSD{ - execCommand: execCommand, - readFile: readFile, -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go deleted file mode 100644 index ba41409b..00000000 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package resource // import "go.opentelemetry.io/otel/sdk/resource" - -var platformHostIDReader hostIDReader = &hostIDReaderDarwin{ - execCommand: execCommand, -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go deleted file mode 100644 index 207acb0e..00000000 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build darwin || dragonfly || freebsd || netbsd || openbsd || solaris - -package resource // import "go.opentelemetry.io/otel/sdk/resource" - -import "os/exec" - -func execCommand(name string, arg ...string) (string, error) { - cmd := exec.Command(name, arg...) - b, err := cmd.Output() - if err != nil { - return "", err - } - - return string(b), nil -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go deleted file mode 100644 index 410579b8..00000000 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build linux -// +build linux - -package resource // import "go.opentelemetry.io/otel/sdk/resource" - -var platformHostIDReader hostIDReader = &hostIDReaderLinux{ - readFile: readFile, -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go deleted file mode 100644 index f92c6dad..00000000 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -//go:build linux || dragonfly || freebsd || netbsd || openbsd || solaris - -package resource // import "go.opentelemetry.io/otel/sdk/resource" - -import "os" - -func readFile(filename string) (string, error) { - b, err := os.ReadFile(filename) - if err != nil { - return "", nil - } - - return string(b), nil -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go deleted file mode 100644 index 89df9d68..00000000 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !darwin -// +build !dragonfly -// +build !freebsd -// +build !linux -// +build !netbsd -// +build !openbsd -// +build !solaris -// +build !windows - -package resource // import "go.opentelemetry.io/otel/sdk/resource" - -// hostIDReaderUnsupported is a placeholder implementation for operating systems -// for which this project currently doesn't support host.id -// attribute detection. See build tags declaration early on this file -// for a list of unsupported OSes. 
-type hostIDReaderUnsupported struct{} - -func (*hostIDReaderUnsupported) read() (string, error) { - return "", nil -} - -var platformHostIDReader hostIDReader = &hostIDReaderUnsupported{} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go deleted file mode 100644 index 5b431c6e..00000000 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -//go:build windows -// +build windows - -package resource // import "go.opentelemetry.io/otel/sdk/resource" - -import ( - "golang.org/x/sys/windows/registry" -) - -// implements hostIDReader -type hostIDReaderWindows struct{} - -// read reads MachineGuid from the windows registry key: -// SOFTWARE\Microsoft\Cryptography -func (*hostIDReaderWindows) read() (string, error) { - k, err := registry.OpenKey( - registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Cryptography`, - registry.QUERY_VALUE|registry.WOW64_64KEY, - ) - - if err != nil { - return "", err - } - defer k.Close() - - guid, _, err := k.GetStringValue("MachineGuid") - if err != nil { - return "", err - } - - return guid, nil -} - -var platformHostIDReader hostIDReader = &hostIDReaderWindows{} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go deleted file mode 100644 index 815fe5c2..00000000 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package resource // import "go.opentelemetry.io/otel/sdk/resource" - -import ( - "context" - "strings" - - "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.17.0" -) - -type osDescriptionProvider func() (string, error) - -var defaultOSDescriptionProvider osDescriptionProvider = platformOSDescription - -var osDescription = defaultOSDescriptionProvider - -func setDefaultOSDescriptionProvider() { - setOSDescriptionProvider(defaultOSDescriptionProvider) -} - -func setOSDescriptionProvider(osDescriptionProvider osDescriptionProvider) { - osDescription = osDescriptionProvider -} - -type osTypeDetector struct{} -type osDescriptionDetector struct{} - -// Detect returns a *Resource that describes the operating system type the -// service is running on. -func (osTypeDetector) Detect(ctx context.Context) (*Resource, error) { - osType := runtimeOS() - - osTypeAttribute := mapRuntimeOSToSemconvOSType(osType) - - return NewWithAttributes( - semconv.SchemaURL, - osTypeAttribute, - ), nil -} - -// Detect returns a *Resource that describes the operating system the -// service is running on. -func (osDescriptionDetector) Detect(ctx context.Context) (*Resource, error) { - description, err := osDescription() - - if err != nil { - return nil, err - } - - return NewWithAttributes( - semconv.SchemaURL, - semconv.OSDescription(description), - ), nil -} - -// mapRuntimeOSToSemconvOSType translates the OS name as provided by the Go runtime -// into an OS type attribute with the corresponding value defined by the semantic -// conventions. In case the provided OS name isn't mapped, it's transformed to lowercase -// and used as the value for the returned OS type attribute. 
-func mapRuntimeOSToSemconvOSType(osType string) attribute.KeyValue { - // the elements in this map are the intersection between - // available GOOS values and defined semconv OS types - osTypeAttributeMap := map[string]attribute.KeyValue{ - "darwin": semconv.OSTypeDarwin, - "dragonfly": semconv.OSTypeDragonflyBSD, - "freebsd": semconv.OSTypeFreeBSD, - "linux": semconv.OSTypeLinux, - "netbsd": semconv.OSTypeNetBSD, - "openbsd": semconv.OSTypeOpenBSD, - "solaris": semconv.OSTypeSolaris, - "windows": semconv.OSTypeWindows, - } - - var osTypeAttribute attribute.KeyValue - - if attr, ok := osTypeAttributeMap[osType]; ok { - osTypeAttribute = attr - } else { - osTypeAttribute = semconv.OSTypeKey.String(strings.ToLower(osType)) - } - - return osTypeAttribute -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go deleted file mode 100644 index 24ec8579..00000000 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package resource // import "go.opentelemetry.io/otel/sdk/resource" - -import ( - "encoding/xml" - "fmt" - "io" - "os" -) - -type plist struct { - XMLName xml.Name `xml:"plist"` - Dict dict `xml:"dict"` -} - -type dict struct { - Key []string `xml:"key"` - String []string `xml:"string"` -} - -// osRelease builds a string describing the operating system release based on the -// contents of the property list (.plist) system files. If no .plist files are found, -// or if the required properties to build the release description string are missing, -// an empty string is returned instead. The generated string resembles the output of -// the `sw_vers` commandline program, but in a single-line string. For more information -// about the `sw_vers` program, see: https://www.unix.com/man-page/osx/1/SW_VERS. -func osRelease() string { - file, err := getPlistFile() - if err != nil { - return "" - } - - defer file.Close() - - values, err := parsePlistFile(file) - if err != nil { - return "" - } - - return buildOSRelease(values) -} - -// getPlistFile returns a *os.File pointing to one of the well-known .plist files -// available on macOS. If no file can be opened, it returns an error. -func getPlistFile() (*os.File, error) { - return getFirstAvailableFile([]string{ - "/System/Library/CoreServices/SystemVersion.plist", - "/System/Library/CoreServices/ServerVersion.plist", - }) -} - -// parsePlistFile process the file pointed by `file` as a .plist file and returns -// a map with the key-values for each pair of correlated and elements -// contained in it. 
-func parsePlistFile(file io.Reader) (map[string]string, error) { - var v plist - - err := xml.NewDecoder(file).Decode(&v) - if err != nil { - return nil, err - } - - if len(v.Dict.Key) != len(v.Dict.String) { - return nil, fmt.Errorf("the number of and elements doesn't match") - } - - properties := make(map[string]string, len(v.Dict.Key)) - for i, key := range v.Dict.Key { - properties[key] = v.Dict.String[i] - } - - return properties, nil -} - -// buildOSRelease builds a string describing the OS release based on the properties -// available on the provided map. It tries to find the `ProductName`, `ProductVersion` -// and `ProductBuildVersion` properties. If some of these properties are not found, -// it returns an empty string. -func buildOSRelease(properties map[string]string) string { - productName := properties["ProductName"] - productVersion := properties["ProductVersion"] - productBuildVersion := properties["ProductBuildVersion"] - - if productName == "" || productVersion == "" || productBuildVersion == "" { - return "" - } - - return fmt.Sprintf("%s %s (%s)", productName, productVersion, productBuildVersion) -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go deleted file mode 100644 index c771942d..00000000 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build aix || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix dragonfly freebsd linux netbsd openbsd solaris zos - -package resource // import "go.opentelemetry.io/otel/sdk/resource" - -import ( - "bufio" - "fmt" - "io" - "os" - "strings" -) - -// osRelease builds a string describing the operating system release based on the -// properties of the os-release file. If no os-release file is found, or if the -// required properties to build the release description string are missing, an empty -// string is returned instead. For more information about os-release files, see: -// https://www.freedesktop.org/software/systemd/man/os-release.html -func osRelease() string { - file, err := getOSReleaseFile() - if err != nil { - return "" - } - - defer file.Close() - - values := parseOSReleaseFile(file) - - return buildOSRelease(values) -} - -// getOSReleaseFile returns a *os.File pointing to one of the well-known os-release -// files, according to their order of preference. If no file can be opened, it -// returns an error. -func getOSReleaseFile() (*os.File, error) { - return getFirstAvailableFile([]string{"/etc/os-release", "/usr/lib/os-release"}) -} - -// parseOSReleaseFile process the file pointed by `file` as an os-release file and -// returns a map with the key-values contained in it. Empty lines or lines starting -// with a '#' character are ignored, as well as lines with the missing key=value -// separator. Values are unquoted and unescaped. 
-func parseOSReleaseFile(file io.Reader) map[string]string { - values := make(map[string]string) - scanner := bufio.NewScanner(file) - - for scanner.Scan() { - line := scanner.Text() - - if skip(line) { - continue - } - - key, value, ok := parse(line) - if ok { - values[key] = value - } - } - - return values -} - -// skip returns true if the line is blank or starts with a '#' character, and -// therefore should be skipped from processing. -func skip(line string) bool { - line = strings.TrimSpace(line) - - return len(line) == 0 || strings.HasPrefix(line, "#") -} - -// parse attempts to split the provided line on the first '=' character, and then -// sanitize each side of the split before returning them as a key-value pair. -func parse(line string) (string, string, bool) { - k, v, found := strings.Cut(line, "=") - - if !found || len(k) == 0 { - return "", "", false - } - - key := strings.TrimSpace(k) - value := unescape(unquote(strings.TrimSpace(v))) - - return key, value, true -} - -// unquote checks whether the string `s` is quoted with double or single quotes -// and, if so, returns a version of the string without them. Otherwise it returns -// the provided string unchanged. -func unquote(s string) string { - if len(s) < 2 { - return s - } - - if (s[0] == '"' || s[0] == '\'') && s[0] == s[len(s)-1] { - return s[1 : len(s)-1] - } - - return s -} - -// unescape removes the `\` prefix from some characters that are expected -// to have it added in front of them for escaping purposes. -func unescape(s string) string { - return strings.NewReplacer( - `\$`, `$`, - `\"`, `"`, - `\'`, `'`, - `\\`, `\`, - "\\`", "`", - ).Replace(s) -} - -// buildOSRelease builds a string describing the OS release based on the properties -// available on the provided map. It favors a combination of the `NAME` and `VERSION` -// properties as first option (falling back to `VERSION_ID` if `VERSION` isn't -// found), and using `PRETTY_NAME` alone if some of the previous are not present. 
If -// none of these properties are found, it returns an empty string. -// -// The rationale behind not using `PRETTY_NAME` as first choice was that, for some -// Linux distributions, it doesn't include the same detail that can be found on the -// individual `NAME` and `VERSION` properties, and combining `PRETTY_NAME` with -// other properties can produce "pretty" redundant strings in some cases. -func buildOSRelease(values map[string]string) string { - var osRelease string - - name := values["NAME"] - version := values["VERSION"] - - if version == "" { - version = values["VERSION_ID"] - } - - if name != "" && version != "" { - osRelease = fmt.Sprintf("%s %s", name, version) - } else { - osRelease = values["PRETTY_NAME"] - } - - return osRelease -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go deleted file mode 100644 index 1c84afc1..00000000 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos - -package resource // import "go.opentelemetry.io/otel/sdk/resource" - -import ( - "fmt" - "os" - - "golang.org/x/sys/unix" -) - -type unameProvider func(buf *unix.Utsname) (err error) - -var defaultUnameProvider unameProvider = unix.Uname - -var currentUnameProvider = defaultUnameProvider - -func setDefaultUnameProvider() { - setUnameProvider(defaultUnameProvider) -} - -func setUnameProvider(unameProvider unameProvider) { - currentUnameProvider = unameProvider -} - -// platformOSDescription returns a human readable OS version information string. -// The final string combines OS release information (where available) and the -// result of the `uname` system call. -func platformOSDescription() (string, error) { - uname, err := uname() - if err != nil { - return "", err - } - - osRelease := osRelease() - if osRelease != "" { - return fmt.Sprintf("%s (%s)", osRelease, uname), nil - } - - return uname, nil -} - -// uname issues a uname(2) system call (or equivalent on systems which doesn't -// have one) and formats the output in a single string, similar to the output -// of the `uname` commandline program. The final string resembles the one -// obtained with a call to `uname -snrvm`. -func uname() (string, error) { - var utsName unix.Utsname - - err := currentUnameProvider(&utsName) - if err != nil { - return "", err - } - - return fmt.Sprintf("%s %s %s %s %s", - unix.ByteSliceToString(utsName.Sysname[:]), - unix.ByteSliceToString(utsName.Nodename[:]), - unix.ByteSliceToString(utsName.Release[:]), - unix.ByteSliceToString(utsName.Version[:]), - unix.ByteSliceToString(utsName.Machine[:]), - ), nil -} - -// getFirstAvailableFile returns an *os.File of the first available -// file from a list of candidate file paths. 
-func getFirstAvailableFile(candidates []string) (*os.File, error) { - for _, c := range candidates { - file, err := os.Open(c) - if err == nil { - return file, nil - } - } - - return nil, fmt.Errorf("no candidate file available: %v", candidates) -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go deleted file mode 100644 index 3ebcb534..00000000 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !aix -// +build !darwin -// +build !dragonfly -// +build !freebsd -// +build !linux -// +build !netbsd -// +build !openbsd -// +build !solaris -// +build !windows -// +build !zos - -package resource // import "go.opentelemetry.io/otel/sdk/resource" - -// platformOSDescription is a placeholder implementation for OSes -// for which this project currently doesn't support os.description -// attribute detection. See build tags declaration early on this file -// for a list of unsupported OSes. 
-func platformOSDescription() (string, error) { - return "", nil -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go deleted file mode 100644 index faad64d8..00000000 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package resource // import "go.opentelemetry.io/otel/sdk/resource" - -import ( - "fmt" - "strconv" - - "golang.org/x/sys/windows/registry" -) - -// platformOSDescription returns a human readable OS version information string. -// It does so by querying registry values under the -// `SOFTWARE\Microsoft\Windows NT\CurrentVersion` key. The final string -// resembles the one displayed by the Version Reporter Applet (winver.exe). 
-func platformOSDescription() (string, error) { - k, err := registry.OpenKey( - registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) - - if err != nil { - return "", err - } - - defer k.Close() - - var ( - productName = readProductName(k) - displayVersion = readDisplayVersion(k) - releaseID = readReleaseID(k) - currentMajorVersionNumber = readCurrentMajorVersionNumber(k) - currentMinorVersionNumber = readCurrentMinorVersionNumber(k) - currentBuildNumber = readCurrentBuildNumber(k) - ubr = readUBR(k) - ) - - if displayVersion != "" { - displayVersion += " " - } - - return fmt.Sprintf("%s %s(%s) [Version %s.%s.%s.%s]", - productName, - displayVersion, - releaseID, - currentMajorVersionNumber, - currentMinorVersionNumber, - currentBuildNumber, - ubr, - ), nil -} - -func getStringValue(name string, k registry.Key) string { - value, _, _ := k.GetStringValue(name) - - return value -} - -func getIntegerValue(name string, k registry.Key) uint64 { - value, _, _ := k.GetIntegerValue(name) - - return value -} - -func readProductName(k registry.Key) string { - return getStringValue("ProductName", k) -} - -func readDisplayVersion(k registry.Key) string { - return getStringValue("DisplayVersion", k) -} - -func readReleaseID(k registry.Key) string { - return getStringValue("ReleaseID", k) -} - -func readCurrentMajorVersionNumber(k registry.Key) string { - return strconv.FormatUint(getIntegerValue("CurrentMajorVersionNumber", k), 10) -} - -func readCurrentMinorVersionNumber(k registry.Key) string { - return strconv.FormatUint(getIntegerValue("CurrentMinorVersionNumber", k), 10) -} - -func readCurrentBuildNumber(k registry.Key) string { - return getStringValue("CurrentBuildNumber", k) -} - -func readUBR(k registry.Key) string { - return strconv.FormatUint(getIntegerValue("UBR", k), 10) -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go deleted file mode 100644 
index bdd0e7fe..00000000 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package resource // import "go.opentelemetry.io/otel/sdk/resource" - -import ( - "context" - "fmt" - "os" - "os/user" - "path/filepath" - "runtime" - - semconv "go.opentelemetry.io/otel/semconv/v1.17.0" -) - -type pidProvider func() int -type executablePathProvider func() (string, error) -type commandArgsProvider func() []string -type ownerProvider func() (*user.User, error) -type runtimeNameProvider func() string -type runtimeVersionProvider func() string -type runtimeOSProvider func() string -type runtimeArchProvider func() string - -var ( - defaultPidProvider pidProvider = os.Getpid - defaultExecutablePathProvider executablePathProvider = os.Executable - defaultCommandArgsProvider commandArgsProvider = func() []string { return os.Args } - defaultOwnerProvider ownerProvider = user.Current - defaultRuntimeNameProvider runtimeNameProvider = func() string { - if runtime.Compiler == "gc" { - return "go" - } - return runtime.Compiler - } - defaultRuntimeVersionProvider runtimeVersionProvider = runtime.Version - defaultRuntimeOSProvider runtimeOSProvider = func() string { return runtime.GOOS } - defaultRuntimeArchProvider runtimeArchProvider = func() string { return runtime.GOARCH } -) - -var ( - pid = defaultPidProvider - executablePath = 
defaultExecutablePathProvider - commandArgs = defaultCommandArgsProvider - owner = defaultOwnerProvider - runtimeName = defaultRuntimeNameProvider - runtimeVersion = defaultRuntimeVersionProvider - runtimeOS = defaultRuntimeOSProvider - runtimeArch = defaultRuntimeArchProvider -) - -func setDefaultOSProviders() { - setOSProviders( - defaultPidProvider, - defaultExecutablePathProvider, - defaultCommandArgsProvider, - ) -} - -func setOSProviders( - pidProvider pidProvider, - executablePathProvider executablePathProvider, - commandArgsProvider commandArgsProvider, -) { - pid = pidProvider - executablePath = executablePathProvider - commandArgs = commandArgsProvider -} - -func setDefaultRuntimeProviders() { - setRuntimeProviders( - defaultRuntimeNameProvider, - defaultRuntimeVersionProvider, - defaultRuntimeOSProvider, - defaultRuntimeArchProvider, - ) -} - -func setRuntimeProviders( - runtimeNameProvider runtimeNameProvider, - runtimeVersionProvider runtimeVersionProvider, - runtimeOSProvider runtimeOSProvider, - runtimeArchProvider runtimeArchProvider, -) { - runtimeName = runtimeNameProvider - runtimeVersion = runtimeVersionProvider - runtimeOS = runtimeOSProvider - runtimeArch = runtimeArchProvider -} - -func setDefaultUserProviders() { - setUserProviders(defaultOwnerProvider) -} - -func setUserProviders(ownerProvider ownerProvider) { - owner = ownerProvider -} - -type processPIDDetector struct{} -type processExecutableNameDetector struct{} -type processExecutablePathDetector struct{} -type processCommandArgsDetector struct{} -type processOwnerDetector struct{} -type processRuntimeNameDetector struct{} -type processRuntimeVersionDetector struct{} -type processRuntimeDescriptionDetector struct{} - -// Detect returns a *Resource that describes the process identifier (PID) of the -// executing process. 
-func (processPIDDetector) Detect(ctx context.Context) (*Resource, error) { - return NewWithAttributes(semconv.SchemaURL, semconv.ProcessPID(pid())), nil -} - -// Detect returns a *Resource that describes the name of the process executable. -func (processExecutableNameDetector) Detect(ctx context.Context) (*Resource, error) { - executableName := filepath.Base(commandArgs()[0]) - - return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutableName(executableName)), nil -} - -// Detect returns a *Resource that describes the full path of the process executable. -func (processExecutablePathDetector) Detect(ctx context.Context) (*Resource, error) { - executablePath, err := executablePath() - if err != nil { - return nil, err - } - - return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutablePath(executablePath)), nil -} - -// Detect returns a *Resource that describes all the command arguments as received -// by the process. -func (processCommandArgsDetector) Detect(ctx context.Context) (*Resource, error) { - return NewWithAttributes(semconv.SchemaURL, semconv.ProcessCommandArgs(commandArgs()...)), nil -} - -// Detect returns a *Resource that describes the username of the user that owns the -// process. -func (processOwnerDetector) Detect(ctx context.Context) (*Resource, error) { - owner, err := owner() - if err != nil { - return nil, err - } - - return NewWithAttributes(semconv.SchemaURL, semconv.ProcessOwner(owner.Username)), nil -} - -// Detect returns a *Resource that describes the name of the compiler used to compile -// this process image. -func (processRuntimeNameDetector) Detect(ctx context.Context) (*Resource, error) { - return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeName(runtimeName())), nil -} - -// Detect returns a *Resource that describes the version of the runtime of this process. 
-func (processRuntimeVersionDetector) Detect(ctx context.Context) (*Resource, error) { - return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeVersion(runtimeVersion())), nil -} - -// Detect returns a *Resource that describes the runtime of this process. -func (processRuntimeDescriptionDetector) Detect(ctx context.Context) (*Resource, error) { - runtimeDescription := fmt.Sprintf( - "go version %s %s/%s", runtimeVersion(), runtimeOS(), runtimeArch()) - - return NewWithAttributes( - semconv.SchemaURL, - semconv.ProcessRuntimeDescription(runtimeDescription), - ), nil -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go deleted file mode 100644 index 139dc7e8..00000000 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go +++ /dev/null @@ -1,272 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package resource // import "go.opentelemetry.io/otel/sdk/resource" - -import ( - "context" - "errors" - "sync" - - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/attribute" -) - -// Resource describes an entity about which identifying information -// and metadata is exposed. Resource is an immutable object, -// equivalent to a map from key to unique value. -// -// Resources should be passed and stored as pointers -// (`*resource.Resource`). The `nil` value is equivalent to an empty -// Resource. 
-type Resource struct { - attrs attribute.Set - schemaURL string -} - -var ( - emptyResource Resource - defaultResource *Resource - defaultResourceOnce sync.Once -) - -var errMergeConflictSchemaURL = errors.New("cannot merge resource due to conflicting Schema URL") - -// New returns a Resource combined from the user-provided detectors. -func New(ctx context.Context, opts ...Option) (*Resource, error) { - cfg := config{} - for _, opt := range opts { - cfg = opt.apply(cfg) - } - - r := &Resource{schemaURL: cfg.schemaURL} - return r, detect(ctx, r, cfg.detectors) -} - -// NewWithAttributes creates a resource from attrs and associates the resource with a -// schema URL. If attrs contains duplicate keys, the last value will be used. If attrs -// contains any invalid items those items will be dropped. The attrs are assumed to be -// in a schema identified by schemaURL. -func NewWithAttributes(schemaURL string, attrs ...attribute.KeyValue) *Resource { - resource := NewSchemaless(attrs...) - resource.schemaURL = schemaURL - return resource -} - -// NewSchemaless creates a resource from attrs. If attrs contains duplicate keys, -// the last value will be used. If attrs contains any invalid items those items will -// be dropped. The resource will not be associated with a schema URL. If the schema -// of the attrs is known use NewWithAttributes instead. -func NewSchemaless(attrs ...attribute.KeyValue) *Resource { - if len(attrs) == 0 { - return &emptyResource - } - - // Ensure attributes comply with the specification: - // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/common/README.md#attribute - s, _ := attribute.NewSetWithFiltered(attrs, func(kv attribute.KeyValue) bool { - return kv.Valid() - }) - - // If attrs only contains invalid entries do not allocate a new resource. 
- if s.Len() == 0 { - return &emptyResource - } - - return &Resource{attrs: s} //nolint -} - -// String implements the Stringer interface and provides a -// human-readable form of the resource. -// -// Avoid using this representation as the key in a map of resources, -// use Equivalent() as the key instead. -func (r *Resource) String() string { - if r == nil { - return "" - } - return r.attrs.Encoded(attribute.DefaultEncoder()) -} - -// MarshalLog is the marshaling function used by the logging system to represent this exporter. -func (r *Resource) MarshalLog() interface{} { - return struct { - Attributes attribute.Set - SchemaURL string - }{ - Attributes: r.attrs, - SchemaURL: r.schemaURL, - } -} - -// Attributes returns a copy of attributes from the resource in a sorted order. -// To avoid allocating a new slice, use an iterator. -func (r *Resource) Attributes() []attribute.KeyValue { - if r == nil { - r = Empty() - } - return r.attrs.ToSlice() -} - -// SchemaURL returns the schema URL associated with Resource r. -func (r *Resource) SchemaURL() string { - if r == nil { - return "" - } - return r.schemaURL -} - -// Iter returns an iterator of the Resource attributes. -// This is ideal to use if you do not want a copy of the attributes. -func (r *Resource) Iter() attribute.Iterator { - if r == nil { - r = Empty() - } - return r.attrs.Iter() -} - -// Equal returns true when a Resource is equivalent to this Resource. -func (r *Resource) Equal(eq *Resource) bool { - if r == nil { - r = Empty() - } - if eq == nil { - eq = Empty() - } - return r.Equivalent() == eq.Equivalent() -} - -// Merge creates a new resource by combining resource a and b. -// -// If there are common keys between resource a and b, then the value -// from resource b will overwrite the value from resource a, even -// if resource b's value is empty. 
-// -// The SchemaURL of the resources will be merged according to the spec rules: -// https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/resource/sdk.md#merge -// If the resources have different non-empty schemaURL an empty resource and an error -// will be returned. -func Merge(a, b *Resource) (*Resource, error) { - if a == nil && b == nil { - return Empty(), nil - } - if a == nil { - return b, nil - } - if b == nil { - return a, nil - } - - // Merge the schema URL. - var schemaURL string - switch true { - case a.schemaURL == "": - schemaURL = b.schemaURL - case b.schemaURL == "": - schemaURL = a.schemaURL - case a.schemaURL == b.schemaURL: - schemaURL = a.schemaURL - default: - return Empty(), errMergeConflictSchemaURL - } - - // Note: 'b' attributes will overwrite 'a' with last-value-wins in attribute.Key() - // Meaning this is equivalent to: append(a.Attributes(), b.Attributes()...) - mi := attribute.NewMergeIterator(b.Set(), a.Set()) - combine := make([]attribute.KeyValue, 0, a.Len()+b.Len()) - for mi.Next() { - combine = append(combine, mi.Attribute()) - } - merged := NewWithAttributes(schemaURL, combine...) - return merged, nil -} - -// Empty returns an instance of Resource with no attributes. It is -// equivalent to a `nil` Resource. -func Empty() *Resource { - return &emptyResource -} - -// Default returns an instance of Resource with a default -// "service.name" and OpenTelemetrySDK attributes. -func Default() *Resource { - defaultResourceOnce.Do(func() { - var err error - defaultResource, err = Detect( - context.Background(), - defaultServiceNameDetector{}, - fromEnv{}, - telemetrySDK{}, - ) - if err != nil { - otel.Handle(err) - } - // If Detect did not return a valid resource, fall back to emptyResource. 
- if defaultResource == nil { - defaultResource = &emptyResource - } - }) - return defaultResource -} - -// Environment returns an instance of Resource with attributes -// extracted from the OTEL_RESOURCE_ATTRIBUTES environment variable. -func Environment() *Resource { - detector := &fromEnv{} - resource, err := detector.Detect(context.Background()) - if err != nil { - otel.Handle(err) - } - return resource -} - -// Equivalent returns an object that can be compared for equality -// between two resources. This value is suitable for use as a key in -// a map. -func (r *Resource) Equivalent() attribute.Distinct { - return r.Set().Equivalent() -} - -// Set returns the equivalent *attribute.Set of this resource's attributes. -func (r *Resource) Set() *attribute.Set { - if r == nil { - r = Empty() - } - return &r.attrs -} - -// MarshalJSON encodes the resource attributes as a JSON list of { "Key": -// "...", "Value": ... } pairs in order sorted by key. -func (r *Resource) MarshalJSON() ([]byte, error) { - if r == nil { - r = Empty() - } - return r.attrs.MarshalJSON() -} - -// Len returns the number of unique key-values in this Resource. -func (r *Resource) Len() int { - if r == nil { - return 0 - } - return r.attrs.Len() -} - -// Encoded returns an encoded representation of the resource. -func (r *Resource) Encoded(enc attribute.Encoder) string { - if r == nil { - return "" - } - return r.attrs.Encoded(enc) -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go deleted file mode 100644 index 43d5b042..00000000 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go +++ /dev/null @@ -1,432 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace // import "go.opentelemetry.io/otel/sdk/trace" - -import ( - "context" - "runtime" - "sync" - "sync/atomic" - "time" - - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/internal/global" - "go.opentelemetry.io/otel/sdk/internal/env" - "go.opentelemetry.io/otel/trace" -) - -// Defaults for BatchSpanProcessorOptions. -const ( - DefaultMaxQueueSize = 2048 - DefaultScheduleDelay = 5000 - DefaultExportTimeout = 30000 - DefaultMaxExportBatchSize = 512 -) - -// BatchSpanProcessorOption configures a BatchSpanProcessor. -type BatchSpanProcessorOption func(o *BatchSpanProcessorOptions) - -// BatchSpanProcessorOptions is configuration settings for a -// BatchSpanProcessor. -type BatchSpanProcessorOptions struct { - // MaxQueueSize is the maximum queue size to buffer spans for delayed processing. If the - // queue gets full it drops the spans. Use BlockOnQueueFull to change this behavior. - // The default value of MaxQueueSize is 2048. - MaxQueueSize int - - // BatchTimeout is the maximum duration for constructing a batch. Processor - // forcefully sends available spans when timeout is reached. - // The default value of BatchTimeout is 5000 msec. - BatchTimeout time.Duration - - // ExportTimeout specifies the maximum duration for exporting spans. If the timeout - // is reached, the export will be cancelled. - // The default value of ExportTimeout is 30000 msec. - ExportTimeout time.Duration - - // MaxExportBatchSize is the maximum number of spans to process in a single batch. 
- // If there are more than one batch worth of spans then it processes multiple batches - // of spans one batch after the other without any delay. - // The default value of MaxExportBatchSize is 512. - MaxExportBatchSize int - - // BlockOnQueueFull blocks onEnd() and onStart() method if the queue is full - // AND if BlockOnQueueFull is set to true. - // Blocking option should be used carefully as it can severely affect the performance of an - // application. - BlockOnQueueFull bool -} - -// batchSpanProcessor is a SpanProcessor that batches asynchronously-received -// spans and sends them to a trace.Exporter when complete. -type batchSpanProcessor struct { - e SpanExporter - o BatchSpanProcessorOptions - - queue chan ReadOnlySpan - dropped uint32 - - batch []ReadOnlySpan - batchMutex sync.Mutex - timer *time.Timer - stopWait sync.WaitGroup - stopOnce sync.Once - stopCh chan struct{} -} - -var _ SpanProcessor = (*batchSpanProcessor)(nil) - -// NewBatchSpanProcessor creates a new SpanProcessor that will send completed -// span batches to the exporter with the supplied options. -// -// If the exporter is nil, the span processor will perform no action. 
-func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorOption) SpanProcessor { - maxQueueSize := env.BatchSpanProcessorMaxQueueSize(DefaultMaxQueueSize) - maxExportBatchSize := env.BatchSpanProcessorMaxExportBatchSize(DefaultMaxExportBatchSize) - - if maxExportBatchSize > maxQueueSize { - if DefaultMaxExportBatchSize > maxQueueSize { - maxExportBatchSize = maxQueueSize - } else { - maxExportBatchSize = DefaultMaxExportBatchSize - } - } - - o := BatchSpanProcessorOptions{ - BatchTimeout: time.Duration(env.BatchSpanProcessorScheduleDelay(DefaultScheduleDelay)) * time.Millisecond, - ExportTimeout: time.Duration(env.BatchSpanProcessorExportTimeout(DefaultExportTimeout)) * time.Millisecond, - MaxQueueSize: maxQueueSize, - MaxExportBatchSize: maxExportBatchSize, - } - for _, opt := range options { - opt(&o) - } - bsp := &batchSpanProcessor{ - e: exporter, - o: o, - batch: make([]ReadOnlySpan, 0, o.MaxExportBatchSize), - timer: time.NewTimer(o.BatchTimeout), - queue: make(chan ReadOnlySpan, o.MaxQueueSize), - stopCh: make(chan struct{}), - } - - bsp.stopWait.Add(1) - go func() { - defer bsp.stopWait.Done() - bsp.processQueue() - bsp.drainQueue() - }() - - return bsp -} - -// OnStart method does nothing. -func (bsp *batchSpanProcessor) OnStart(parent context.Context, s ReadWriteSpan) {} - -// OnEnd method enqueues a ReadOnlySpan for later processing. -func (bsp *batchSpanProcessor) OnEnd(s ReadOnlySpan) { - // Do not enqueue spans if we are just going to drop them. - if bsp.e == nil { - return - } - bsp.enqueue(s) -} - -// Shutdown flushes the queue and waits until all spans are processed. -// It only executes once. Subsequent call does nothing. 
-func (bsp *batchSpanProcessor) Shutdown(ctx context.Context) error { - var err error - bsp.stopOnce.Do(func() { - wait := make(chan struct{}) - go func() { - close(bsp.stopCh) - bsp.stopWait.Wait() - if bsp.e != nil { - if err := bsp.e.Shutdown(ctx); err != nil { - otel.Handle(err) - } - } - close(wait) - }() - // Wait until the wait group is done or the context is cancelled - select { - case <-wait: - case <-ctx.Done(): - err = ctx.Err() - } - }) - return err -} - -type forceFlushSpan struct { - ReadOnlySpan - flushed chan struct{} -} - -func (f forceFlushSpan) SpanContext() trace.SpanContext { - return trace.NewSpanContext(trace.SpanContextConfig{TraceFlags: trace.FlagsSampled}) -} - -// ForceFlush exports all ended spans that have not yet been exported. -func (bsp *batchSpanProcessor) ForceFlush(ctx context.Context) error { - var err error - if bsp.e != nil { - flushCh := make(chan struct{}) - if bsp.enqueueBlockOnQueueFull(ctx, forceFlushSpan{flushed: flushCh}) { - select { - case <-flushCh: - // Processed any items in queue prior to ForceFlush being called - case <-ctx.Done(): - return ctx.Err() - } - } - - wait := make(chan error) - go func() { - wait <- bsp.exportSpans(ctx) - close(wait) - }() - // Wait until the export is finished or the context is cancelled/timed out - select { - case err = <-wait: - case <-ctx.Done(): - err = ctx.Err() - } - } - return err -} - -// WithMaxQueueSize returns a BatchSpanProcessorOption that configures the -// maximum queue size allowed for a BatchSpanProcessor. -func WithMaxQueueSize(size int) BatchSpanProcessorOption { - return func(o *BatchSpanProcessorOptions) { - o.MaxQueueSize = size - } -} - -// WithMaxExportBatchSize returns a BatchSpanProcessorOption that configures -// the maximum export batch size allowed for a BatchSpanProcessor. 
-func WithMaxExportBatchSize(size int) BatchSpanProcessorOption { - return func(o *BatchSpanProcessorOptions) { - o.MaxExportBatchSize = size - } -} - -// WithBatchTimeout returns a BatchSpanProcessorOption that configures the -// maximum delay allowed for a BatchSpanProcessor before it will export any -// held span (whether the queue is full or not). -func WithBatchTimeout(delay time.Duration) BatchSpanProcessorOption { - return func(o *BatchSpanProcessorOptions) { - o.BatchTimeout = delay - } -} - -// WithExportTimeout returns a BatchSpanProcessorOption that configures the -// amount of time a BatchSpanProcessor waits for an exporter to export before -// abandoning the export. -func WithExportTimeout(timeout time.Duration) BatchSpanProcessorOption { - return func(o *BatchSpanProcessorOptions) { - o.ExportTimeout = timeout - } -} - -// WithBlocking returns a BatchSpanProcessorOption that configures a -// BatchSpanProcessor to wait for enqueue operations to succeed instead of -// dropping data when the queue is full. -func WithBlocking() BatchSpanProcessorOption { - return func(o *BatchSpanProcessorOptions) { - o.BlockOnQueueFull = true - } -} - -// exportSpans is a subroutine of processing and draining the queue. -func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error { - bsp.timer.Reset(bsp.o.BatchTimeout) - - bsp.batchMutex.Lock() - defer bsp.batchMutex.Unlock() - - if bsp.o.ExportTimeout > 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, bsp.o.ExportTimeout) - defer cancel() - } - - if l := len(bsp.batch); l > 0 { - global.Debug("exporting spans", "count", len(bsp.batch), "total_dropped", atomic.LoadUint32(&bsp.dropped)) - err := bsp.e.ExportSpans(ctx, bsp.batch) - - // A new batch is always created after exporting, even if the batch failed to be exported. 
- // - // It is up to the exporter to implement any type of retry logic if a batch is failing - // to be exported, since it is specific to the protocol and backend being sent to. - bsp.batch = bsp.batch[:0] - - if err != nil { - return err - } - } - return nil -} - -// processQueue removes spans from the `queue` channel until processor -// is shut down. It calls the exporter in batches of up to MaxExportBatchSize -// waiting up to BatchTimeout to form a batch. -func (bsp *batchSpanProcessor) processQueue() { - defer bsp.timer.Stop() - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - for { - select { - case <-bsp.stopCh: - return - case <-bsp.timer.C: - if err := bsp.exportSpans(ctx); err != nil { - otel.Handle(err) - } - case sd := <-bsp.queue: - if ffs, ok := sd.(forceFlushSpan); ok { - close(ffs.flushed) - continue - } - bsp.batchMutex.Lock() - bsp.batch = append(bsp.batch, sd) - shouldExport := len(bsp.batch) >= bsp.o.MaxExportBatchSize - bsp.batchMutex.Unlock() - if shouldExport { - if !bsp.timer.Stop() { - <-bsp.timer.C - } - if err := bsp.exportSpans(ctx); err != nil { - otel.Handle(err) - } - } - } - } -} - -// drainQueue awaits the any caller that had added to bsp.stopWait -// to finish the enqueue, then exports the final batch. 
-func (bsp *batchSpanProcessor) drainQueue() { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - for { - select { - case sd := <-bsp.queue: - if sd == nil { - if err := bsp.exportSpans(ctx); err != nil { - otel.Handle(err) - } - return - } - - bsp.batchMutex.Lock() - bsp.batch = append(bsp.batch, sd) - shouldExport := len(bsp.batch) == bsp.o.MaxExportBatchSize - bsp.batchMutex.Unlock() - - if shouldExport { - if err := bsp.exportSpans(ctx); err != nil { - otel.Handle(err) - } - } - default: - close(bsp.queue) - } - } -} - -func (bsp *batchSpanProcessor) enqueue(sd ReadOnlySpan) { - ctx := context.TODO() - if bsp.o.BlockOnQueueFull { - bsp.enqueueBlockOnQueueFull(ctx, sd) - } else { - bsp.enqueueDrop(ctx, sd) - } -} - -func recoverSendOnClosedChan() { - x := recover() - switch err := x.(type) { - case nil: - return - case runtime.Error: - if err.Error() == "send on closed channel" { - return - } - } - panic(x) -} - -func (bsp *batchSpanProcessor) enqueueBlockOnQueueFull(ctx context.Context, sd ReadOnlySpan) bool { - if !sd.SpanContext().IsSampled() { - return false - } - - // This ensures the bsp.queue<- below does not panic as the - // processor shuts down. - defer recoverSendOnClosedChan() - - select { - case <-bsp.stopCh: - return false - default: - } - - select { - case bsp.queue <- sd: - return true - case <-ctx.Done(): - return false - } -} - -func (bsp *batchSpanProcessor) enqueueDrop(ctx context.Context, sd ReadOnlySpan) bool { - if !sd.SpanContext().IsSampled() { - return false - } - - // This ensures the bsp.queue<- below does not panic as the - // processor shuts down. - defer recoverSendOnClosedChan() - - select { - case <-bsp.stopCh: - return false - default: - } - - select { - case bsp.queue <- sd: - return true - default: - atomic.AddUint32(&bsp.dropped, 1) - } - return false -} - -// MarshalLog is the marshaling function used by the logging system to represent this exporter. 
-func (bsp *batchSpanProcessor) MarshalLog() interface{} { - return struct { - Type string - SpanExporter SpanExporter - Config BatchSpanProcessorOptions - }{ - Type: "BatchSpanProcessor", - SpanExporter: bsp.e, - Config: bsp.o, - } -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go b/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go deleted file mode 100644 index 0285e99b..00000000 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package trace contains support for OpenTelemetry distributed tracing. - -The following assumes a basic familiarity with OpenTelemetry concepts. -See https://opentelemetry.io. -*/ -package trace // import "go.opentelemetry.io/otel/sdk/trace" diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/event.go b/vendor/go.opentelemetry.io/otel/sdk/trace/event.go deleted file mode 100644 index 1e3b4267..00000000 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/event.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace // import "go.opentelemetry.io/otel/sdk/trace" - -import ( - "time" - - "go.opentelemetry.io/otel/attribute" -) - -// Event is a thing that happened during a Span's lifetime. -type Event struct { - // Name is the name of this event - Name string - - // Attributes describe the aspects of the event. - Attributes []attribute.KeyValue - - // DroppedAttributeCount is the number of attributes that were not - // recorded due to configured limits being reached. - DroppedAttributeCount int - - // Time at which this event was recorded. - Time time.Time -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go b/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go deleted file mode 100644 index d1c86e59..00000000 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package trace // import "go.opentelemetry.io/otel/sdk/trace" - -// evictedQueue is a FIFO queue with a configurable capacity. -type evictedQueue struct { - queue []interface{} - capacity int - droppedCount int -} - -func newEvictedQueue(capacity int) evictedQueue { - // Do not pre-allocate queue, do this lazily. - return evictedQueue{capacity: capacity} -} - -// add adds value to the evictedQueue eq. If eq is at capacity, the oldest -// queued value will be discarded and the drop count incremented. -func (eq *evictedQueue) add(value interface{}) { - if eq.capacity == 0 { - eq.droppedCount++ - return - } - - if eq.capacity > 0 && len(eq.queue) == eq.capacity { - // Drop first-in while avoiding allocating more capacity to eq.queue. - copy(eq.queue[:eq.capacity-1], eq.queue[1:]) - eq.queue = eq.queue[:eq.capacity-1] - eq.droppedCount++ - } - eq.queue = append(eq.queue, value) -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go b/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go deleted file mode 100644 index bba24604..00000000 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package trace // import "go.opentelemetry.io/otel/sdk/trace" - -import ( - "context" - crand "crypto/rand" - "encoding/binary" - "math/rand" - "sync" - - "go.opentelemetry.io/otel/trace" -) - -// IDGenerator allows custom generators for TraceID and SpanID. -type IDGenerator interface { - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. - - // NewIDs returns a new trace and span ID. - NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID) - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. - - // NewSpanID returns a ID for a new span in the trace with traceID. - NewSpanID(ctx context.Context, traceID trace.TraceID) trace.SpanID - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. -} - -type randomIDGenerator struct { - sync.Mutex - randSource *rand.Rand -} - -var _ IDGenerator = &randomIDGenerator{} - -// NewSpanID returns a non-zero span ID from a randomly-chosen sequence. -func (gen *randomIDGenerator) NewSpanID(ctx context.Context, traceID trace.TraceID) trace.SpanID { - gen.Lock() - defer gen.Unlock() - sid := trace.SpanID{} - _, _ = gen.randSource.Read(sid[:]) - return sid -} - -// NewIDs returns a non-zero trace ID and a non-zero span ID from a -// randomly-chosen sequence. 
-func (gen *randomIDGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID) { - gen.Lock() - defer gen.Unlock() - tid := trace.TraceID{} - _, _ = gen.randSource.Read(tid[:]) - sid := trace.SpanID{} - _, _ = gen.randSource.Read(sid[:]) - return tid, sid -} - -func defaultIDGenerator() IDGenerator { - gen := &randomIDGenerator{} - var rngSeed int64 - _ = binary.Read(crand.Reader, binary.LittleEndian, &rngSeed) - gen.randSource = rand.New(rand.NewSource(rngSeed)) - return gen -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/link.go b/vendor/go.opentelemetry.io/otel/sdk/trace/link.go deleted file mode 100644 index 19cfea4b..00000000 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/link.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace // import "go.opentelemetry.io/otel/sdk/trace" - -import ( - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" -) - -// Link is the relationship between two Spans. The relationship can be within -// the same Trace or across different Traces. -type Link struct { - // SpanContext of the linked Span. - SpanContext trace.SpanContext - - // Attributes describe the aspects of the link. - Attributes []attribute.KeyValue - - // DroppedAttributeCount is the number of attributes that were not - // recorded due to configured limits being reached. 
- DroppedAttributeCount int -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go deleted file mode 100644 index 0a018c14..00000000 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go +++ /dev/null @@ -1,500 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace // import "go.opentelemetry.io/otel/sdk/trace" - -import ( - "context" - "fmt" - "sync" - "sync/atomic" - - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/internal/global" - "go.opentelemetry.io/otel/sdk/instrumentation" - "go.opentelemetry.io/otel/sdk/resource" - "go.opentelemetry.io/otel/trace" -) - -const ( - defaultTracerName = "go.opentelemetry.io/otel/sdk/tracer" -) - -// tracerProviderConfig. -type tracerProviderConfig struct { - // processors contains collection of SpanProcessors that are processing pipeline - // for spans in the trace signal. - // SpanProcessors registered with a TracerProvider and are called at the start - // and end of a Span's lifecycle, and are called in the order they are - // registered. - processors []SpanProcessor - - // sampler is the default sampler used when creating new spans. - sampler Sampler - - // idGenerator is used to generate all Span and Trace IDs when needed. - idGenerator IDGenerator - - // spanLimits defines the attribute, event, and link limits for spans. 
- spanLimits SpanLimits - - // resource contains attributes representing an entity that produces telemetry. - resource *resource.Resource -} - -// MarshalLog is the marshaling function used by the logging system to represent this exporter. -func (cfg tracerProviderConfig) MarshalLog() interface{} { - return struct { - SpanProcessors []SpanProcessor - SamplerType string - IDGeneratorType string - SpanLimits SpanLimits - Resource *resource.Resource - }{ - SpanProcessors: cfg.processors, - SamplerType: fmt.Sprintf("%T", cfg.sampler), - IDGeneratorType: fmt.Sprintf("%T", cfg.idGenerator), - SpanLimits: cfg.spanLimits, - Resource: cfg.resource, - } -} - -// TracerProvider is an OpenTelemetry TracerProvider. It provides Tracers to -// instrumentation so it can trace operational flow through a system. -type TracerProvider struct { - mu sync.Mutex - namedTracer map[instrumentation.Scope]*tracer - spanProcessors atomic.Pointer[spanProcessorStates] - - isShutdown atomic.Bool - - // These fields are not protected by the lock mu. They are assumed to be - // immutable after creation of the TracerProvider. - sampler Sampler - idGenerator IDGenerator - spanLimits SpanLimits - resource *resource.Resource -} - -var _ trace.TracerProvider = &TracerProvider{} - -// NewTracerProvider returns a new and configured TracerProvider. -// -// By default the returned TracerProvider is configured with: -// - a ParentBased(AlwaysSample) Sampler -// - a random number IDGenerator -// - the resource.Default() Resource -// - the default SpanLimits. -// -// The passed opts are used to override these default values and configure the -// returned TracerProvider appropriately. 
-func NewTracerProvider(opts ...TracerProviderOption) *TracerProvider { - o := tracerProviderConfig{ - spanLimits: NewSpanLimits(), - } - o = applyTracerProviderEnvConfigs(o) - - for _, opt := range opts { - o = opt.apply(o) - } - - o = ensureValidTracerProviderConfig(o) - - tp := &TracerProvider{ - namedTracer: make(map[instrumentation.Scope]*tracer), - sampler: o.sampler, - idGenerator: o.idGenerator, - spanLimits: o.spanLimits, - resource: o.resource, - } - global.Info("TracerProvider created", "config", o) - - spss := make(spanProcessorStates, 0, len(o.processors)) - for _, sp := range o.processors { - spss = append(spss, newSpanProcessorState(sp)) - } - tp.spanProcessors.Store(&spss) - - return tp -} - -// Tracer returns a Tracer with the given name and options. If a Tracer for -// the given name and options does not exist it is created, otherwise the -// existing Tracer is returned. -// -// If name is empty, DefaultTracerName is used instead. -// -// This method is safe to be called concurrently. -func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer { - // This check happens before the mutex is acquired to avoid deadlocking if Tracer() is called from within Shutdown(). - if p.isShutdown.Load() { - return trace.NewNoopTracerProvider().Tracer(name, opts...) - } - c := trace.NewTracerConfig(opts...) - if name == "" { - name = defaultTracerName - } - is := instrumentation.Scope{ - Name: name, - Version: c.InstrumentationVersion(), - SchemaURL: c.SchemaURL(), - } - - t, ok := func() (trace.Tracer, bool) { - p.mu.Lock() - defer p.mu.Unlock() - // Must check the flag after acquiring the mutex to avoid returning a valid tracer if Shutdown() ran - // after the first check above but before we acquired the mutex. 
- if p.isShutdown.Load() { - return trace.NewNoopTracerProvider().Tracer(name, opts...), true - } - t, ok := p.namedTracer[is] - if !ok { - t = &tracer{ - provider: p, - instrumentationScope: is, - } - p.namedTracer[is] = t - } - return t, ok - }() - if !ok { - // This code is outside the mutex to not hold the lock while calling third party logging code: - // - That code may do slow things like I/O, which would prolong the duration the lock is held, - // slowing down all tracing consumers. - // - Logging code may be instrumented with tracing and deadlock because it could try - // acquiring the same non-reentrant mutex. - global.Info("Tracer created", "name", name, "version", is.Version, "schemaURL", is.SchemaURL) - } - return t -} - -// RegisterSpanProcessor adds the given SpanProcessor to the list of SpanProcessors. -func (p *TracerProvider) RegisterSpanProcessor(sp SpanProcessor) { - // This check prevents calls during a shutdown. - if p.isShutdown.Load() { - return - } - p.mu.Lock() - defer p.mu.Unlock() - // This check prevents calls after a shutdown. - if p.isShutdown.Load() { - return - } - - current := p.getSpanProcessors() - newSPS := make(spanProcessorStates, 0, len(current)+1) - newSPS = append(newSPS, current...) - newSPS = append(newSPS, newSpanProcessorState(sp)) - p.spanProcessors.Store(&newSPS) -} - -// UnregisterSpanProcessor removes the given SpanProcessor from the list of SpanProcessors. -func (p *TracerProvider) UnregisterSpanProcessor(sp SpanProcessor) { - // This check prevents calls during a shutdown. - if p.isShutdown.Load() { - return - } - p.mu.Lock() - defer p.mu.Unlock() - // This check prevents calls after a shutdown. 
- if p.isShutdown.Load() { - return - } - old := p.getSpanProcessors() - if len(old) == 0 { - return - } - spss := make(spanProcessorStates, len(old)) - copy(spss, old) - - // stop the span processor if it is started and remove it from the list - var stopOnce *spanProcessorState - var idx int - for i, sps := range spss { - if sps.sp == sp { - stopOnce = sps - idx = i - } - } - if stopOnce != nil { - stopOnce.state.Do(func() { - if err := sp.Shutdown(context.Background()); err != nil { - otel.Handle(err) - } - }) - } - if len(spss) > 1 { - copy(spss[idx:], spss[idx+1:]) - } - spss[len(spss)-1] = nil - spss = spss[:len(spss)-1] - - p.spanProcessors.Store(&spss) -} - -// ForceFlush immediately exports all spans that have not yet been exported for -// all the registered span processors. -func (p *TracerProvider) ForceFlush(ctx context.Context) error { - spss := p.getSpanProcessors() - if len(spss) == 0 { - return nil - } - - for _, sps := range spss { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - if err := sps.sp.ForceFlush(ctx); err != nil { - return err - } - } - return nil -} - -// Shutdown shuts down TracerProvider. All registered span processors are shut down -// in the order they were registered and any held computational resources are released. -// After Shutdown is called, all methods are no-ops. -func (p *TracerProvider) Shutdown(ctx context.Context) error { - // This check prevents deadlocks in case of recursive shutdown. - if p.isShutdown.Load() { - return nil - } - p.mu.Lock() - defer p.mu.Unlock() - // This check prevents calls after a shutdown has already been done concurrently. - if !p.isShutdown.CompareAndSwap(false, true) { // did toggle? 
- return nil - } - - var retErr error - for _, sps := range p.getSpanProcessors() { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - var err error - sps.state.Do(func() { - err = sps.sp.Shutdown(ctx) - }) - if err != nil { - if retErr == nil { - retErr = err - } else { - // Poor man's list of errors - retErr = fmt.Errorf("%v; %v", retErr, err) - } - } - } - p.spanProcessors.Store(&spanProcessorStates{}) - return retErr -} - -func (p *TracerProvider) getSpanProcessors() spanProcessorStates { - return *(p.spanProcessors.Load()) -} - -// TracerProviderOption configures a TracerProvider. -type TracerProviderOption interface { - apply(tracerProviderConfig) tracerProviderConfig -} - -type traceProviderOptionFunc func(tracerProviderConfig) tracerProviderConfig - -func (fn traceProviderOptionFunc) apply(cfg tracerProviderConfig) tracerProviderConfig { - return fn(cfg) -} - -// WithSyncer registers the exporter with the TracerProvider using a -// SimpleSpanProcessor. -// -// This is not recommended for production use. The synchronous nature of the -// SimpleSpanProcessor that will wrap the exporter make it good for testing, -// debugging, or showing examples of other feature, but it will be slow and -// have a high computation resource usage overhead. The WithBatcher option is -// recommended for production use instead. -func WithSyncer(e SpanExporter) TracerProviderOption { - return WithSpanProcessor(NewSimpleSpanProcessor(e)) -} - -// WithBatcher registers the exporter with the TracerProvider using a -// BatchSpanProcessor configured with the passed opts. -func WithBatcher(e SpanExporter, opts ...BatchSpanProcessorOption) TracerProviderOption { - return WithSpanProcessor(NewBatchSpanProcessor(e, opts...)) -} - -// WithSpanProcessor registers the SpanProcessor with a TracerProvider. 
-func WithSpanProcessor(sp SpanProcessor) TracerProviderOption { - return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig { - cfg.processors = append(cfg.processors, sp) - return cfg - }) -} - -// WithResource returns a TracerProviderOption that will configure the -// Resource r as a TracerProvider's Resource. The configured Resource is -// referenced by all the Tracers the TracerProvider creates. It represents the -// entity producing telemetry. -// -// If this option is not used, the TracerProvider will use the -// resource.Default() Resource by default. -func WithResource(r *resource.Resource) TracerProviderOption { - return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig { - var err error - cfg.resource, err = resource.Merge(resource.Environment(), r) - if err != nil { - otel.Handle(err) - } - return cfg - }) -} - -// WithIDGenerator returns a TracerProviderOption that will configure the -// IDGenerator g as a TracerProvider's IDGenerator. The configured IDGenerator -// is used by the Tracers the TracerProvider creates to generate new Span and -// Trace IDs. -// -// If this option is not used, the TracerProvider will use a random number -// IDGenerator by default. -func WithIDGenerator(g IDGenerator) TracerProviderOption { - return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig { - if g != nil { - cfg.idGenerator = g - } - return cfg - }) -} - -// WithSampler returns a TracerProviderOption that will configure the Sampler -// s as a TracerProvider's Sampler. The configured Sampler is used by the -// Tracers the TracerProvider creates to make their sampling decisions for the -// Spans they create. -// -// This option overrides the Sampler configured through the OTEL_TRACES_SAMPLER -// and OTEL_TRACES_SAMPLER_ARG environment variables. 
If this option is not used -// and the sampler is not configured through environment variables or the environment -// contains invalid/unsupported configuration, the TracerProvider will use a -// ParentBased(AlwaysSample) Sampler by default. -func WithSampler(s Sampler) TracerProviderOption { - return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig { - if s != nil { - cfg.sampler = s - } - return cfg - }) -} - -// WithSpanLimits returns a TracerProviderOption that configures a -// TracerProvider to use the SpanLimits sl. These SpanLimits bound any Span -// created by a Tracer from the TracerProvider. -// -// If any field of sl is zero or negative it will be replaced with the default -// value for that field. -// -// If this or WithRawSpanLimits are not provided, the TracerProvider will use -// the limits defined by environment variables, or the defaults if unset. -// Refer to the NewSpanLimits documentation for information about this -// relationship. -// -// Deprecated: Use WithRawSpanLimits instead which allows setting unlimited -// and zero limits. This option will be kept until the next major version -// incremented release. 
-func WithSpanLimits(sl SpanLimits) TracerProviderOption { - if sl.AttributeValueLengthLimit <= 0 { - sl.AttributeValueLengthLimit = DefaultAttributeValueLengthLimit - } - if sl.AttributeCountLimit <= 0 { - sl.AttributeCountLimit = DefaultAttributeCountLimit - } - if sl.EventCountLimit <= 0 { - sl.EventCountLimit = DefaultEventCountLimit - } - if sl.AttributePerEventCountLimit <= 0 { - sl.AttributePerEventCountLimit = DefaultAttributePerEventCountLimit - } - if sl.LinkCountLimit <= 0 { - sl.LinkCountLimit = DefaultLinkCountLimit - } - if sl.AttributePerLinkCountLimit <= 0 { - sl.AttributePerLinkCountLimit = DefaultAttributePerLinkCountLimit - } - return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig { - cfg.spanLimits = sl - return cfg - }) -} - -// WithRawSpanLimits returns a TracerProviderOption that configures a -// TracerProvider to use these limits. These limits bound any Span created by -// a Tracer from the TracerProvider. -// -// The limits will be used as-is. Zero or negative values will not be changed -// to the default value like WithSpanLimits does. Setting a limit to zero will -// effectively disable the related resource it limits and setting to a -// negative value will mean that resource is unlimited. Consequentially, this -// means that the zero-value SpanLimits will disable all span resources. -// Because of this, limits should be constructed using NewSpanLimits and -// updated accordingly. -// -// If this or WithSpanLimits are not provided, the TracerProvider will use the -// limits defined by environment variables, or the defaults if unset. Refer to -// the NewSpanLimits documentation for information about this relationship. 
-func WithRawSpanLimits(limits SpanLimits) TracerProviderOption { - return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig { - cfg.spanLimits = limits - return cfg - }) -} - -func applyTracerProviderEnvConfigs(cfg tracerProviderConfig) tracerProviderConfig { - for _, opt := range tracerProviderOptionsFromEnv() { - cfg = opt.apply(cfg) - } - - return cfg -} - -func tracerProviderOptionsFromEnv() []TracerProviderOption { - var opts []TracerProviderOption - - sampler, err := samplerFromEnv() - if err != nil { - otel.Handle(err) - } - - if sampler != nil { - opts = append(opts, WithSampler(sampler)) - } - - return opts -} - -// ensureValidTracerProviderConfig ensures that given TracerProviderConfig is valid. -func ensureValidTracerProviderConfig(cfg tracerProviderConfig) tracerProviderConfig { - if cfg.sampler == nil { - cfg.sampler = ParentBased(AlwaysSample()) - } - if cfg.idGenerator == nil { - cfg.idGenerator = defaultIDGenerator() - } - if cfg.resource == nil { - cfg.resource = resource.Default() - } - return cfg -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go deleted file mode 100644 index 02053b31..00000000 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package trace // import "go.opentelemetry.io/otel/sdk/trace" - -import ( - "errors" - "fmt" - "os" - "strconv" - "strings" -) - -const ( - tracesSamplerKey = "OTEL_TRACES_SAMPLER" - tracesSamplerArgKey = "OTEL_TRACES_SAMPLER_ARG" - - samplerAlwaysOn = "always_on" - samplerAlwaysOff = "always_off" - samplerTraceIDRatio = "traceidratio" - samplerParentBasedAlwaysOn = "parentbased_always_on" - samplerParsedBasedAlwaysOff = "parentbased_always_off" - samplerParentBasedTraceIDRatio = "parentbased_traceidratio" -) - -type errUnsupportedSampler string - -func (e errUnsupportedSampler) Error() string { - return fmt.Sprintf("unsupported sampler: %s", string(e)) -} - -var ( - errNegativeTraceIDRatio = errors.New("invalid trace ID ratio: less than 0.0") - errGreaterThanOneTraceIDRatio = errors.New("invalid trace ID ratio: greater than 1.0") -) - -type samplerArgParseError struct { - parseErr error -} - -func (e samplerArgParseError) Error() string { - return fmt.Sprintf("parsing sampler argument: %s", e.parseErr.Error()) -} - -func (e samplerArgParseError) Unwrap() error { - return e.parseErr -} - -func samplerFromEnv() (Sampler, error) { - sampler, ok := os.LookupEnv(tracesSamplerKey) - if !ok { - return nil, nil - } - - sampler = strings.ToLower(strings.TrimSpace(sampler)) - samplerArg, hasSamplerArg := os.LookupEnv(tracesSamplerArgKey) - samplerArg = strings.TrimSpace(samplerArg) - - switch sampler { - case samplerAlwaysOn: - return AlwaysSample(), nil - case samplerAlwaysOff: - return NeverSample(), nil - case samplerTraceIDRatio: - if !hasSamplerArg { - return TraceIDRatioBased(1.0), nil - } - return parseTraceIDRatio(samplerArg) - case samplerParentBasedAlwaysOn: - return ParentBased(AlwaysSample()), nil - case samplerParsedBasedAlwaysOff: - return ParentBased(NeverSample()), nil - case samplerParentBasedTraceIDRatio: - if !hasSamplerArg { - return ParentBased(TraceIDRatioBased(1.0)), nil - } - ratio, err := parseTraceIDRatio(samplerArg) - return ParentBased(ratio), 
err - default: - return nil, errUnsupportedSampler(sampler) - } -} - -func parseTraceIDRatio(arg string) (Sampler, error) { - v, err := strconv.ParseFloat(arg, 64) - if err != nil { - return TraceIDRatioBased(1.0), samplerArgParseError{err} - } - if v < 0.0 { - return TraceIDRatioBased(1.0), errNegativeTraceIDRatio - } - if v > 1.0 { - return TraceIDRatioBased(1.0), errGreaterThanOneTraceIDRatio - } - - return TraceIDRatioBased(v), nil -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go deleted file mode 100644 index 5ee9715d..00000000 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go +++ /dev/null @@ -1,293 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace // import "go.opentelemetry.io/otel/sdk/trace" - -import ( - "context" - "encoding/binary" - "fmt" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" -) - -// Sampler decides whether a trace should be sampled and exported. -type Sampler interface { - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. - - // ShouldSample returns a SamplingResult based on a decision made from the - // passed parameters. 
- ShouldSample(parameters SamplingParameters) SamplingResult - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. - - // Description returns information describing the Sampler. - Description() string - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. -} - -// SamplingParameters contains the values passed to a Sampler. -type SamplingParameters struct { - ParentContext context.Context - TraceID trace.TraceID - Name string - Kind trace.SpanKind - Attributes []attribute.KeyValue - Links []trace.Link -} - -// SamplingDecision indicates whether a span is dropped, recorded and/or sampled. -type SamplingDecision uint8 - -// Valid sampling decisions. -const ( - // Drop will not record the span and all attributes/events will be dropped. - Drop SamplingDecision = iota - - // Record indicates the span's `IsRecording() == true`, but `Sampled` flag - // *must not* be set. - RecordOnly - - // RecordAndSample has span's `IsRecording() == true` and `Sampled` flag - // *must* be set. - RecordAndSample -) - -// SamplingResult conveys a SamplingDecision, set of Attributes and a Tracestate. -type SamplingResult struct { - Decision SamplingDecision - Attributes []attribute.KeyValue - Tracestate trace.TraceState -} - -type traceIDRatioSampler struct { - traceIDUpperBound uint64 - description string -} - -func (ts traceIDRatioSampler) ShouldSample(p SamplingParameters) SamplingResult { - psc := trace.SpanContextFromContext(p.ParentContext) - x := binary.BigEndian.Uint64(p.TraceID[8:16]) >> 1 - if x < ts.traceIDUpperBound { - return SamplingResult{ - Decision: RecordAndSample, - Tracestate: psc.TraceState(), - } - } - return SamplingResult{ - Decision: Drop, - Tracestate: psc.TraceState(), - } -} - -func (ts traceIDRatioSampler) Description() string { - return ts.description -} - -// TraceIDRatioBased samples a given fraction of traces. 
Fractions >= 1 will -// always sample. Fractions < 0 are treated as zero. To respect the -// parent trace's `SampledFlag`, the `TraceIDRatioBased` sampler should be used -// as a delegate of a `Parent` sampler. -// -//nolint:revive // revive complains about stutter of `trace.TraceIDRatioBased` -func TraceIDRatioBased(fraction float64) Sampler { - if fraction >= 1 { - return AlwaysSample() - } - - if fraction <= 0 { - fraction = 0 - } - - return &traceIDRatioSampler{ - traceIDUpperBound: uint64(fraction * (1 << 63)), - description: fmt.Sprintf("TraceIDRatioBased{%g}", fraction), - } -} - -type alwaysOnSampler struct{} - -func (as alwaysOnSampler) ShouldSample(p SamplingParameters) SamplingResult { - return SamplingResult{ - Decision: RecordAndSample, - Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(), - } -} - -func (as alwaysOnSampler) Description() string { - return "AlwaysOnSampler" -} - -// AlwaysSample returns a Sampler that samples every trace. -// Be careful about using this sampler in a production application with -// significant traffic: a new trace will be started and exported for every -// request. -func AlwaysSample() Sampler { - return alwaysOnSampler{} -} - -type alwaysOffSampler struct{} - -func (as alwaysOffSampler) ShouldSample(p SamplingParameters) SamplingResult { - return SamplingResult{ - Decision: Drop, - Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(), - } -} - -func (as alwaysOffSampler) Description() string { - return "AlwaysOffSampler" -} - -// NeverSample returns a Sampler that samples no traces. -func NeverSample() Sampler { - return alwaysOffSampler{} -} - -// ParentBased returns a composite sampler which behaves differently, -// based on the parent of the span. If the span has no parent, -// the root(Sampler) is used to make sampling decision. 
If the span has -// a parent, depending on whether the parent is remote and whether it -// is sampled, one of the following samplers will apply: -// - remoteParentSampled(Sampler) (default: AlwaysOn) -// - remoteParentNotSampled(Sampler) (default: AlwaysOff) -// - localParentSampled(Sampler) (default: AlwaysOn) -// - localParentNotSampled(Sampler) (default: AlwaysOff) -func ParentBased(root Sampler, samplers ...ParentBasedSamplerOption) Sampler { - return parentBased{ - root: root, - config: configureSamplersForParentBased(samplers), - } -} - -type parentBased struct { - root Sampler - config samplerConfig -} - -func configureSamplersForParentBased(samplers []ParentBasedSamplerOption) samplerConfig { - c := samplerConfig{ - remoteParentSampled: AlwaysSample(), - remoteParentNotSampled: NeverSample(), - localParentSampled: AlwaysSample(), - localParentNotSampled: NeverSample(), - } - - for _, so := range samplers { - c = so.apply(c) - } - - return c -} - -// samplerConfig is a group of options for parentBased sampler. -type samplerConfig struct { - remoteParentSampled, remoteParentNotSampled Sampler - localParentSampled, localParentNotSampled Sampler -} - -// ParentBasedSamplerOption configures the sampler for a particular sampling case. -type ParentBasedSamplerOption interface { - apply(samplerConfig) samplerConfig -} - -// WithRemoteParentSampled sets the sampler for the case of sampled remote parent. -func WithRemoteParentSampled(s Sampler) ParentBasedSamplerOption { - return remoteParentSampledOption{s} -} - -type remoteParentSampledOption struct { - s Sampler -} - -func (o remoteParentSampledOption) apply(config samplerConfig) samplerConfig { - config.remoteParentSampled = o.s - return config -} - -// WithRemoteParentNotSampled sets the sampler for the case of remote parent -// which is not sampled. 
-func WithRemoteParentNotSampled(s Sampler) ParentBasedSamplerOption { - return remoteParentNotSampledOption{s} -} - -type remoteParentNotSampledOption struct { - s Sampler -} - -func (o remoteParentNotSampledOption) apply(config samplerConfig) samplerConfig { - config.remoteParentNotSampled = o.s - return config -} - -// WithLocalParentSampled sets the sampler for the case of sampled local parent. -func WithLocalParentSampled(s Sampler) ParentBasedSamplerOption { - return localParentSampledOption{s} -} - -type localParentSampledOption struct { - s Sampler -} - -func (o localParentSampledOption) apply(config samplerConfig) samplerConfig { - config.localParentSampled = o.s - return config -} - -// WithLocalParentNotSampled sets the sampler for the case of local parent -// which is not sampled. -func WithLocalParentNotSampled(s Sampler) ParentBasedSamplerOption { - return localParentNotSampledOption{s} -} - -type localParentNotSampledOption struct { - s Sampler -} - -func (o localParentNotSampledOption) apply(config samplerConfig) samplerConfig { - config.localParentNotSampled = o.s - return config -} - -func (pb parentBased) ShouldSample(p SamplingParameters) SamplingResult { - psc := trace.SpanContextFromContext(p.ParentContext) - if psc.IsValid() { - if psc.IsRemote() { - if psc.IsSampled() { - return pb.config.remoteParentSampled.ShouldSample(p) - } - return pb.config.remoteParentNotSampled.ShouldSample(p) - } - - if psc.IsSampled() { - return pb.config.localParentSampled.ShouldSample(p) - } - return pb.config.localParentNotSampled.ShouldSample(p) - } - return pb.root.ShouldSample(p) -} - -func (pb parentBased) Description() string { - return fmt.Sprintf("ParentBased{root:%s,remoteParentSampled:%s,"+ - "remoteParentNotSampled:%s,localParentSampled:%s,localParentNotSampled:%s}", - pb.root.Description(), - pb.config.remoteParentSampled.Description(), - pb.config.remoteParentNotSampled.Description(), - pb.config.localParentSampled.Description(), - 
pb.config.localParentNotSampled.Description(), - ) -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go deleted file mode 100644 index f8770fff..00000000 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace // import "go.opentelemetry.io/otel/sdk/trace" - -import ( - "context" - "sync" - - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/internal/global" -) - -// simpleSpanProcessor is a SpanProcessor that synchronously sends all -// completed Spans to a trace.Exporter immediately. -type simpleSpanProcessor struct { - exporterMu sync.Mutex - exporter SpanExporter - stopOnce sync.Once -} - -var _ SpanProcessor = (*simpleSpanProcessor)(nil) - -// NewSimpleSpanProcessor returns a new SpanProcessor that will synchronously -// send completed spans to the exporter immediately. -// -// This SpanProcessor is not recommended for production use. The synchronous -// nature of this SpanProcessor make it good for testing, debugging, or -// showing examples of other feature, but it will be slow and have a high -// computation resource usage overhead. The BatchSpanProcessor is recommended -// for production use instead. 
-func NewSimpleSpanProcessor(exporter SpanExporter) SpanProcessor { - ssp := &simpleSpanProcessor{ - exporter: exporter, - } - global.Warn("SimpleSpanProcessor is not recommended for production use, consider using BatchSpanProcessor instead.") - - return ssp -} - -// OnStart does nothing. -func (ssp *simpleSpanProcessor) OnStart(context.Context, ReadWriteSpan) {} - -// OnEnd immediately exports a ReadOnlySpan. -func (ssp *simpleSpanProcessor) OnEnd(s ReadOnlySpan) { - ssp.exporterMu.Lock() - defer ssp.exporterMu.Unlock() - - if ssp.exporter != nil && s.SpanContext().TraceFlags().IsSampled() { - if err := ssp.exporter.ExportSpans(context.Background(), []ReadOnlySpan{s}); err != nil { - otel.Handle(err) - } - } -} - -// Shutdown shuts down the exporter this SimpleSpanProcessor exports to. -func (ssp *simpleSpanProcessor) Shutdown(ctx context.Context) error { - var err error - ssp.stopOnce.Do(func() { - stopFunc := func(exp SpanExporter) (<-chan error, func()) { - done := make(chan error) - return done, func() { done <- exp.Shutdown(ctx) } - } - - // The exporter field of the simpleSpanProcessor needs to be zeroed to - // signal it is shut down, meaning all subsequent calls to OnEnd will - // be gracefully ignored. This needs to be done synchronously to avoid - // any race condition. - // - // A closure is used to keep reference to the exporter and then the - // field is zeroed. This ensures the simpleSpanProcessor is shut down - // before the exporter. This order is important as it avoids a - // potential deadlock. If the exporter shut down operation generates a - // span, that span would need to be exported. Meaning, OnEnd would be - // called and try acquiring the lock that is held here. - ssp.exporterMu.Lock() - done, shutdown := stopFunc(ssp.exporter) - ssp.exporter = nil - ssp.exporterMu.Unlock() - - go shutdown() - - // Wait for the exporter to shut down or the deadline to expire. 
- select { - case err = <-done: - case <-ctx.Done(): - // It is possible for the exporter to have immediately shut down - // and the context to be done simultaneously. In that case this - // outer select statement will randomly choose a case. This will - // result in a different returned error for similar scenarios. - // Instead, double check if the exporter shut down at the same - // time and return that error if so. This will ensure consistency - // as well as ensure the caller knows the exporter shut down - // successfully (they can already determine if the deadline is - // expired given they passed the context). - select { - case err = <-done: - default: - err = ctx.Err() - } - } - }) - return err -} - -// ForceFlush does nothing as there is no data to flush. -func (ssp *simpleSpanProcessor) ForceFlush(context.Context) error { - return nil -} - -// MarshalLog is the marshaling function used by the logging system to represent this Span Processor. -func (ssp *simpleSpanProcessor) MarshalLog() interface{} { - return struct { - Type string - Exporter SpanExporter - }{ - Type: "SimpleSpanProcessor", - Exporter: ssp.exporter, - } -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go deleted file mode 100644 index 0349b2f1..00000000 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package trace // import "go.opentelemetry.io/otel/sdk/trace" - -import ( - "time" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/sdk/instrumentation" - "go.opentelemetry.io/otel/sdk/resource" - "go.opentelemetry.io/otel/trace" -) - -// snapshot is an record of a spans state at a particular checkpointed time. -// It is used as a read-only representation of that state. -type snapshot struct { - name string - spanContext trace.SpanContext - parent trace.SpanContext - spanKind trace.SpanKind - startTime time.Time - endTime time.Time - attributes []attribute.KeyValue - events []Event - links []Link - status Status - childSpanCount int - droppedAttributeCount int - droppedEventCount int - droppedLinkCount int - resource *resource.Resource - instrumentationScope instrumentation.Scope -} - -var _ ReadOnlySpan = snapshot{} - -func (s snapshot) private() {} - -// Name returns the name of the span. -func (s snapshot) Name() string { - return s.name -} - -// SpanContext returns the unique SpanContext that identifies the span. -func (s snapshot) SpanContext() trace.SpanContext { - return s.spanContext -} - -// Parent returns the unique SpanContext that identifies the parent of the -// span if one exists. If the span has no parent the returned SpanContext -// will be invalid. -func (s snapshot) Parent() trace.SpanContext { - return s.parent -} - -// SpanKind returns the role the span plays in a Trace. -func (s snapshot) SpanKind() trace.SpanKind { - return s.spanKind -} - -// StartTime returns the time the span started recording. -func (s snapshot) StartTime() time.Time { - return s.startTime -} - -// EndTime returns the time the span stopped recording. It will be zero if -// the span has not ended. -func (s snapshot) EndTime() time.Time { - return s.endTime -} - -// Attributes returns the defining attributes of the span. 
-func (s snapshot) Attributes() []attribute.KeyValue { - return s.attributes -} - -// Links returns all the links the span has to other spans. -func (s snapshot) Links() []Link { - return s.links -} - -// Events returns all the events that occurred within in the spans -// lifetime. -func (s snapshot) Events() []Event { - return s.events -} - -// Status returns the spans status. -func (s snapshot) Status() Status { - return s.status -} - -// InstrumentationScope returns information about the instrumentation -// scope that created the span. -func (s snapshot) InstrumentationScope() instrumentation.Scope { - return s.instrumentationScope -} - -// InstrumentationLibrary returns information about the instrumentation -// library that created the span. -func (s snapshot) InstrumentationLibrary() instrumentation.Library { - return s.instrumentationScope -} - -// Resource returns information about the entity that produced the span. -func (s snapshot) Resource() *resource.Resource { - return s.resource -} - -// DroppedAttributes returns the number of attributes dropped by the span -// due to limits being reached. -func (s snapshot) DroppedAttributes() int { - return s.droppedAttributeCount -} - -// DroppedLinks returns the number of links dropped by the span due to limits -// being reached. -func (s snapshot) DroppedLinks() int { - return s.droppedLinkCount -} - -// DroppedEvents returns the number of events dropped by the span due to -// limits being reached. -func (s snapshot) DroppedEvents() int { - return s.droppedEventCount -} - -// ChildSpanCount returns the count of spans that consider the span a -// direct parent. 
-func (s snapshot) ChildSpanCount() int { - return s.childSpanCount -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go deleted file mode 100644 index 4fcca26e..00000000 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go +++ /dev/null @@ -1,828 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace // import "go.opentelemetry.io/otel/sdk/trace" - -import ( - "context" - "fmt" - "reflect" - "runtime" - rt "runtime/trace" - "strings" - "sync" - "time" - "unicode/utf8" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/sdk/instrumentation" - "go.opentelemetry.io/otel/sdk/internal" - "go.opentelemetry.io/otel/sdk/resource" - semconv "go.opentelemetry.io/otel/semconv/v1.17.0" - "go.opentelemetry.io/otel/trace" -) - -// ReadOnlySpan allows reading information from the data structure underlying a -// trace.Span. It is used in places where reading information from a span is -// necessary but changing the span isn't necessary or allowed. -// -// Warning: methods may be added to this interface in minor releases. -type ReadOnlySpan interface { - // Name returns the name of the span. - Name() string - // SpanContext returns the unique SpanContext that identifies the span. 
- SpanContext() trace.SpanContext - // Parent returns the unique SpanContext that identifies the parent of the - // span if one exists. If the span has no parent the returned SpanContext - // will be invalid. - Parent() trace.SpanContext - // SpanKind returns the role the span plays in a Trace. - SpanKind() trace.SpanKind - // StartTime returns the time the span started recording. - StartTime() time.Time - // EndTime returns the time the span stopped recording. It will be zero if - // the span has not ended. - EndTime() time.Time - // Attributes returns the defining attributes of the span. - // The order of the returned attributes is not guaranteed to be stable across invocations. - Attributes() []attribute.KeyValue - // Links returns all the links the span has to other spans. - Links() []Link - // Events returns all the events that occurred within in the spans - // lifetime. - Events() []Event - // Status returns the spans status. - Status() Status - // InstrumentationScope returns information about the instrumentation - // scope that created the span. - InstrumentationScope() instrumentation.Scope - // InstrumentationLibrary returns information about the instrumentation - // library that created the span. - // Deprecated: please use InstrumentationScope instead. - InstrumentationLibrary() instrumentation.Library - // Resource returns information about the entity that produced the span. - Resource() *resource.Resource - // DroppedAttributes returns the number of attributes dropped by the span - // due to limits being reached. - DroppedAttributes() int - // DroppedLinks returns the number of links dropped by the span due to - // limits being reached. - DroppedLinks() int - // DroppedEvents returns the number of events dropped by the span due to - // limits being reached. - DroppedEvents() int - // ChildSpanCount returns the count of spans that consider the span a - // direct parent. 
- ChildSpanCount() int - - // A private method to prevent users implementing the - // interface and so future additions to it will not - // violate compatibility. - private() -} - -// ReadWriteSpan exposes the same methods as trace.Span and in addition allows -// reading information from the underlying data structure. -// This interface exposes the union of the methods of trace.Span (which is a -// "write-only" span) and ReadOnlySpan. New methods for writing or reading span -// information should be added under trace.Span or ReadOnlySpan, respectively. -// -// Warning: methods may be added to this interface in minor releases. -type ReadWriteSpan interface { - trace.Span - ReadOnlySpan -} - -// recordingSpan is an implementation of the OpenTelemetry Span API -// representing the individual component of a trace that is sampled. -type recordingSpan struct { - // mu protects the contents of this span. - mu sync.Mutex - - // parent holds the parent span of this span as a trace.SpanContext. - parent trace.SpanContext - - // spanKind represents the kind of this span as a trace.SpanKind. - spanKind trace.SpanKind - - // name is the name of this span. - name string - - // startTime is the time at which this span was started. - startTime time.Time - - // endTime is the time at which this span was ended. It contains the zero - // value of time.Time until the span is ended. - endTime time.Time - - // status is the status of this span. - status Status - - // childSpanCount holds the number of child spans created for this span. - childSpanCount int - - // spanContext holds the SpanContext of this span. - spanContext trace.SpanContext - - // attributes is a collection of user provided key/values. The collection - // is constrained by a configurable maximum held by the parent - // TracerProvider. When additional attributes are added after this maximum - // is reached these attributes the user is attempting to add are dropped. 
- // This dropped number of attributes is tracked and reported in the - // ReadOnlySpan exported when the span ends. - attributes []attribute.KeyValue - droppedAttributes int - - // events are stored in FIFO queue capped by configured limit. - events evictedQueue - - // links are stored in FIFO queue capped by configured limit. - links evictedQueue - - // executionTracerTaskEnd ends the execution tracer span. - executionTracerTaskEnd func() - - // tracer is the SDK tracer that created this span. - tracer *tracer -} - -var _ ReadWriteSpan = (*recordingSpan)(nil) -var _ runtimeTracer = (*recordingSpan)(nil) - -// SpanContext returns the SpanContext of this span. -func (s *recordingSpan) SpanContext() trace.SpanContext { - if s == nil { - return trace.SpanContext{} - } - return s.spanContext -} - -// IsRecording returns if this span is being recorded. If this span has ended -// this will return false. -func (s *recordingSpan) IsRecording() bool { - if s == nil { - return false - } - s.mu.Lock() - defer s.mu.Unlock() - - return s.endTime.IsZero() -} - -// SetStatus sets the status of the Span in the form of a code and a -// description, overriding previous values set. The description is only -// included in the set status when the code is for an error. If this span is -// not being recorded than this method does nothing. -func (s *recordingSpan) SetStatus(code codes.Code, description string) { - if !s.IsRecording() { - return - } - s.mu.Lock() - defer s.mu.Unlock() - if s.status.Code > code { - return - } - - status := Status{Code: code} - if code == codes.Error { - status.Description = description - } - - s.status = status -} - -// SetAttributes sets attributes of this span. -// -// If a key from attributes already exists the value associated with that key -// will be overwritten with the value contained in attributes. -// -// If this span is not being recorded than this method does nothing. 
-// -// If adding attributes to the span would exceed the maximum amount of -// attributes the span is configured to have, the last added attributes will -// be dropped. -func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) { - if !s.IsRecording() { - return - } - - s.mu.Lock() - defer s.mu.Unlock() - - limit := s.tracer.provider.spanLimits.AttributeCountLimit - if limit == 0 { - // No attributes allowed. - s.droppedAttributes += len(attributes) - return - } - - // If adding these attributes could exceed the capacity of s perform a - // de-duplication and truncation while adding to avoid over allocation. - if limit > 0 && len(s.attributes)+len(attributes) > limit { - s.addOverCapAttrs(limit, attributes) - return - } - - // Otherwise, add without deduplication. When attributes are read they - // will be deduplicated, optimizing the operation. - for _, a := range attributes { - if !a.Valid() { - // Drop all invalid attributes. - s.droppedAttributes++ - continue - } - a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a) - s.attributes = append(s.attributes, a) - } -} - -// addOverCapAttrs adds the attributes attrs to the span s while -// de-duplicating the attributes of s and attrs and dropping attributes that -// exceed the limit. -// -// This method assumes s.mu.Lock is held by the caller. -// -// This method should only be called when there is a possibility that adding -// attrs to s will exceed the limit. Otherwise, attrs should be added to s -// without checking for duplicates and all retrieval methods of the attributes -// for s will de-duplicate as needed. -// -// This method assumes limit is a value > 0. The argument should be validated -// by the caller. -func (s *recordingSpan) addOverCapAttrs(limit int, attrs []attribute.KeyValue) { - // In order to not allocate more capacity to s.attributes than needed, - // prune and truncate this addition of attributes while adding. 
- - // Do not set a capacity when creating this map. Benchmark testing has - // showed this to only add unused memory allocations in general use. - exists := make(map[attribute.Key]int) - s.dedupeAttrsFromRecord(&exists) - - // Now that s.attributes is deduplicated, adding unique attributes up to - // the capacity of s will not over allocate s.attributes. - for _, a := range attrs { - if !a.Valid() { - // Drop all invalid attributes. - s.droppedAttributes++ - continue - } - - if idx, ok := exists[a.Key]; ok { - // Perform all updates before dropping, even when at capacity. - s.attributes[idx] = a - continue - } - - if len(s.attributes) >= limit { - // Do not just drop all of the remaining attributes, make sure - // updates are checked and performed. - s.droppedAttributes++ - } else { - a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a) - s.attributes = append(s.attributes, a) - exists[a.Key] = len(s.attributes) - 1 - } - } -} - -// truncateAttr returns a truncated version of attr. Only string and string -// slice attribute values are truncated. String values are truncated to at -// most a length of limit. Each string slice value is truncated in this fashion -// (the slice length itself is unaffected). -// -// No truncation is performed for a negative limit. -func truncateAttr(limit int, attr attribute.KeyValue) attribute.KeyValue { - if limit < 0 { - return attr - } - switch attr.Value.Type() { - case attribute.STRING: - if v := attr.Value.AsString(); len(v) > limit { - return attr.Key.String(safeTruncate(v, limit)) - } - case attribute.STRINGSLICE: - v := attr.Value.AsStringSlice() - for i := range v { - if len(v[i]) > limit { - v[i] = safeTruncate(v[i], limit) - } - } - return attr.Key.StringSlice(v) - } - return attr -} - -// safeTruncate truncates the string and guarantees valid UTF-8 is returned. 
-func safeTruncate(input string, limit int) string { - if trunc, ok := safeTruncateValidUTF8(input, limit); ok { - return trunc - } - trunc, _ := safeTruncateValidUTF8(strings.ToValidUTF8(input, ""), limit) - return trunc -} - -// safeTruncateValidUTF8 returns a copy of the input string safely truncated to -// limit. The truncation is ensured to occur at the bounds of complete UTF-8 -// characters. If invalid encoding of UTF-8 is encountered, input is returned -// with false, otherwise, the truncated input will be returned with true. -func safeTruncateValidUTF8(input string, limit int) (string, bool) { - for cnt := 0; cnt <= limit; { - r, size := utf8.DecodeRuneInString(input[cnt:]) - if r == utf8.RuneError { - return input, false - } - - if cnt+size > limit { - return input[:cnt], true - } - cnt += size - } - return input, true -} - -// End ends the span. This method does nothing if the span is already ended or -// is not being recorded. -// -// The only SpanOption currently supported is WithTimestamp which will set the -// end time for a Span's life-cycle. -// -// If this method is called while panicking an error event is added to the -// Span before ending it and the panic is continued. -func (s *recordingSpan) End(options ...trace.SpanEndOption) { - // Do not start by checking if the span is being recorded which requires - // acquiring a lock. Make a minimal check that the span is not nil. - if s == nil { - return - } - - // Store the end time as soon as possible to avoid artificially increasing - // the span's duration in case some operation below takes a while. - et := internal.MonotonicEndTime(s.startTime) - - // Do relative expensive check now that we have an end time and see if we - // need to do any more processing. - if !s.IsRecording() { - return - } - - config := trace.NewSpanEndConfig(options...) - if recovered := recover(); recovered != nil { - // Record but don't stop the panic. 
- defer panic(recovered) - opts := []trace.EventOption{ - trace.WithAttributes( - semconv.ExceptionType(typeStr(recovered)), - semconv.ExceptionMessage(fmt.Sprint(recovered)), - ), - } - - if config.StackTrace() { - opts = append(opts, trace.WithAttributes( - semconv.ExceptionStacktrace(recordStackTrace()), - )) - } - - s.addEvent(semconv.ExceptionEventName, opts...) - } - - if s.executionTracerTaskEnd != nil { - s.executionTracerTaskEnd() - } - - s.mu.Lock() - // Setting endTime to non-zero marks the span as ended and not recording. - if config.Timestamp().IsZero() { - s.endTime = et - } else { - s.endTime = config.Timestamp() - } - s.mu.Unlock() - - sps := s.tracer.provider.getSpanProcessors() - if len(sps) == 0 { - return - } - snap := s.snapshot() - for _, sp := range sps { - sp.sp.OnEnd(snap) - } -} - -// RecordError will record err as a span event for this span. An additional call to -// SetStatus is required if the Status of the Span should be set to Error, this method -// does not change the Span status. If this span is not being recorded or err is nil -// than this method does nothing. -func (s *recordingSpan) RecordError(err error, opts ...trace.EventOption) { - if s == nil || err == nil || !s.IsRecording() { - return - } - - opts = append(opts, trace.WithAttributes( - semconv.ExceptionType(typeStr(err)), - semconv.ExceptionMessage(err.Error()), - )) - - c := trace.NewEventConfig(opts...) - if c.StackTrace() { - opts = append(opts, trace.WithAttributes( - semconv.ExceptionStacktrace(recordStackTrace()), - )) - } - - s.addEvent(semconv.ExceptionEventName, opts...) -} - -func typeStr(i interface{}) string { - t := reflect.TypeOf(i) - if t.PkgPath() == "" && t.Name() == "" { - // Likely a builtin type. 
- return t.String() - } - return fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()) -} - -func recordStackTrace() string { - stackTrace := make([]byte, 2048) - n := runtime.Stack(stackTrace, false) - - return string(stackTrace[0:n]) -} - -// AddEvent adds an event with the provided name and options. If this span is -// not being recorded than this method does nothing. -func (s *recordingSpan) AddEvent(name string, o ...trace.EventOption) { - if !s.IsRecording() { - return - } - s.addEvent(name, o...) -} - -func (s *recordingSpan) addEvent(name string, o ...trace.EventOption) { - c := trace.NewEventConfig(o...) - e := Event{Name: name, Attributes: c.Attributes(), Time: c.Timestamp()} - - // Discard attributes over limit. - limit := s.tracer.provider.spanLimits.AttributePerEventCountLimit - if limit == 0 { - // Drop all attributes. - e.DroppedAttributeCount = len(e.Attributes) - e.Attributes = nil - } else if limit > 0 && len(e.Attributes) > limit { - // Drop over capacity. - e.DroppedAttributeCount = len(e.Attributes) - limit - e.Attributes = e.Attributes[:limit] - } - - s.mu.Lock() - s.events.add(e) - s.mu.Unlock() -} - -// SetName sets the name of this span. If this span is not being recorded than -// this method does nothing. -func (s *recordingSpan) SetName(name string) { - if !s.IsRecording() { - return - } - - s.mu.Lock() - defer s.mu.Unlock() - s.name = name -} - -// Name returns the name of this span. -func (s *recordingSpan) Name() string { - s.mu.Lock() - defer s.mu.Unlock() - return s.name -} - -// Name returns the SpanContext of this span's parent span. -func (s *recordingSpan) Parent() trace.SpanContext { - s.mu.Lock() - defer s.mu.Unlock() - return s.parent -} - -// SpanKind returns the SpanKind of this span. -func (s *recordingSpan) SpanKind() trace.SpanKind { - s.mu.Lock() - defer s.mu.Unlock() - return s.spanKind -} - -// StartTime returns the time this span started. 
-func (s *recordingSpan) StartTime() time.Time { - s.mu.Lock() - defer s.mu.Unlock() - return s.startTime -} - -// EndTime returns the time this span ended. For spans that have not yet -// ended, the returned value will be the zero value of time.Time. -func (s *recordingSpan) EndTime() time.Time { - s.mu.Lock() - defer s.mu.Unlock() - return s.endTime -} - -// Attributes returns the attributes of this span. -// -// The order of the returned attributes is not guaranteed to be stable. -func (s *recordingSpan) Attributes() []attribute.KeyValue { - s.mu.Lock() - defer s.mu.Unlock() - s.dedupeAttrs() - return s.attributes -} - -// dedupeAttrs deduplicates the attributes of s to fit capacity. -// -// This method assumes s.mu.Lock is held by the caller. -func (s *recordingSpan) dedupeAttrs() { - // Do not set a capacity when creating this map. Benchmark testing has - // showed this to only add unused memory allocations in general use. - exists := make(map[attribute.Key]int) - s.dedupeAttrsFromRecord(&exists) -} - -// dedupeAttrsFromRecord deduplicates the attributes of s to fit capacity -// using record as the record of unique attribute keys to their index. -// -// This method assumes s.mu.Lock is held by the caller. -func (s *recordingSpan) dedupeAttrsFromRecord(record *map[attribute.Key]int) { - // Use the fact that slices share the same backing array. - unique := s.attributes[:0] - for _, a := range s.attributes { - if idx, ok := (*record)[a.Key]; ok { - unique[idx] = a - } else { - unique = append(unique, a) - (*record)[a.Key] = len(unique) - 1 - } - } - // s.attributes have element types of attribute.KeyValue. These types are - // not pointers and they themselves do not contain pointer fields, - // therefore the duplicate values do not need to be zeroed for them to be - // garbage collected. - s.attributes = unique -} - -// Links returns the links of this span. 
-func (s *recordingSpan) Links() []Link { - s.mu.Lock() - defer s.mu.Unlock() - if len(s.links.queue) == 0 { - return []Link{} - } - return s.interfaceArrayToLinksArray() -} - -// Events returns the events of this span. -func (s *recordingSpan) Events() []Event { - s.mu.Lock() - defer s.mu.Unlock() - if len(s.events.queue) == 0 { - return []Event{} - } - return s.interfaceArrayToEventArray() -} - -// Status returns the status of this span. -func (s *recordingSpan) Status() Status { - s.mu.Lock() - defer s.mu.Unlock() - return s.status -} - -// InstrumentationScope returns the instrumentation.Scope associated with -// the Tracer that created this span. -func (s *recordingSpan) InstrumentationScope() instrumentation.Scope { - s.mu.Lock() - defer s.mu.Unlock() - return s.tracer.instrumentationScope -} - -// InstrumentationLibrary returns the instrumentation.Library associated with -// the Tracer that created this span. -func (s *recordingSpan) InstrumentationLibrary() instrumentation.Library { - s.mu.Lock() - defer s.mu.Unlock() - return s.tracer.instrumentationScope -} - -// Resource returns the Resource associated with the Tracer that created this -// span. -func (s *recordingSpan) Resource() *resource.Resource { - s.mu.Lock() - defer s.mu.Unlock() - return s.tracer.provider.resource -} - -func (s *recordingSpan) addLink(link trace.Link) { - if !s.IsRecording() || !link.SpanContext.IsValid() { - return - } - - l := Link{SpanContext: link.SpanContext, Attributes: link.Attributes} - - // Discard attributes over limit. - limit := s.tracer.provider.spanLimits.AttributePerLinkCountLimit - if limit == 0 { - // Drop all attributes. 
- l.DroppedAttributeCount = len(l.Attributes) - l.Attributes = nil - } else if limit > 0 && len(l.Attributes) > limit { - l.DroppedAttributeCount = len(l.Attributes) - limit - l.Attributes = l.Attributes[:limit] - } - - s.mu.Lock() - s.links.add(l) - s.mu.Unlock() -} - -// DroppedAttributes returns the number of attributes dropped by the span -// due to limits being reached. -func (s *recordingSpan) DroppedAttributes() int { - s.mu.Lock() - defer s.mu.Unlock() - return s.droppedAttributes -} - -// DroppedLinks returns the number of links dropped by the span due to limits -// being reached. -func (s *recordingSpan) DroppedLinks() int { - s.mu.Lock() - defer s.mu.Unlock() - return s.links.droppedCount -} - -// DroppedEvents returns the number of events dropped by the span due to -// limits being reached. -func (s *recordingSpan) DroppedEvents() int { - s.mu.Lock() - defer s.mu.Unlock() - return s.events.droppedCount -} - -// ChildSpanCount returns the count of spans that consider the span a -// direct parent. -func (s *recordingSpan) ChildSpanCount() int { - s.mu.Lock() - defer s.mu.Unlock() - return s.childSpanCount -} - -// TracerProvider returns a trace.TracerProvider that can be used to generate -// additional Spans on the same telemetry pipeline as the current Span. -func (s *recordingSpan) TracerProvider() trace.TracerProvider { - return s.tracer.provider -} - -// snapshot creates a read-only copy of the current state of the span. 
-func (s *recordingSpan) snapshot() ReadOnlySpan { - var sd snapshot - s.mu.Lock() - defer s.mu.Unlock() - - sd.endTime = s.endTime - sd.instrumentationScope = s.tracer.instrumentationScope - sd.name = s.name - sd.parent = s.parent - sd.resource = s.tracer.provider.resource - sd.spanContext = s.spanContext - sd.spanKind = s.spanKind - sd.startTime = s.startTime - sd.status = s.status - sd.childSpanCount = s.childSpanCount - - if len(s.attributes) > 0 { - s.dedupeAttrs() - sd.attributes = s.attributes - } - sd.droppedAttributeCount = s.droppedAttributes - if len(s.events.queue) > 0 { - sd.events = s.interfaceArrayToEventArray() - sd.droppedEventCount = s.events.droppedCount - } - if len(s.links.queue) > 0 { - sd.links = s.interfaceArrayToLinksArray() - sd.droppedLinkCount = s.links.droppedCount - } - return &sd -} - -func (s *recordingSpan) interfaceArrayToLinksArray() []Link { - linkArr := make([]Link, 0) - for _, value := range s.links.queue { - linkArr = append(linkArr, value.(Link)) - } - return linkArr -} - -func (s *recordingSpan) interfaceArrayToEventArray() []Event { - eventArr := make([]Event, 0) - for _, value := range s.events.queue { - eventArr = append(eventArr, value.(Event)) - } - return eventArr -} - -func (s *recordingSpan) addChild() { - if !s.IsRecording() { - return - } - s.mu.Lock() - s.childSpanCount++ - s.mu.Unlock() -} - -func (*recordingSpan) private() {} - -// runtimeTrace starts a "runtime/trace".Task for the span and returns a -// context containing the task. -func (s *recordingSpan) runtimeTrace(ctx context.Context) context.Context { - if !rt.IsEnabled() { - // Avoid additional overhead if runtime/trace is not enabled. - return ctx - } - nctx, task := rt.NewTask(ctx, s.name) - - s.mu.Lock() - s.executionTracerTaskEnd = task.End - s.mu.Unlock() - - return nctx -} - -// nonRecordingSpan is a minimal implementation of the OpenTelemetry Span API -// that wraps a SpanContext. 
It performs no operations other than to return -// the wrapped SpanContext or TracerProvider that created it. -type nonRecordingSpan struct { - // tracer is the SDK tracer that created this span. - tracer *tracer - sc trace.SpanContext -} - -var _ trace.Span = nonRecordingSpan{} - -// SpanContext returns the wrapped SpanContext. -func (s nonRecordingSpan) SpanContext() trace.SpanContext { return s.sc } - -// IsRecording always returns false. -func (nonRecordingSpan) IsRecording() bool { return false } - -// SetStatus does nothing. -func (nonRecordingSpan) SetStatus(codes.Code, string) {} - -// SetError does nothing. -func (nonRecordingSpan) SetError(bool) {} - -// SetAttributes does nothing. -func (nonRecordingSpan) SetAttributes(...attribute.KeyValue) {} - -// End does nothing. -func (nonRecordingSpan) End(...trace.SpanEndOption) {} - -// RecordError does nothing. -func (nonRecordingSpan) RecordError(error, ...trace.EventOption) {} - -// AddEvent does nothing. -func (nonRecordingSpan) AddEvent(string, ...trace.EventOption) {} - -// SetName does nothing. -func (nonRecordingSpan) SetName(string) {} - -// TracerProvider returns the trace.TracerProvider that provided the Tracer -// that created this span. -func (s nonRecordingSpan) TracerProvider() trace.TracerProvider { return s.tracer.provider } - -func isRecording(s SamplingResult) bool { - return s.Decision == RecordOnly || s.Decision == RecordAndSample -} - -func isSampled(s SamplingResult) bool { - return s.Decision == RecordAndSample -} - -// Status is the classified state of a Span. -type Status struct { - // Code is an identifier of a Spans state classification. - Code codes.Code - // Description is a user hint about why that status was set. It is only - // applicable when Code is Error. 
- Description string -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go deleted file mode 100644 index c9bd52f7..00000000 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace // import "go.opentelemetry.io/otel/sdk/trace" - -import "context" - -// SpanExporter handles the delivery of spans to external receivers. This is -// the final component in the trace export pipeline. -type SpanExporter interface { - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. - - // ExportSpans exports a batch of spans. - // - // This function is called synchronously, so there is no concurrency - // safety requirement. However, due to the synchronous calling pattern, - // it is critical that all timeouts and cancellations contained in the - // passed context must be honored. - // - // Any retry logic must be contained in this function. The SDK that - // calls this function will not implement any retry logic. All errors - // returned by this function are considered unrecoverable and will be - // reported to a configured error Handler. 
- ExportSpans(ctx context.Context, spans []ReadOnlySpan) error - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. - - // Shutdown notifies the exporter of a pending halt to operations. The - // exporter is expected to perform any cleanup or synchronization it - // requires while honoring all timeouts and cancellations contained in - // the passed context. - Shutdown(ctx context.Context) error - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go deleted file mode 100644 index aa4d4221..00000000 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace // import "go.opentelemetry.io/otel/sdk/trace" - -import "go.opentelemetry.io/otel/sdk/internal/env" - -const ( - // DefaultAttributeValueLengthLimit is the default maximum allowed - // attribute value length, unlimited. - DefaultAttributeValueLengthLimit = -1 - - // DefaultAttributeCountLimit is the default maximum number of attributes - // a span can have. - DefaultAttributeCountLimit = 128 - - // DefaultEventCountLimit is the default maximum number of events a span - // can have. 
- DefaultEventCountLimit = 128 - - // DefaultLinkCountLimit is the default maximum number of links a span can - // have. - DefaultLinkCountLimit = 128 - - // DefaultAttributePerEventCountLimit is the default maximum number of - // attributes a span event can have. - DefaultAttributePerEventCountLimit = 128 - - // DefaultAttributePerLinkCountLimit is the default maximum number of - // attributes a span link can have. - DefaultAttributePerLinkCountLimit = 128 -) - -// SpanLimits represents the limits of a span. -type SpanLimits struct { - // AttributeValueLengthLimit is the maximum allowed attribute value length. - // - // This limit only applies to string and string slice attribute values. - // Any string longer than this value will be truncated to this length. - // - // Setting this to a negative value means no limit is applied. - AttributeValueLengthLimit int - - // AttributeCountLimit is the maximum allowed span attribute count. Any - // attribute added to a span once this limit is reached will be dropped. - // - // Setting this to zero means no attributes will be recorded. - // - // Setting this to a negative value means no limit is applied. - AttributeCountLimit int - - // EventCountLimit is the maximum allowed span event count. Any event - // added to a span once this limit is reached means it will be added but - // the oldest event will be dropped. - // - // Setting this to zero means no events we be recorded. - // - // Setting this to a negative value means no limit is applied. - EventCountLimit int - - // LinkCountLimit is the maximum allowed span link count. Any link added - // to a span once this limit is reached means it will be added but the - // oldest link will be dropped. - // - // Setting this to zero means no links we be recorded. - // - // Setting this to a negative value means no limit is applied. - LinkCountLimit int - - // AttributePerEventCountLimit is the maximum number of attributes allowed - // per span event. 
Any attribute added after this limit reached will be - // dropped. - // - // Setting this to zero means no attributes will be recorded for events. - // - // Setting this to a negative value means no limit is applied. - AttributePerEventCountLimit int - - // AttributePerLinkCountLimit is the maximum number of attributes allowed - // per span link. Any attribute added after this limit reached will be - // dropped. - // - // Setting this to zero means no attributes will be recorded for links. - // - // Setting this to a negative value means no limit is applied. - AttributePerLinkCountLimit int -} - -// NewSpanLimits returns a SpanLimits with all limits set to the value their -// corresponding environment variable holds, or the default if unset. -// -// • AttributeValueLengthLimit: OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT -// (default: unlimited) -// -// • AttributeCountLimit: OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT (default: 128) -// -// • EventCountLimit: OTEL_SPAN_EVENT_COUNT_LIMIT (default: 128) -// -// • AttributePerEventCountLimit: OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT (default: -// 128) -// -// • LinkCountLimit: OTEL_SPAN_LINK_COUNT_LIMIT (default: 128) -// -// • AttributePerLinkCountLimit: OTEL_LINK_ATTRIBUTE_COUNT_LIMIT (default: 128) -func NewSpanLimits() SpanLimits { - return SpanLimits{ - AttributeValueLengthLimit: env.SpanAttributeValueLength(DefaultAttributeValueLengthLimit), - AttributeCountLimit: env.SpanAttributeCount(DefaultAttributeCountLimit), - EventCountLimit: env.SpanEventCount(DefaultEventCountLimit), - LinkCountLimit: env.SpanLinkCount(DefaultLinkCountLimit), - AttributePerEventCountLimit: env.SpanEventAttributeCount(DefaultAttributePerEventCountLimit), - AttributePerLinkCountLimit: env.SpanLinkAttributeCount(DefaultAttributePerLinkCountLimit), - } -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go deleted file mode 100644 index 9c53657a..00000000 --- 
a/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace // import "go.opentelemetry.io/otel/sdk/trace" - -import ( - "context" - "sync" -) - -// SpanProcessor is a processing pipeline for spans in the trace signal. -// SpanProcessors registered with a TracerProvider and are called at the start -// and end of a Span's lifecycle, and are called in the order they are -// registered. -type SpanProcessor interface { - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. - - // OnStart is called when a span is started. It is called synchronously - // and should not block. - OnStart(parent context.Context, s ReadWriteSpan) - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. - - // OnEnd is called when span is finished. It is called synchronously and - // hence not block. - OnEnd(s ReadOnlySpan) - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. - - // Shutdown is called when the SDK shuts down. Any cleanup or release of - // resources held by the processor should be done in this call. - // - // Calls to OnStart, OnEnd, or ForceFlush after this has been called - // should be ignored. 
- // - // All timeouts and cancellations contained in ctx must be honored, this - // should not block indefinitely. - Shutdown(ctx context.Context) error - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. - - // ForceFlush exports all ended spans to the configured Exporter that have not yet - // been exported. It should only be called when absolutely necessary, such as when - // using a FaaS provider that may suspend the process after an invocation, but before - // the Processor can export the completed spans. - ForceFlush(ctx context.Context) error - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. -} - -type spanProcessorState struct { - sp SpanProcessor - state sync.Once -} - -func newSpanProcessorState(sp SpanProcessor) *spanProcessorState { - return &spanProcessorState{sp: sp} -} - -type spanProcessorStates []*spanProcessorState diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go deleted file mode 100644 index 85a71227..00000000 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package trace // import "go.opentelemetry.io/otel/sdk/trace" - -import ( - "context" - "time" - - "go.opentelemetry.io/otel/sdk/instrumentation" - "go.opentelemetry.io/otel/trace" -) - -type tracer struct { - provider *TracerProvider - instrumentationScope instrumentation.Scope -} - -var _ trace.Tracer = &tracer{} - -// Start starts a Span and returns it along with a context containing it. -// -// The Span is created with the provided name and as a child of any existing -// span context found in the passed context. The created Span will be -// configured appropriately by any SpanOption passed. -func (tr *tracer) Start(ctx context.Context, name string, options ...trace.SpanStartOption) (context.Context, trace.Span) { - config := trace.NewSpanStartConfig(options...) - - if ctx == nil { - // Prevent trace.ContextWithSpan from panicking. - ctx = context.Background() - } - - // For local spans created by this SDK, track child span count. - if p := trace.SpanFromContext(ctx); p != nil { - if sdkSpan, ok := p.(*recordingSpan); ok { - sdkSpan.addChild() - } - } - - s := tr.newSpan(ctx, name, &config) - if rw, ok := s.(ReadWriteSpan); ok && s.IsRecording() { - sps := tr.provider.getSpanProcessors() - for _, sp := range sps { - sp.sp.OnStart(ctx, rw) - } - } - if rtt, ok := s.(runtimeTracer); ok { - ctx = rtt.runtimeTrace(ctx) - } - - return trace.ContextWithSpan(ctx, s), s -} - -type runtimeTracer interface { - // runtimeTrace starts a "runtime/trace".Task for the span and - // returns a context containing the task. - runtimeTrace(ctx context.Context) context.Context -} - -// newSpan returns a new configured span. -func (tr *tracer) newSpan(ctx context.Context, name string, config *trace.SpanConfig) trace.Span { - // If told explicitly to make this a new root use a zero value SpanContext - // as a parent which contains an invalid trace ID and is not remote. 
- var psc trace.SpanContext - if config.NewRoot() { - ctx = trace.ContextWithSpanContext(ctx, psc) - } else { - psc = trace.SpanContextFromContext(ctx) - } - - // If there is a valid parent trace ID, use it to ensure the continuity of - // the trace. Always generate a new span ID so other components can rely - // on a unique span ID, even if the Span is non-recording. - var tid trace.TraceID - var sid trace.SpanID - if !psc.TraceID().IsValid() { - tid, sid = tr.provider.idGenerator.NewIDs(ctx) - } else { - tid = psc.TraceID() - sid = tr.provider.idGenerator.NewSpanID(ctx, tid) - } - - samplingResult := tr.provider.sampler.ShouldSample(SamplingParameters{ - ParentContext: ctx, - TraceID: tid, - Name: name, - Kind: config.SpanKind(), - Attributes: config.Attributes(), - Links: config.Links(), - }) - - scc := trace.SpanContextConfig{ - TraceID: tid, - SpanID: sid, - TraceState: samplingResult.Tracestate, - } - if isSampled(samplingResult) { - scc.TraceFlags = psc.TraceFlags() | trace.FlagsSampled - } else { - scc.TraceFlags = psc.TraceFlags() &^ trace.FlagsSampled - } - sc := trace.NewSpanContext(scc) - - if !isRecording(samplingResult) { - return tr.newNonRecordingSpan(sc) - } - return tr.newRecordingSpan(psc, sc, name, samplingResult, config) -} - -// newRecordingSpan returns a new configured recordingSpan. -func (tr *tracer) newRecordingSpan(psc, sc trace.SpanContext, name string, sr SamplingResult, config *trace.SpanConfig) *recordingSpan { - startTime := config.Timestamp() - if startTime.IsZero() { - startTime = time.Now() - } - - s := &recordingSpan{ - // Do not pre-allocate the attributes slice here! Doing so will - // allocate memory that is likely never going to be used, or if used, - // will be over-sized. The default Go compiler has been tested to - // dynamically allocate needed space very well. 
Benchmarking has shown - // it to be more performant than what we can predetermine here, - // especially for the common use case of few to no added - // attributes. - - parent: psc, - spanContext: sc, - spanKind: trace.ValidateSpanKind(config.SpanKind()), - name: name, - startTime: startTime, - events: newEvictedQueue(tr.provider.spanLimits.EventCountLimit), - links: newEvictedQueue(tr.provider.spanLimits.LinkCountLimit), - tracer: tr, - } - - for _, l := range config.Links() { - s.addLink(l) - } - - s.SetAttributes(sr.Attributes...) - s.SetAttributes(config.Attributes()...) - - return s -} - -// newNonRecordingSpan returns a new configured nonRecordingSpan. -func (tr *tracer) newNonRecordingSpan(sc trace.SpanContext) nonRecordingSpan { - return nonRecordingSpan{tracer: tr, sc: sc} -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/version.go b/vendor/go.opentelemetry.io/otel/sdk/trace/version.go deleted file mode 100644 index d3457ed1..00000000 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/version.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace // import "go.opentelemetry.io/otel/sdk/trace" - -// version is the current release version of the metric SDK in use. 
-func version() string { - return "1.16.0-rc.1" -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go deleted file mode 100644 index dbef90b0..00000000 --- a/vendor/go.opentelemetry.io/otel/sdk/version.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package sdk // import "go.opentelemetry.io/otel/sdk" - -// Version is the current release version of the OpenTelemetry SDK in use. -func Version() string { - return "1.16.0" -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/internal/http.go b/vendor/go.opentelemetry.io/otel/semconv/internal/http.go deleted file mode 100644 index 19c394c6..00000000 --- a/vendor/go.opentelemetry.io/otel/semconv/internal/http.go +++ /dev/null @@ -1,338 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package internal // import "go.opentelemetry.io/otel/semconv/internal" - -import ( - "fmt" - "net" - "net/http" - "strconv" - "strings" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/trace" -) - -// SemanticConventions are the semantic convention values defined for a -// version of the OpenTelemetry specification. -type SemanticConventions struct { - EnduserIDKey attribute.Key - HTTPClientIPKey attribute.Key - HTTPFlavorKey attribute.Key - HTTPHostKey attribute.Key - HTTPMethodKey attribute.Key - HTTPRequestContentLengthKey attribute.Key - HTTPRouteKey attribute.Key - HTTPSchemeHTTP attribute.KeyValue - HTTPSchemeHTTPS attribute.KeyValue - HTTPServerNameKey attribute.Key - HTTPStatusCodeKey attribute.Key - HTTPTargetKey attribute.Key - HTTPURLKey attribute.Key - HTTPUserAgentKey attribute.Key - NetHostIPKey attribute.Key - NetHostNameKey attribute.Key - NetHostPortKey attribute.Key - NetPeerIPKey attribute.Key - NetPeerNameKey attribute.Key - NetPeerPortKey attribute.Key - NetTransportIP attribute.KeyValue - NetTransportOther attribute.KeyValue - NetTransportTCP attribute.KeyValue - NetTransportUDP attribute.KeyValue - NetTransportUnix attribute.KeyValue -} - -// NetAttributesFromHTTPRequest generates attributes of the net -// namespace as specified by the OpenTelemetry specification for a -// span. The network parameter is a string that net.Dial function -// from standard library can understand. 
-func (sc *SemanticConventions) NetAttributesFromHTTPRequest(network string, request *http.Request) []attribute.KeyValue { - attrs := []attribute.KeyValue{} - - switch network { - case "tcp", "tcp4", "tcp6": - attrs = append(attrs, sc.NetTransportTCP) - case "udp", "udp4", "udp6": - attrs = append(attrs, sc.NetTransportUDP) - case "ip", "ip4", "ip6": - attrs = append(attrs, sc.NetTransportIP) - case "unix", "unixgram", "unixpacket": - attrs = append(attrs, sc.NetTransportUnix) - default: - attrs = append(attrs, sc.NetTransportOther) - } - - peerIP, peerName, peerPort := hostIPNamePort(request.RemoteAddr) - if peerIP != "" { - attrs = append(attrs, sc.NetPeerIPKey.String(peerIP)) - } - if peerName != "" { - attrs = append(attrs, sc.NetPeerNameKey.String(peerName)) - } - if peerPort != 0 { - attrs = append(attrs, sc.NetPeerPortKey.Int(peerPort)) - } - - hostIP, hostName, hostPort := "", "", 0 - for _, someHost := range []string{request.Host, request.Header.Get("Host"), request.URL.Host} { - hostIP, hostName, hostPort = hostIPNamePort(someHost) - if hostIP != "" || hostName != "" || hostPort != 0 { - break - } - } - if hostIP != "" { - attrs = append(attrs, sc.NetHostIPKey.String(hostIP)) - } - if hostName != "" { - attrs = append(attrs, sc.NetHostNameKey.String(hostName)) - } - if hostPort != 0 { - attrs = append(attrs, sc.NetHostPortKey.Int(hostPort)) - } - - return attrs -} - -// hostIPNamePort extracts the IP address, name and (optional) port from hostWithPort. -// It handles both IPv4 and IPv6 addresses. If the host portion is not recognized -// as a valid IPv4 or IPv6 address, the `ip` result will be empty and the -// host portion will instead be returned in `name`. 
-func hostIPNamePort(hostWithPort string) (ip string, name string, port int) { - var ( - hostPart, portPart string - parsedPort uint64 - err error - ) - if hostPart, portPart, err = net.SplitHostPort(hostWithPort); err != nil { - hostPart, portPart = hostWithPort, "" - } - if parsedIP := net.ParseIP(hostPart); parsedIP != nil { - ip = parsedIP.String() - } else { - name = hostPart - } - if parsedPort, err = strconv.ParseUint(portPart, 10, 16); err == nil { - port = int(parsedPort) - } - return -} - -// EndUserAttributesFromHTTPRequest generates attributes of the -// enduser namespace as specified by the OpenTelemetry specification -// for a span. -func (sc *SemanticConventions) EndUserAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { - if username, _, ok := request.BasicAuth(); ok { - return []attribute.KeyValue{sc.EnduserIDKey.String(username)} - } - return nil -} - -// HTTPClientAttributesFromHTTPRequest generates attributes of the -// http namespace as specified by the OpenTelemetry specification for -// a span on the client side. -func (sc *SemanticConventions) HTTPClientAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { - attrs := []attribute.KeyValue{} - - // remove any username/password info that may be in the URL - // before adding it to the attributes - userinfo := request.URL.User - request.URL.User = nil - - attrs = append(attrs, sc.HTTPURLKey.String(request.URL.String())) - - // restore any username/password info that was removed - request.URL.User = userinfo - - return append(attrs, sc.httpCommonAttributesFromHTTPRequest(request)...) 
-} - -func (sc *SemanticConventions) httpCommonAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { - attrs := []attribute.KeyValue{} - if ua := request.UserAgent(); ua != "" { - attrs = append(attrs, sc.HTTPUserAgentKey.String(ua)) - } - if request.ContentLength > 0 { - attrs = append(attrs, sc.HTTPRequestContentLengthKey.Int64(request.ContentLength)) - } - - return append(attrs, sc.httpBasicAttributesFromHTTPRequest(request)...) -} - -func (sc *SemanticConventions) httpBasicAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { - // as these attributes are used by HTTPServerMetricAttributesFromHTTPRequest, they should be low-cardinality - attrs := []attribute.KeyValue{} - - if request.TLS != nil { - attrs = append(attrs, sc.HTTPSchemeHTTPS) - } else { - attrs = append(attrs, sc.HTTPSchemeHTTP) - } - - if request.Host != "" { - attrs = append(attrs, sc.HTTPHostKey.String(request.Host)) - } else if request.URL != nil && request.URL.Host != "" { - attrs = append(attrs, sc.HTTPHostKey.String(request.URL.Host)) - } - - flavor := "" - if request.ProtoMajor == 1 { - flavor = fmt.Sprintf("1.%d", request.ProtoMinor) - } else if request.ProtoMajor == 2 { - flavor = "2" - } - if flavor != "" { - attrs = append(attrs, sc.HTTPFlavorKey.String(flavor)) - } - - if request.Method != "" { - attrs = append(attrs, sc.HTTPMethodKey.String(request.Method)) - } else { - attrs = append(attrs, sc.HTTPMethodKey.String(http.MethodGet)) - } - - return attrs -} - -// HTTPServerMetricAttributesFromHTTPRequest generates low-cardinality attributes -// to be used with server-side HTTP metrics. -func (sc *SemanticConventions) HTTPServerMetricAttributesFromHTTPRequest(serverName string, request *http.Request) []attribute.KeyValue { - attrs := []attribute.KeyValue{} - if serverName != "" { - attrs = append(attrs, sc.HTTPServerNameKey.String(serverName)) - } - return append(attrs, sc.httpBasicAttributesFromHTTPRequest(request)...) 
-} - -// HTTPServerAttributesFromHTTPRequest generates attributes of the -// http namespace as specified by the OpenTelemetry specification for -// a span on the server side. Currently, only basic authentication is -// supported. -func (sc *SemanticConventions) HTTPServerAttributesFromHTTPRequest(serverName, route string, request *http.Request) []attribute.KeyValue { - attrs := []attribute.KeyValue{ - sc.HTTPTargetKey.String(request.RequestURI), - } - - if serverName != "" { - attrs = append(attrs, sc.HTTPServerNameKey.String(serverName)) - } - if route != "" { - attrs = append(attrs, sc.HTTPRouteKey.String(route)) - } - if values := request.Header["X-Forwarded-For"]; len(values) > 0 { - addr := values[0] - if i := strings.Index(addr, ","); i > 0 { - addr = addr[:i] - } - attrs = append(attrs, sc.HTTPClientIPKey.String(addr)) - } - - return append(attrs, sc.httpCommonAttributesFromHTTPRequest(request)...) -} - -// HTTPAttributesFromHTTPStatusCode generates attributes of the http -// namespace as specified by the OpenTelemetry specification for a -// span. 
-func (sc *SemanticConventions) HTTPAttributesFromHTTPStatusCode(code int) []attribute.KeyValue { - attrs := []attribute.KeyValue{ - sc.HTTPStatusCodeKey.Int(code), - } - return attrs -} - -type codeRange struct { - fromInclusive int - toInclusive int -} - -func (r codeRange) contains(code int) bool { - return r.fromInclusive <= code && code <= r.toInclusive -} - -var validRangesPerCategory = map[int][]codeRange{ - 1: { - {http.StatusContinue, http.StatusEarlyHints}, - }, - 2: { - {http.StatusOK, http.StatusAlreadyReported}, - {http.StatusIMUsed, http.StatusIMUsed}, - }, - 3: { - {http.StatusMultipleChoices, http.StatusUseProxy}, - {http.StatusTemporaryRedirect, http.StatusPermanentRedirect}, - }, - 4: { - {http.StatusBadRequest, http.StatusTeapot}, // yes, teapot is so useful… - {http.StatusMisdirectedRequest, http.StatusUpgradeRequired}, - {http.StatusPreconditionRequired, http.StatusTooManyRequests}, - {http.StatusRequestHeaderFieldsTooLarge, http.StatusRequestHeaderFieldsTooLarge}, - {http.StatusUnavailableForLegalReasons, http.StatusUnavailableForLegalReasons}, - }, - 5: { - {http.StatusInternalServerError, http.StatusLoopDetected}, - {http.StatusNotExtended, http.StatusNetworkAuthenticationRequired}, - }, -} - -// SpanStatusFromHTTPStatusCode generates a status code and a message -// as specified by the OpenTelemetry specification for a span. -func SpanStatusFromHTTPStatusCode(code int) (codes.Code, string) { - spanCode, valid := validateHTTPStatusCode(code) - if !valid { - return spanCode, fmt.Sprintf("Invalid HTTP status code %d", code) - } - return spanCode, "" -} - -// SpanStatusFromHTTPStatusCodeAndSpanKind generates a status code and a message -// as specified by the OpenTelemetry specification for a span. -// Exclude 4xx for SERVER to set the appropriate status. 
-func SpanStatusFromHTTPStatusCodeAndSpanKind(code int, spanKind trace.SpanKind) (codes.Code, string) { - spanCode, valid := validateHTTPStatusCode(code) - if !valid { - return spanCode, fmt.Sprintf("Invalid HTTP status code %d", code) - } - category := code / 100 - if spanKind == trace.SpanKindServer && category == 4 { - return codes.Unset, "" - } - return spanCode, "" -} - -// validateHTTPStatusCode validates the HTTP status code and returns -// corresponding span status code. If the `code` is not a valid HTTP status -// code, returns span status Error and false. -func validateHTTPStatusCode(code int) (codes.Code, bool) { - category := code / 100 - ranges, ok := validRangesPerCategory[category] - if !ok { - return codes.Error, false - } - ok = false - for _, crange := range ranges { - ok = crange.contains(code) - if ok { - break - } - } - if !ok { - return codes.Error, false - } - if category > 0 && category < 4 { - return codes.Unset, true - } - return codes.Error, true -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go deleted file mode 100644 index 71a1f774..00000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package semconv implements OpenTelemetry semantic conventions. 
-// -// OpenTelemetry semantic conventions are agreed standardized naming -// patterns for OpenTelemetry things. This package represents the conventions -// as of the v1.17.0 version of the OpenTelemetry specification. -package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go deleted file mode 100644 index 679c40c4..00000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" - -import "go.opentelemetry.io/otel/attribute" - -// This semantic convention defines the attributes used to represent a feature -// flag evaluation as an event. -const ( - // FeatureFlagKeyKey is the attribute Key conforming to the - // "feature_flag.key" semantic conventions. It represents the unique - // identifier of the feature flag. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'logo-color' - FeatureFlagKeyKey = attribute.Key("feature_flag.key") - - // FeatureFlagProviderNameKey is the attribute Key conforming to the - // "feature_flag.provider_name" semantic conventions. 
It represents the - // name of the service provider that performs the flag evaluation. - // - // Type: string - // RequirementLevel: Recommended - // Stability: stable - // Examples: 'Flag Manager' - FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name") - - // FeatureFlagVariantKey is the attribute Key conforming to the - // "feature_flag.variant" semantic conventions. It represents the sHOULD be - // a semantic identifier for a value. If one is unavailable, a stringified - // version of the value can be used. - // - // Type: string - // RequirementLevel: Recommended - // Stability: stable - // Examples: 'red', 'true', 'on' - // Note: A semantic identifier, commonly referred to as a variant, provides - // a means - // for referring to a value without including the value itself. This can - // provide additional context for understanding the meaning behind a value. - // For example, the variant `red` maybe be used for the value `#c05543`. - // - // A stringified version of the value can be used in situations where a - // semantic identifier is unavailable. String representation of the value - // should be determined by the implementer. - FeatureFlagVariantKey = attribute.Key("feature_flag.variant") -) - -// FeatureFlagKey returns an attribute KeyValue conforming to the -// "feature_flag.key" semantic conventions. It represents the unique identifier -// of the feature flag. -func FeatureFlagKey(val string) attribute.KeyValue { - return FeatureFlagKeyKey.String(val) -} - -// FeatureFlagProviderName returns an attribute KeyValue conforming to the -// "feature_flag.provider_name" semantic conventions. It represents the name of -// the service provider that performs the flag evaluation. -func FeatureFlagProviderName(val string) attribute.KeyValue { - return FeatureFlagProviderNameKey.String(val) -} - -// FeatureFlagVariant returns an attribute KeyValue conforming to the -// "feature_flag.variant" semantic conventions. 
It represents the sHOULD be a -// semantic identifier for a value. If one is unavailable, a stringified -// version of the value can be used. -func FeatureFlagVariant(val string) attribute.KeyValue { - return FeatureFlagVariantKey.String(val) -} - -// RPC received/sent message. -const ( - // MessageTypeKey is the attribute Key conforming to the "message.type" - // semantic conventions. It represents the whether this is a received or - // sent message. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - MessageTypeKey = attribute.Key("message.type") - - // MessageIDKey is the attribute Key conforming to the "message.id" - // semantic conventions. It represents the mUST be calculated as two - // different counters starting from `1` one for sent messages and one for - // received message. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Note: This way we guarantee that the values will be consistent between - // different implementations. - MessageIDKey = attribute.Key("message.id") - - // MessageCompressedSizeKey is the attribute Key conforming to the - // "message.compressed_size" semantic conventions. It represents the - // compressed size of the message in bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - MessageCompressedSizeKey = attribute.Key("message.compressed_size") - - // MessageUncompressedSizeKey is the attribute Key conforming to the - // "message.uncompressed_size" semantic conventions. It represents the - // uncompressed size of the message in bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size") -) - -var ( - // sent - MessageTypeSent = MessageTypeKey.String("SENT") - // received - MessageTypeReceived = MessageTypeKey.String("RECEIVED") -) - -// MessageID returns an attribute KeyValue conforming to the "message.id" -// semantic conventions. 
It represents the mUST be calculated as two different -// counters starting from `1` one for sent messages and one for received -// message. -func MessageID(val int) attribute.KeyValue { - return MessageIDKey.Int(val) -} - -// MessageCompressedSize returns an attribute KeyValue conforming to the -// "message.compressed_size" semantic conventions. It represents the compressed -// size of the message in bytes. -func MessageCompressedSize(val int) attribute.KeyValue { - return MessageCompressedSizeKey.Int(val) -} - -// MessageUncompressedSize returns an attribute KeyValue conforming to the -// "message.uncompressed_size" semantic conventions. It represents the -// uncompressed size of the message in bytes. -func MessageUncompressedSize(val int) attribute.KeyValue { - return MessageUncompressedSizeKey.Int(val) -} - -// The attributes used to report a single exception associated with a span. -const ( - // ExceptionEscapedKey is the attribute Key conforming to the - // "exception.escaped" semantic conventions. It represents the sHOULD be - // set to true if the exception event is recorded at a point where it is - // known that the exception is escaping the scope of the span. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - // Note: An exception is considered to have escaped (or left) the scope of - // a span, - // if that span is ended while the exception is still logically "in - // flight". - // This may be actually "in flight" in some languages (e.g. if the - // exception - // is passed to a Context manager's `__exit__` method in Python) but will - // usually be caught at the point of recording the exception in most - // languages. - // - // It is usually not possible to determine at the point where an exception - // is thrown - // whether it will escape the scope of a span. 
- // However, it is trivial to know that an exception - // will escape, if one checks for an active exception just before ending - // the span, - // as done in the [example above](#recording-an-exception). - // - // It follows that an exception may still escape the scope of the span - // even if the `exception.escaped` attribute was not set or set to false, - // since the event might have been recorded at a time where it was not - // clear whether the exception will escape. - ExceptionEscapedKey = attribute.Key("exception.escaped") -) - -// ExceptionEscaped returns an attribute KeyValue conforming to the -// "exception.escaped" semantic conventions. It represents the sHOULD be set to -// true if the exception event is recorded at a point where it is known that -// the exception is escaping the scope of the span. -func ExceptionEscaped(val bool) attribute.KeyValue { - return ExceptionEscapedKey.Bool(val) -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go deleted file mode 100644 index 9b8c559d..00000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" - -const ( - // ExceptionEventName is the name of the Span event representing an exception. 
- ExceptionEventName = "exception" -) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go deleted file mode 100644 index d5c4b5c1..00000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" - -// HTTP scheme attributes. -var ( - HTTPSchemeHTTP = HTTPSchemeKey.String("http") - HTTPSchemeHTTPS = HTTPSchemeKey.String("https") -) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go deleted file mode 100644 index 39a2eab3..00000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go +++ /dev/null @@ -1,2010 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" - -import "go.opentelemetry.io/otel/attribute" - -// The web browser in which the application represented by the resource is -// running. The `browser.*` attributes MUST be used only for resources that -// represent applications running in a web browser (regardless of whether -// running on a mobile or desktop device). -const ( - // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" - // semantic conventions. It represents the array of brand name and version - // separated by a space - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.brands`). - BrowserBrandsKey = attribute.Key("browser.brands") - - // BrowserPlatformKey is the attribute Key conforming to the - // "browser.platform" semantic conventions. It represents the platform on - // which the browser is running - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Windows', 'macOS', 'Android' - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.platform`). If unavailable, the legacy - // `navigator.platform` API SHOULD NOT be used instead and this attribute - // SHOULD be left unset in order for the values to be consistent. - // The list of possible values is defined in the [W3C User-Agent Client - // Hints - // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). 
- // Note that some (but not all) of these values can overlap with values in - // the [`os.type` and `os.name` attributes](./os.md). However, for - // consistency, the values in the `browser.platform` attribute should - // capture the exact value that the user agent provides. - BrowserPlatformKey = attribute.Key("browser.platform") - - // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" - // semantic conventions. It represents a boolean that is true if the - // browser is running on a mobile device - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.mobile`). If unavailable, this attribute - // SHOULD be left unset. - BrowserMobileKey = attribute.Key("browser.mobile") - - // BrowserUserAgentKey is the attribute Key conforming to the - // "browser.user_agent" semantic conventions. It represents the full - // user-agent string provided by the browser - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) - // AppleWebKit/537.36 (KHTML, ' - // 'like Gecko) Chrome/95.0.4638.54 Safari/537.36' - // Note: The user-agent value SHOULD be provided only from browsers that do - // not have a mechanism to retrieve brands and platform individually from - // the User-Agent Client Hints API. To retrieve the value, the legacy - // `navigator.userAgent` API can be used. - BrowserUserAgentKey = attribute.Key("browser.user_agent") - - // BrowserLanguageKey is the attribute Key conforming to the - // "browser.language" semantic conventions. 
It represents the preferred - // language of the user using the browser - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'en', 'en-US', 'fr', 'fr-FR' - // Note: This value is intended to be taken from the Navigator API - // `navigator.language`. - BrowserLanguageKey = attribute.Key("browser.language") -) - -// BrowserBrands returns an attribute KeyValue conforming to the -// "browser.brands" semantic conventions. It represents the array of brand name -// and version separated by a space -func BrowserBrands(val ...string) attribute.KeyValue { - return BrowserBrandsKey.StringSlice(val) -} - -// BrowserPlatform returns an attribute KeyValue conforming to the -// "browser.platform" semantic conventions. It represents the platform on which -// the browser is running -func BrowserPlatform(val string) attribute.KeyValue { - return BrowserPlatformKey.String(val) -} - -// BrowserMobile returns an attribute KeyValue conforming to the -// "browser.mobile" semantic conventions. It represents a boolean that is true -// if the browser is running on a mobile device -func BrowserMobile(val bool) attribute.KeyValue { - return BrowserMobileKey.Bool(val) -} - -// BrowserUserAgent returns an attribute KeyValue conforming to the -// "browser.user_agent" semantic conventions. It represents the full user-agent -// string provided by the browser -func BrowserUserAgent(val string) attribute.KeyValue { - return BrowserUserAgentKey.String(val) -} - -// BrowserLanguage returns an attribute KeyValue conforming to the -// "browser.language" semantic conventions. It represents the preferred -// language of the user using the browser -func BrowserLanguage(val string) attribute.KeyValue { - return BrowserLanguageKey.String(val) -} - -// A cloud environment (e.g. GCP, Azure, AWS) -const ( - // CloudProviderKey is the attribute Key conforming to the "cloud.provider" - // semantic conventions. It represents the name of the cloud provider. 
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - CloudProviderKey = attribute.Key("cloud.provider") - - // CloudAccountIDKey is the attribute Key conforming to the - // "cloud.account.id" semantic conventions. It represents the cloud account - // ID the resource is assigned to. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '111111111111', 'opentelemetry' - CloudAccountIDKey = attribute.Key("cloud.account.id") - - // CloudRegionKey is the attribute Key conforming to the "cloud.region" - // semantic conventions. It represents the geographical region the resource - // is running. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'us-central1', 'us-east-1' - // Note: Refer to your provider's docs to see the available regions, for - // example [Alibaba Cloud - // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS - // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), - // [Azure - // regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/), - // [Google Cloud regions](https://cloud.google.com/about/locations), or - // [Tencent Cloud - // regions](https://intl.cloud.tencent.com/document/product/213/6091). - CloudRegionKey = attribute.Key("cloud.region") - - // CloudAvailabilityZoneKey is the attribute Key conforming to the - // "cloud.availability_zone" semantic conventions. It represents the cloud - // regions often have multiple, isolated locations known as zones to - // increase availability. Availability zone represents the zone where the - // resource is running. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'us-east-1c' - // Note: Availability zones are called "zones" on Alibaba Cloud and Google - // Cloud. 
- CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") - - // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" - // semantic conventions. It represents the cloud platform in use. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Note: The prefix of the service SHOULD match the one specified in - // `cloud.provider`. - CloudPlatformKey = attribute.Key("cloud.platform") -) - -var ( - // Alibaba Cloud - CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") - // Amazon Web Services - CloudProviderAWS = CloudProviderKey.String("aws") - // Microsoft Azure - CloudProviderAzure = CloudProviderKey.String("azure") - // Google Cloud Platform - CloudProviderGCP = CloudProviderKey.String("gcp") - // IBM Cloud - CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") - // Tencent Cloud - CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") -) - -var ( - // Alibaba Cloud Elastic Compute Service - CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") - // Alibaba Cloud Function Compute - CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") - // Red Hat OpenShift on Alibaba Cloud - CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") - // AWS Elastic Compute Cloud - CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") - // AWS Elastic Container Service - CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") - // AWS Elastic Kubernetes Service - CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") - // AWS Lambda - CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") - // AWS Elastic Beanstalk - CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") - // AWS App Runner - CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") - // Red Hat OpenShift on AWS (ROSA) - CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") - // 
Azure Virtual Machines - CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") - // Azure Container Instances - CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") - // Azure Kubernetes Service - CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") - // Azure Functions - CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") - // Azure App Service - CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") - // Azure Red Hat OpenShift - CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") - // Google Cloud Compute Engine (GCE) - CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") - // Google Cloud Run - CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") - // Google Cloud Kubernetes Engine (GKE) - CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") - // Google Cloud Functions (GCF) - CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") - // Google Cloud App Engine (GAE) - CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") - // Red Hat OpenShift on Google Cloud - CloudPlatformGoogleCloudOpenshift = CloudPlatformKey.String("google_cloud_openshift") - // Red Hat OpenShift on IBM Cloud - CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") - // Tencent Cloud Cloud Virtual Machine (CVM) - CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") - // Tencent Cloud Elastic Kubernetes Service (EKS) - CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") - // Tencent Cloud Serverless Cloud Function (SCF) - CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") -) - -// CloudAccountID returns an attribute KeyValue conforming to the -// "cloud.account.id" semantic conventions. It represents the cloud account ID -// the resource is assigned to. 
-func CloudAccountID(val string) attribute.KeyValue { - return CloudAccountIDKey.String(val) -} - -// CloudRegion returns an attribute KeyValue conforming to the -// "cloud.region" semantic conventions. It represents the geographical region -// the resource is running. -func CloudRegion(val string) attribute.KeyValue { - return CloudRegionKey.String(val) -} - -// CloudAvailabilityZone returns an attribute KeyValue conforming to the -// "cloud.availability_zone" semantic conventions. It represents the cloud -// regions often have multiple, isolated locations known as zones to increase -// availability. Availability zone represents the zone where the resource is -// running. -func CloudAvailabilityZone(val string) attribute.KeyValue { - return CloudAvailabilityZoneKey.String(val) -} - -// Resources used by AWS Elastic Container Service (ECS). -const ( - // AWSECSContainerARNKey is the attribute Key conforming to the - // "aws.ecs.container.arn" semantic conventions. It represents the Amazon - // Resource Name (ARN) of an [ECS container - // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: - // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' - AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") - - // AWSECSClusterARNKey is the attribute Key conforming to the - // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an - // [ECS - // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' - AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") - - // AWSECSLaunchtypeKey is the attribute Key conforming to the - // "aws.ecs.launchtype" semantic conventions. 
It represents the [launch - // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) - // for an ECS task. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") - - // AWSECSTaskARNKey is the attribute Key conforming to the - // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an - // [ECS task - // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: - // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' - AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") - - // AWSECSTaskFamilyKey is the attribute Key conforming to the - // "aws.ecs.task.family" semantic conventions. It represents the task - // definition family this task definition is a member of. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry-family' - AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") - - // AWSECSTaskRevisionKey is the attribute Key conforming to the - // "aws.ecs.task.revision" semantic conventions. It represents the revision - // for this task definition. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '8', '26' - AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") -) - -var ( - // ec2 - AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") - // fargate - AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") -) - -// AWSECSContainerARN returns an attribute KeyValue conforming to the -// "aws.ecs.container.arn" semantic conventions. It represents the Amazon -// Resource Name (ARN) of an [ECS container -// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). 
-func AWSECSContainerARN(val string) attribute.KeyValue { - return AWSECSContainerARNKey.String(val) -} - -// AWSECSClusterARN returns an attribute KeyValue conforming to the -// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS -// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). -func AWSECSClusterARN(val string) attribute.KeyValue { - return AWSECSClusterARNKey.String(val) -} - -// AWSECSTaskARN returns an attribute KeyValue conforming to the -// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS -// task -// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). -func AWSECSTaskARN(val string) attribute.KeyValue { - return AWSECSTaskARNKey.String(val) -} - -// AWSECSTaskFamily returns an attribute KeyValue conforming to the -// "aws.ecs.task.family" semantic conventions. It represents the task -// definition family this task definition is a member of. -func AWSECSTaskFamily(val string) attribute.KeyValue { - return AWSECSTaskFamilyKey.String(val) -} - -// AWSECSTaskRevision returns an attribute KeyValue conforming to the -// "aws.ecs.task.revision" semantic conventions. It represents the revision for -// this task definition. -func AWSECSTaskRevision(val string) attribute.KeyValue { - return AWSECSTaskRevisionKey.String(val) -} - -// Resources used by AWS Elastic Kubernetes Service (EKS). -const ( - // AWSEKSClusterARNKey is the attribute Key conforming to the - // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an - // EKS cluster. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' - AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") -) - -// AWSEKSClusterARN returns an attribute KeyValue conforming to the -// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS -// cluster. 
-func AWSEKSClusterARN(val string) attribute.KeyValue { - return AWSEKSClusterARNKey.String(val) -} - -// Resources specific to Amazon Web Services. -const ( - // AWSLogGroupNamesKey is the attribute Key conforming to the - // "aws.log.group.names" semantic conventions. It represents the name(s) of - // the AWS log group(s) an application is writing to. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: '/aws/lambda/my-function', 'opentelemetry-service' - // Note: Multiple log groups must be supported for cases like - // multi-container applications, where a single application has sidecar - // containers, and each write to their own log group. - AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") - - // AWSLogGroupARNsKey is the attribute Key conforming to the - // "aws.log.group.arns" semantic conventions. It represents the Amazon - // Resource Name(s) (ARN) of the AWS log group(s). - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: - // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' - // Note: See the [log group ARN format - // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). - AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") - - // AWSLogStreamNamesKey is the attribute Key conforming to the - // "aws.log.stream.names" semantic conventions. It represents the name(s) - // of the AWS log stream(s) an application is writing to. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' - AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") - - // AWSLogStreamARNsKey is the attribute Key conforming to the - // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of - // the AWS log stream(s). 
- // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: - // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' - // Note: See the [log stream ARN format - // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). - // One log group can contain several log streams, so these ARNs necessarily - // identify both a log group and a log stream. - AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") -) - -// AWSLogGroupNames returns an attribute KeyValue conforming to the -// "aws.log.group.names" semantic conventions. It represents the name(s) of the -// AWS log group(s) an application is writing to. -func AWSLogGroupNames(val ...string) attribute.KeyValue { - return AWSLogGroupNamesKey.StringSlice(val) -} - -// AWSLogGroupARNs returns an attribute KeyValue conforming to the -// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource -// Name(s) (ARN) of the AWS log group(s). -func AWSLogGroupARNs(val ...string) attribute.KeyValue { - return AWSLogGroupARNsKey.StringSlice(val) -} - -// AWSLogStreamNames returns an attribute KeyValue conforming to the -// "aws.log.stream.names" semantic conventions. It represents the name(s) of -// the AWS log stream(s) an application is writing to. -func AWSLogStreamNames(val ...string) attribute.KeyValue { - return AWSLogStreamNamesKey.StringSlice(val) -} - -// AWSLogStreamARNs returns an attribute KeyValue conforming to the -// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the -// AWS log stream(s). -func AWSLogStreamARNs(val ...string) attribute.KeyValue { - return AWSLogStreamARNsKey.StringSlice(val) -} - -// A container instance. -const ( - // ContainerNameKey is the attribute Key conforming to the "container.name" - // semantic conventions. It represents the container name used by container - // runtime. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry-autoconf' - ContainerNameKey = attribute.Key("container.name") - - // ContainerIDKey is the attribute Key conforming to the "container.id" - // semantic conventions. It represents the container ID. Usually a UUID, as - // for example used to [identify Docker - // containers](https://docs.docker.com/engine/reference/run/#container-identification). - // The UUID might be abbreviated. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'a3bf90e006b2' - ContainerIDKey = attribute.Key("container.id") - - // ContainerRuntimeKey is the attribute Key conforming to the - // "container.runtime" semantic conventions. It represents the container - // runtime managing this container. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'docker', 'containerd', 'rkt' - ContainerRuntimeKey = attribute.Key("container.runtime") - - // ContainerImageNameKey is the attribute Key conforming to the - // "container.image.name" semantic conventions. It represents the name of - // the image the container was built on. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'gcr.io/opentelemetry/operator' - ContainerImageNameKey = attribute.Key("container.image.name") - - // ContainerImageTagKey is the attribute Key conforming to the - // "container.image.tag" semantic conventions. It represents the container - // image tag. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '0.1' - ContainerImageTagKey = attribute.Key("container.image.tag") -) - -// ContainerName returns an attribute KeyValue conforming to the -// "container.name" semantic conventions. It represents the container name used -// by container runtime. 
-func ContainerName(val string) attribute.KeyValue { - return ContainerNameKey.String(val) -} - -// ContainerID returns an attribute KeyValue conforming to the -// "container.id" semantic conventions. It represents the container ID. Usually -// a UUID, as for example used to [identify Docker -// containers](https://docs.docker.com/engine/reference/run/#container-identification). -// The UUID might be abbreviated. -func ContainerID(val string) attribute.KeyValue { - return ContainerIDKey.String(val) -} - -// ContainerRuntime returns an attribute KeyValue conforming to the -// "container.runtime" semantic conventions. It represents the container -// runtime managing this container. -func ContainerRuntime(val string) attribute.KeyValue { - return ContainerRuntimeKey.String(val) -} - -// ContainerImageName returns an attribute KeyValue conforming to the -// "container.image.name" semantic conventions. It represents the name of the -// image the container was built on. -func ContainerImageName(val string) attribute.KeyValue { - return ContainerImageNameKey.String(val) -} - -// ContainerImageTag returns an attribute KeyValue conforming to the -// "container.image.tag" semantic conventions. It represents the container -// image tag. -func ContainerImageTag(val string) attribute.KeyValue { - return ContainerImageTagKey.String(val) -} - -// The software deployment. -const ( - // DeploymentEnvironmentKey is the attribute Key conforming to the - // "deployment.environment" semantic conventions. It represents the name of - // the [deployment - // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka - // deployment tier). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'staging', 'production' - DeploymentEnvironmentKey = attribute.Key("deployment.environment") -) - -// DeploymentEnvironment returns an attribute KeyValue conforming to the -// "deployment.environment" semantic conventions. 
It represents the name of the -// [deployment -// environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka -// deployment tier). -func DeploymentEnvironment(val string) attribute.KeyValue { - return DeploymentEnvironmentKey.String(val) -} - -// The device on which the process represented by this resource is running. -const ( - // DeviceIDKey is the attribute Key conforming to the "device.id" semantic - // conventions. It represents a unique identifier representing the device - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' - // Note: The device identifier MUST only be defined using the values - // outlined below. This value is not an advertising identifier and MUST NOT - // be used as such. On iOS (Swift or Objective-C), this value MUST be equal - // to the [vendor - // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). - // On Android (Java or Kotlin), this value MUST be equal to the Firebase - // Installation ID or a globally unique UUID which is persisted across - // sessions in your application. More information can be found - // [here](https://developer.android.com/training/articles/user-data-ids) on - // best practices and exact implementation details. Caution should be taken - // when storing personal data or anything which can identify a user. GDPR - // and data protection laws may apply, ensure you do your own due - // diligence. - DeviceIDKey = attribute.Key("device.id") - - // DeviceModelIdentifierKey is the attribute Key conforming to the - // "device.model.identifier" semantic conventions. 
It represents the model - // identifier for the device - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'iPhone3,4', 'SM-G920F' - // Note: It's recommended this value represents a machine readable version - // of the model identifier rather than the market or consumer-friendly name - // of the device. - DeviceModelIdentifierKey = attribute.Key("device.model.identifier") - - // DeviceModelNameKey is the attribute Key conforming to the - // "device.model.name" semantic conventions. It represents the marketing - // name for the device model - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' - // Note: It's recommended this value represents a human readable version of - // the device model rather than a machine readable alternative. - DeviceModelNameKey = attribute.Key("device.model.name") - - // DeviceManufacturerKey is the attribute Key conforming to the - // "device.manufacturer" semantic conventions. It represents the name of - // the device manufacturer - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Apple', 'Samsung' - // Note: The Android OS provides this field via - // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). - // iOS apps SHOULD hardcode the value `Apple`. - DeviceManufacturerKey = attribute.Key("device.manufacturer") -) - -// DeviceID returns an attribute KeyValue conforming to the "device.id" -// semantic conventions. It represents a unique identifier representing the -// device -func DeviceID(val string) attribute.KeyValue { - return DeviceIDKey.String(val) -} - -// DeviceModelIdentifier returns an attribute KeyValue conforming to the -// "device.model.identifier" semantic conventions. 
It represents the model -// identifier for the device -func DeviceModelIdentifier(val string) attribute.KeyValue { - return DeviceModelIdentifierKey.String(val) -} - -// DeviceModelName returns an attribute KeyValue conforming to the -// "device.model.name" semantic conventions. It represents the marketing name -// for the device model -func DeviceModelName(val string) attribute.KeyValue { - return DeviceModelNameKey.String(val) -} - -// DeviceManufacturer returns an attribute KeyValue conforming to the -// "device.manufacturer" semantic conventions. It represents the name of the -// device manufacturer -func DeviceManufacturer(val string) attribute.KeyValue { - return DeviceManufacturerKey.String(val) -} - -// A serverless instance. -const ( - // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic - // conventions. It represents the name of the single function that this - // runtime instance executes. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'my-function', 'myazurefunctionapp/some-function-name' - // Note: This is the name of the function as configured/deployed on the - // FaaS - // platform and is usually different from the name of the callback - // function (which may be stored in the - // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-general.md#source-code-attributes) - // span attributes). - // - // For some cloud providers, the above definition is ambiguous. The - // following - // definition of function name MUST be used for this attribute - // (and consequently the span name) for the listed cloud - // providers/products: - // - // * **Azure:** The full name `/`, i.e., function app name - // followed by a forward slash followed by the function name (this form - // can also be seen in the resource JSON for the function). 
- // This means that a span attribute MUST be used, as an Azure function - // app can host multiple functions that would usually share - // a TracerProvider (see also the `faas.id` attribute). - FaaSNameKey = attribute.Key("faas.name") - - // FaaSIDKey is the attribute Key conforming to the "faas.id" semantic - // conventions. It represents the unique ID of the single function that - // this runtime instance executes. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function' - // Note: On some cloud providers, it may not be possible to determine the - // full ID at startup, - // so consider setting `faas.id` as a span attribute instead. - // - // The exact value to use for `faas.id` depends on the cloud provider: - // - // * **AWS Lambda:** The function - // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). - // Take care not to use the "invoked ARN" directly but replace any - // [alias - // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) - // with the resolved function version, as the same runtime instance may - // be invokable with - // multiple different aliases. - // * **GCP:** The [URI of the - // resource](https://cloud.google.com/iam/docs/full-resource-names) - // * **Azure:** The [Fully Qualified Resource - // ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id) - // of the invoked function, - // *not* the function app, having the form - // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. - // This means that a span attribute MUST be used, as an Azure function - // app can host multiple functions that would usually share - // a TracerProvider. - FaaSIDKey = attribute.Key("faas.id") - - // FaaSVersionKey is the attribute Key conforming to the "faas.version" - // semantic conventions. 
It represents the immutable version of the - // function being executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '26', 'pinkfroid-00002' - // Note: Depending on the cloud provider and platform, use: - // - // * **AWS Lambda:** The [function - // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) - // (an integer represented as a decimal string). - // * **Google Cloud Run:** The - // [revision](https://cloud.google.com/run/docs/managing/revisions) - // (i.e., the function name plus the revision suffix). - // * **Google Cloud Functions:** The value of the - // [`K_REVISION` environment - // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). - // * **Azure Functions:** Not applicable. Do not set this attribute. - FaaSVersionKey = attribute.Key("faas.version") - - // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" - // semantic conventions. It represents the execution environment ID as a - // string, that will be potentially reused for other invocations to the - // same function/function version. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' - // Note: * **AWS Lambda:** Use the (full) log stream name. - FaaSInstanceKey = attribute.Key("faas.instance") - - // FaaSMaxMemoryKey is the attribute Key conforming to the - // "faas.max_memory" semantic conventions. It represents the amount of - // memory available to the serverless function in MiB. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 128 - // Note: It's recommended to set this attribute since e.g. too little - // memory can easily stop a Java AWS Lambda function from working - // correctly. On AWS Lambda, the environment variable - // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information. 
- FaaSMaxMemoryKey = attribute.Key("faas.max_memory") -) - -// FaaSName returns an attribute KeyValue conforming to the "faas.name" -// semantic conventions. It represents the name of the single function that -// this runtime instance executes. -func FaaSName(val string) attribute.KeyValue { - return FaaSNameKey.String(val) -} - -// FaaSID returns an attribute KeyValue conforming to the "faas.id" semantic -// conventions. It represents the unique ID of the single function that this -// runtime instance executes. -func FaaSID(val string) attribute.KeyValue { - return FaaSIDKey.String(val) -} - -// FaaSVersion returns an attribute KeyValue conforming to the -// "faas.version" semantic conventions. It represents the immutable version of -// the function being executed. -func FaaSVersion(val string) attribute.KeyValue { - return FaaSVersionKey.String(val) -} - -// FaaSInstance returns an attribute KeyValue conforming to the -// "faas.instance" semantic conventions. It represents the execution -// environment ID as a string, that will be potentially reused for other -// invocations to the same function/function version. -func FaaSInstance(val string) attribute.KeyValue { - return FaaSInstanceKey.String(val) -} - -// FaaSMaxMemory returns an attribute KeyValue conforming to the -// "faas.max_memory" semantic conventions. It represents the amount of memory -// available to the serverless function in MiB. -func FaaSMaxMemory(val int) attribute.KeyValue { - return FaaSMaxMemoryKey.Int(val) -} - -// A host is defined as a general computing instance. -const ( - // HostIDKey is the attribute Key conforming to the "host.id" semantic - // conventions. It represents the unique host ID. For Cloud, this must be - // the instance_id assigned by the cloud provider. For non-containerized - // Linux systems, the `machine-id` located in `/etc/machine-id` or - // `/var/lib/dbus/machine-id` may be used. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'fdbf79e8af94cb7f9e8df36789187052' - HostIDKey = attribute.Key("host.id") - - // HostNameKey is the attribute Key conforming to the "host.name" semantic - // conventions. It represents the name of the host. On Unix systems, it may - // contain what the hostname command returns, or the fully qualified - // hostname, or another name specified by the user. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry-test' - HostNameKey = attribute.Key("host.name") - - // HostTypeKey is the attribute Key conforming to the "host.type" semantic - // conventions. It represents the type of host. For Cloud, this must be the - // machine type. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'n1-standard-1' - HostTypeKey = attribute.Key("host.type") - - // HostArchKey is the attribute Key conforming to the "host.arch" semantic - // conventions. It represents the CPU architecture the host system is - // running on. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - HostArchKey = attribute.Key("host.arch") - - // HostImageNameKey is the attribute Key conforming to the - // "host.image.name" semantic conventions. It represents the name of the VM - // image or OS install the host was instantiated from. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' - HostImageNameKey = attribute.Key("host.image.name") - - // HostImageIDKey is the attribute Key conforming to the "host.image.id" - // semantic conventions. It represents the vM image ID. For Cloud, this - // value is from the provider. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'ami-07b06b442921831e5' - HostImageIDKey = attribute.Key("host.image.id") - - // HostImageVersionKey is the attribute Key conforming to the - // "host.image.version" semantic conventions. It represents the version - // string of the VM image as defined in [Version - // Attributes](README.md#version-attributes). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '0.1' - HostImageVersionKey = attribute.Key("host.image.version") -) - -var ( - // AMD64 - HostArchAMD64 = HostArchKey.String("amd64") - // ARM32 - HostArchARM32 = HostArchKey.String("arm32") - // ARM64 - HostArchARM64 = HostArchKey.String("arm64") - // Itanium - HostArchIA64 = HostArchKey.String("ia64") - // 32-bit PowerPC - HostArchPPC32 = HostArchKey.String("ppc32") - // 64-bit PowerPC - HostArchPPC64 = HostArchKey.String("ppc64") - // IBM z/Architecture - HostArchS390x = HostArchKey.String("s390x") - // 32-bit x86 - HostArchX86 = HostArchKey.String("x86") -) - -// HostID returns an attribute KeyValue conforming to the "host.id" semantic -// conventions. It represents the unique host ID. For Cloud, this must be the -// instance_id assigned by the cloud provider. For non-containerized Linux -// systems, the `machine-id` located in `/etc/machine-id` or -// `/var/lib/dbus/machine-id` may be used. -func HostID(val string) attribute.KeyValue { - return HostIDKey.String(val) -} - -// HostName returns an attribute KeyValue conforming to the "host.name" -// semantic conventions. It represents the name of the host. On Unix systems, -// it may contain what the hostname command returns, or the fully qualified -// hostname, or another name specified by the user. -func HostName(val string) attribute.KeyValue { - return HostNameKey.String(val) -} - -// HostType returns an attribute KeyValue conforming to the "host.type" -// semantic conventions. It represents the type of host. 
For Cloud, this must -// be the machine type. -func HostType(val string) attribute.KeyValue { - return HostTypeKey.String(val) -} - -// HostImageName returns an attribute KeyValue conforming to the -// "host.image.name" semantic conventions. It represents the name of the VM -// image or OS install the host was instantiated from. -func HostImageName(val string) attribute.KeyValue { - return HostImageNameKey.String(val) -} - -// HostImageID returns an attribute KeyValue conforming to the -// "host.image.id" semantic conventions. It represents the vM image ID. For -// Cloud, this value is from the provider. -func HostImageID(val string) attribute.KeyValue { - return HostImageIDKey.String(val) -} - -// HostImageVersion returns an attribute KeyValue conforming to the -// "host.image.version" semantic conventions. It represents the version string -// of the VM image as defined in [Version -// Attributes](README.md#version-attributes). -func HostImageVersion(val string) attribute.KeyValue { - return HostImageVersionKey.String(val) -} - -// A Kubernetes Cluster. -const ( - // K8SClusterNameKey is the attribute Key conforming to the - // "k8s.cluster.name" semantic conventions. It represents the name of the - // cluster. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry-cluster' - K8SClusterNameKey = attribute.Key("k8s.cluster.name") -) - -// K8SClusterName returns an attribute KeyValue conforming to the -// "k8s.cluster.name" semantic conventions. It represents the name of the -// cluster. -func K8SClusterName(val string) attribute.KeyValue { - return K8SClusterNameKey.String(val) -} - -// A Kubernetes Node object. -const ( - // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" - // semantic conventions. It represents the name of the Node. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'node-1' - K8SNodeNameKey = attribute.Key("k8s.node.name") - - // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" - // semantic conventions. It represents the UID of the Node. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' - K8SNodeUIDKey = attribute.Key("k8s.node.uid") -) - -// K8SNodeName returns an attribute KeyValue conforming to the -// "k8s.node.name" semantic conventions. It represents the name of the Node. -func K8SNodeName(val string) attribute.KeyValue { - return K8SNodeNameKey.String(val) -} - -// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" -// semantic conventions. It represents the UID of the Node. -func K8SNodeUID(val string) attribute.KeyValue { - return K8SNodeUIDKey.String(val) -} - -// A Kubernetes Namespace. -const ( - // K8SNamespaceNameKey is the attribute Key conforming to the - // "k8s.namespace.name" semantic conventions. It represents the name of the - // namespace that the pod is running in. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'default' - K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") -) - -// K8SNamespaceName returns an attribute KeyValue conforming to the -// "k8s.namespace.name" semantic conventions. It represents the name of the -// namespace that the pod is running in. -func K8SNamespaceName(val string) attribute.KeyValue { - return K8SNamespaceNameKey.String(val) -} - -// A Kubernetes Pod object. -const ( - // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" - // semantic conventions. It represents the UID of the Pod. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SPodUIDKey = attribute.Key("k8s.pod.uid") - - // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" - // semantic conventions. It represents the name of the Pod. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry-pod-autoconf' - K8SPodNameKey = attribute.Key("k8s.pod.name") -) - -// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" -// semantic conventions. It represents the UID of the Pod. -func K8SPodUID(val string) attribute.KeyValue { - return K8SPodUIDKey.String(val) -} - -// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" -// semantic conventions. It represents the name of the Pod. -func K8SPodName(val string) attribute.KeyValue { - return K8SPodNameKey.String(val) -} - -// A container in a -// [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). -const ( - // K8SContainerNameKey is the attribute Key conforming to the - // "k8s.container.name" semantic conventions. It represents the name of the - // Container from Pod specification, must be unique within a Pod. Container - // runtime usually uses different globally unique name (`container.name`). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'redis' - K8SContainerNameKey = attribute.Key("k8s.container.name") - - // K8SContainerRestartCountKey is the attribute Key conforming to the - // "k8s.container.restart_count" semantic conventions. It represents the - // number of times the container was restarted. This attribute can be used - // to identify a particular container (running or stopped) within a - // container spec. 
- // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 0, 2 - K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") -) - -// K8SContainerName returns an attribute KeyValue conforming to the -// "k8s.container.name" semantic conventions. It represents the name of the -// Container from Pod specification, must be unique within a Pod. Container -// runtime usually uses different globally unique name (`container.name`). -func K8SContainerName(val string) attribute.KeyValue { - return K8SContainerNameKey.String(val) -} - -// K8SContainerRestartCount returns an attribute KeyValue conforming to the -// "k8s.container.restart_count" semantic conventions. It represents the number -// of times the container was restarted. This attribute can be used to identify -// a particular container (running or stopped) within a container spec. -func K8SContainerRestartCount(val int) attribute.KeyValue { - return K8SContainerRestartCountKey.Int(val) -} - -// A Kubernetes ReplicaSet object. -const ( - // K8SReplicaSetUIDKey is the attribute Key conforming to the - // "k8s.replicaset.uid" semantic conventions. It represents the UID of the - // ReplicaSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") - - // K8SReplicaSetNameKey is the attribute Key conforming to the - // "k8s.replicaset.name" semantic conventions. It represents the name of - // the ReplicaSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry' - K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") -) - -// K8SReplicaSetUID returns an attribute KeyValue conforming to the -// "k8s.replicaset.uid" semantic conventions. It represents the UID of the -// ReplicaSet. 
-func K8SReplicaSetUID(val string) attribute.KeyValue { - return K8SReplicaSetUIDKey.String(val) -} - -// K8SReplicaSetName returns an attribute KeyValue conforming to the -// "k8s.replicaset.name" semantic conventions. It represents the name of the -// ReplicaSet. -func K8SReplicaSetName(val string) attribute.KeyValue { - return K8SReplicaSetNameKey.String(val) -} - -// A Kubernetes Deployment object. -const ( - // K8SDeploymentUIDKey is the attribute Key conforming to the - // "k8s.deployment.uid" semantic conventions. It represents the UID of the - // Deployment. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") - - // K8SDeploymentNameKey is the attribute Key conforming to the - // "k8s.deployment.name" semantic conventions. It represents the name of - // the Deployment. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry' - K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") -) - -// K8SDeploymentUID returns an attribute KeyValue conforming to the -// "k8s.deployment.uid" semantic conventions. It represents the UID of the -// Deployment. -func K8SDeploymentUID(val string) attribute.KeyValue { - return K8SDeploymentUIDKey.String(val) -} - -// K8SDeploymentName returns an attribute KeyValue conforming to the -// "k8s.deployment.name" semantic conventions. It represents the name of the -// Deployment. -func K8SDeploymentName(val string) attribute.KeyValue { - return K8SDeploymentNameKey.String(val) -} - -// A Kubernetes StatefulSet object. -const ( - // K8SStatefulSetUIDKey is the attribute Key conforming to the - // "k8s.statefulset.uid" semantic conventions. It represents the UID of the - // StatefulSet. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") - - // K8SStatefulSetNameKey is the attribute Key conforming to the - // "k8s.statefulset.name" semantic conventions. It represents the name of - // the StatefulSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry' - K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") -) - -// K8SStatefulSetUID returns an attribute KeyValue conforming to the -// "k8s.statefulset.uid" semantic conventions. It represents the UID of the -// StatefulSet. -func K8SStatefulSetUID(val string) attribute.KeyValue { - return K8SStatefulSetUIDKey.String(val) -} - -// K8SStatefulSetName returns an attribute KeyValue conforming to the -// "k8s.statefulset.name" semantic conventions. It represents the name of the -// StatefulSet. -func K8SStatefulSetName(val string) attribute.KeyValue { - return K8SStatefulSetNameKey.String(val) -} - -// A Kubernetes DaemonSet object. -const ( - // K8SDaemonSetUIDKey is the attribute Key conforming to the - // "k8s.daemonset.uid" semantic conventions. It represents the UID of the - // DaemonSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") - - // K8SDaemonSetNameKey is the attribute Key conforming to the - // "k8s.daemonset.name" semantic conventions. It represents the name of the - // DaemonSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry' - K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") -) - -// K8SDaemonSetUID returns an attribute KeyValue conforming to the -// "k8s.daemonset.uid" semantic conventions. It represents the UID of the -// DaemonSet. 
-func K8SDaemonSetUID(val string) attribute.KeyValue { - return K8SDaemonSetUIDKey.String(val) -} - -// K8SDaemonSetName returns an attribute KeyValue conforming to the -// "k8s.daemonset.name" semantic conventions. It represents the name of the -// DaemonSet. -func K8SDaemonSetName(val string) attribute.KeyValue { - return K8SDaemonSetNameKey.String(val) -} - -// A Kubernetes Job object. -const ( - // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" - // semantic conventions. It represents the UID of the Job. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SJobUIDKey = attribute.Key("k8s.job.uid") - - // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" - // semantic conventions. It represents the name of the Job. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry' - K8SJobNameKey = attribute.Key("k8s.job.name") -) - -// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" -// semantic conventions. It represents the UID of the Job. -func K8SJobUID(val string) attribute.KeyValue { - return K8SJobUIDKey.String(val) -} - -// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" -// semantic conventions. It represents the name of the Job. -func K8SJobName(val string) attribute.KeyValue { - return K8SJobNameKey.String(val) -} - -// A Kubernetes CronJob object. -const ( - // K8SCronJobUIDKey is the attribute Key conforming to the - // "k8s.cronjob.uid" semantic conventions. It represents the UID of the - // CronJob. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") - - // K8SCronJobNameKey is the attribute Key conforming to the - // "k8s.cronjob.name" semantic conventions. It represents the name of the - // CronJob. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry' - K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") -) - -// K8SCronJobUID returns an attribute KeyValue conforming to the -// "k8s.cronjob.uid" semantic conventions. It represents the UID of the -// CronJob. -func K8SCronJobUID(val string) attribute.KeyValue { - return K8SCronJobUIDKey.String(val) -} - -// K8SCronJobName returns an attribute KeyValue conforming to the -// "k8s.cronjob.name" semantic conventions. It represents the name of the -// CronJob. -func K8SCronJobName(val string) attribute.KeyValue { - return K8SCronJobNameKey.String(val) -} - -// The operating system (OS) on which the process represented by this resource -// is running. -const ( - // OSTypeKey is the attribute Key conforming to the "os.type" semantic - // conventions. It represents the operating system type. - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - OSTypeKey = attribute.Key("os.type") - - // OSDescriptionKey is the attribute Key conforming to the "os.description" - // semantic conventions. It represents the human readable (not intended to - // be parsed) OS version information, like e.g. reported by `ver` or - // `lsb_release -a` commands. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 - // LTS' - OSDescriptionKey = attribute.Key("os.description") - - // OSNameKey is the attribute Key conforming to the "os.name" semantic - // conventions. It represents the human readable operating system name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'iOS', 'Android', 'Ubuntu' - OSNameKey = attribute.Key("os.name") - - // OSVersionKey is the attribute Key conforming to the "os.version" - // semantic conventions. 
It represents the version string of the operating - // system as defined in [Version - // Attributes](../../resource/semantic_conventions/README.md#version-attributes). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '14.2.1', '18.04.1' - OSVersionKey = attribute.Key("os.version") -) - -var ( - // Microsoft Windows - OSTypeWindows = OSTypeKey.String("windows") - // Linux - OSTypeLinux = OSTypeKey.String("linux") - // Apple Darwin - OSTypeDarwin = OSTypeKey.String("darwin") - // FreeBSD - OSTypeFreeBSD = OSTypeKey.String("freebsd") - // NetBSD - OSTypeNetBSD = OSTypeKey.String("netbsd") - // OpenBSD - OSTypeOpenBSD = OSTypeKey.String("openbsd") - // DragonFly BSD - OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") - // HP-UX (Hewlett Packard Unix) - OSTypeHPUX = OSTypeKey.String("hpux") - // AIX (Advanced Interactive eXecutive) - OSTypeAIX = OSTypeKey.String("aix") - // SunOS, Oracle Solaris - OSTypeSolaris = OSTypeKey.String("solaris") - // IBM z/OS - OSTypeZOS = OSTypeKey.String("z_os") -) - -// OSDescription returns an attribute KeyValue conforming to the -// "os.description" semantic conventions. It represents the human readable (not -// intended to be parsed) OS version information, like e.g. reported by `ver` -// or `lsb_release -a` commands. -func OSDescription(val string) attribute.KeyValue { - return OSDescriptionKey.String(val) -} - -// OSName returns an attribute KeyValue conforming to the "os.name" semantic -// conventions. It represents the human readable operating system name. -func OSName(val string) attribute.KeyValue { - return OSNameKey.String(val) -} - -// OSVersion returns an attribute KeyValue conforming to the "os.version" -// semantic conventions. It represents the version string of the operating -// system as defined in [Version -// Attributes](../../resource/semantic_conventions/README.md#version-attributes). 
-func OSVersion(val string) attribute.KeyValue { - return OSVersionKey.String(val) -} - -// An operating system process. -const ( - // ProcessPIDKey is the attribute Key conforming to the "process.pid" - // semantic conventions. It represents the process identifier (PID). - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 1234 - ProcessPIDKey = attribute.Key("process.pid") - - // ProcessParentPIDKey is the attribute Key conforming to the - // "process.parent_pid" semantic conventions. It represents the parent - // Process identifier (PID). - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 111 - ProcessParentPIDKey = attribute.Key("process.parent_pid") - - // ProcessExecutableNameKey is the attribute Key conforming to the - // "process.executable.name" semantic conventions. It represents the name - // of the process executable. On Linux based systems, can be set to the - // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name - // of `GetProcessImageFileNameW`. - // - // Type: string - // RequirementLevel: ConditionallyRequired (See alternative attributes - // below.) - // Stability: stable - // Examples: 'otelcol' - ProcessExecutableNameKey = attribute.Key("process.executable.name") - - // ProcessExecutablePathKey is the attribute Key conforming to the - // "process.executable.path" semantic conventions. It represents the full - // path to the process executable. On Linux based systems, can be set to - // the target of `proc/[pid]/exe`. On Windows, can be set to the result of - // `GetProcessImageFileNameW`. - // - // Type: string - // RequirementLevel: ConditionallyRequired (See alternative attributes - // below.) - // Stability: stable - // Examples: '/usr/bin/cmd/otelcol' - ProcessExecutablePathKey = attribute.Key("process.executable.path") - - // ProcessCommandKey is the attribute Key conforming to the - // "process.command" semantic conventions. 
It represents the command used - // to launch the process (i.e. the command name). On Linux based systems, - // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can - // be set to the first parameter extracted from `GetCommandLineW`. - // - // Type: string - // RequirementLevel: ConditionallyRequired (See alternative attributes - // below.) - // Stability: stable - // Examples: 'cmd/otelcol' - ProcessCommandKey = attribute.Key("process.command") - - // ProcessCommandLineKey is the attribute Key conforming to the - // "process.command_line" semantic conventions. It represents the full - // command used to launch the process as a single string representing the - // full command. On Windows, can be set to the result of `GetCommandLineW`. - // Do not set this if you have to assemble it just for monitoring; use - // `process.command_args` instead. - // - // Type: string - // RequirementLevel: ConditionallyRequired (See alternative attributes - // below.) - // Stability: stable - // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' - ProcessCommandLineKey = attribute.Key("process.command_line") - - // ProcessCommandArgsKey is the attribute Key conforming to the - // "process.command_args" semantic conventions. It represents the all the - // command arguments (including the command/executable itself) as received - // by the process. On Linux-based systems (and some other Unixoid systems - // supporting procfs), can be set according to the list of null-delimited - // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, - // this would be the full argv vector passed to `main`. - // - // Type: string[] - // RequirementLevel: ConditionallyRequired (See alternative attributes - // below.) 
- // Stability: stable - // Examples: 'cmd/otecol', '--config=config.yaml' - ProcessCommandArgsKey = attribute.Key("process.command_args") - - // ProcessOwnerKey is the attribute Key conforming to the "process.owner" - // semantic conventions. It represents the username of the user that owns - // the process. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'root' - ProcessOwnerKey = attribute.Key("process.owner") -) - -// ProcessPID returns an attribute KeyValue conforming to the "process.pid" -// semantic conventions. It represents the process identifier (PID). -func ProcessPID(val int) attribute.KeyValue { - return ProcessPIDKey.Int(val) -} - -// ProcessParentPID returns an attribute KeyValue conforming to the -// "process.parent_pid" semantic conventions. It represents the parent Process -// identifier (PID). -func ProcessParentPID(val int) attribute.KeyValue { - return ProcessParentPIDKey.Int(val) -} - -// ProcessExecutableName returns an attribute KeyValue conforming to the -// "process.executable.name" semantic conventions. It represents the name of -// the process executable. On Linux based systems, can be set to the `Name` in -// `proc/[pid]/status`. On Windows, can be set to the base name of -// `GetProcessImageFileNameW`. -func ProcessExecutableName(val string) attribute.KeyValue { - return ProcessExecutableNameKey.String(val) -} - -// ProcessExecutablePath returns an attribute KeyValue conforming to the -// "process.executable.path" semantic conventions. It represents the full path -// to the process executable. On Linux based systems, can be set to the target -// of `proc/[pid]/exe`. On Windows, can be set to the result of -// `GetProcessImageFileNameW`. -func ProcessExecutablePath(val string) attribute.KeyValue { - return ProcessExecutablePathKey.String(val) -} - -// ProcessCommand returns an attribute KeyValue conforming to the -// "process.command" semantic conventions. 
It represents the command used to -// launch the process (i.e. the command name). On Linux based systems, can be -// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to -// the first parameter extracted from `GetCommandLineW`. -func ProcessCommand(val string) attribute.KeyValue { - return ProcessCommandKey.String(val) -} - -// ProcessCommandLine returns an attribute KeyValue conforming to the -// "process.command_line" semantic conventions. It represents the full command -// used to launch the process as a single string representing the full command. -// On Windows, can be set to the result of `GetCommandLineW`. Do not set this -// if you have to assemble it just for monitoring; use `process.command_args` -// instead. -func ProcessCommandLine(val string) attribute.KeyValue { - return ProcessCommandLineKey.String(val) -} - -// ProcessCommandArgs returns an attribute KeyValue conforming to the -// "process.command_args" semantic conventions. It represents the all the -// command arguments (including the command/executable itself) as received by -// the process. On Linux-based systems (and some other Unixoid systems -// supporting procfs), can be set according to the list of null-delimited -// strings extracted from `proc/[pid]/cmdline`. For libc-based executables, -// this would be the full argv vector passed to `main`. -func ProcessCommandArgs(val ...string) attribute.KeyValue { - return ProcessCommandArgsKey.StringSlice(val) -} - -// ProcessOwner returns an attribute KeyValue conforming to the -// "process.owner" semantic conventions. It represents the username of the user -// that owns the process. -func ProcessOwner(val string) attribute.KeyValue { - return ProcessOwnerKey.String(val) -} - -// The single (language) runtime instance which is monitored. -const ( - // ProcessRuntimeNameKey is the attribute Key conforming to the - // "process.runtime.name" semantic conventions. It represents the name of - // the runtime of this process. 
For compiled native binaries, this SHOULD - // be the name of the compiler. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'OpenJDK Runtime Environment' - ProcessRuntimeNameKey = attribute.Key("process.runtime.name") - - // ProcessRuntimeVersionKey is the attribute Key conforming to the - // "process.runtime.version" semantic conventions. It represents the - // version of the runtime of this process, as returned by the runtime - // without modification. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '14.0.2' - ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") - - // ProcessRuntimeDescriptionKey is the attribute Key conforming to the - // "process.runtime.description" semantic conventions. It represents an - // additional description about the runtime of the process, for example a - // specific vendor customization of the runtime environment. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' - ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") -) - -// ProcessRuntimeName returns an attribute KeyValue conforming to the -// "process.runtime.name" semantic conventions. It represents the name of the -// runtime of this process. For compiled native binaries, this SHOULD be the -// name of the compiler. -func ProcessRuntimeName(val string) attribute.KeyValue { - return ProcessRuntimeNameKey.String(val) -} - -// ProcessRuntimeVersion returns an attribute KeyValue conforming to the -// "process.runtime.version" semantic conventions. It represents the version of -// the runtime of this process, as returned by the runtime without -// modification. 
-func ProcessRuntimeVersion(val string) attribute.KeyValue { - return ProcessRuntimeVersionKey.String(val) -} - -// ProcessRuntimeDescription returns an attribute KeyValue conforming to the -// "process.runtime.description" semantic conventions. It represents an -// additional description about the runtime of the process, for example a -// specific vendor customization of the runtime environment. -func ProcessRuntimeDescription(val string) attribute.KeyValue { - return ProcessRuntimeDescriptionKey.String(val) -} - -// A service instance. -const ( - // ServiceNameKey is the attribute Key conforming to the "service.name" - // semantic conventions. It represents the logical name of the service. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'shoppingcart' - // Note: MUST be the same for all instances of horizontally scaled - // services. If the value was not specified, SDKs MUST fallback to - // `unknown_service:` concatenated with - // [`process.executable.name`](process.md#process), e.g. - // `unknown_service:bash`. If `process.executable.name` is not available, - // the value MUST be set to `unknown_service`. - ServiceNameKey = attribute.Key("service.name") - - // ServiceNamespaceKey is the attribute Key conforming to the - // "service.namespace" semantic conventions. It represents a namespace for - // `service.name`. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Shop' - // Note: A string value having a meaning that helps to distinguish a group - // of services, for example the team name that owns a group of services. - // `service.name` is expected to be unique within the same namespace. If - // `service.namespace` is not specified in the Resource then `service.name` - // is expected to be unique for all services that have no explicit - // namespace defined (so the empty/unspecified namespace is simply one more - // valid namespace). 
Zero-length namespace string is assumed equal to - // unspecified namespace. - ServiceNamespaceKey = attribute.Key("service.namespace") - - // ServiceInstanceIDKey is the attribute Key conforming to the - // "service.instance.id" semantic conventions. It represents the string ID - // of the service instance. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '627cc493-f310-47de-96bd-71410b7dec09' - // Note: MUST be unique for each instance of the same - // `service.namespace,service.name` pair (in other words - // `service.namespace,service.name,service.instance.id` triplet MUST be - // globally unique). The ID helps to distinguish instances of the same - // service that exist at the same time (e.g. instances of a horizontally - // scaled service). It is preferable for the ID to be persistent and stay - // the same for the lifetime of the service instance, however it is - // acceptable that the ID is ephemeral and changes during important - // lifetime events for the service (e.g. service restarts). If the service - // has no inherent unique ID that can be used as the value of this - // attribute it is recommended to generate a random Version 1 or Version 4 - // RFC 4122 UUID (services aiming for reproducible UUIDs may also use - // Version 5, see RFC 4122 for more recommendations). - ServiceInstanceIDKey = attribute.Key("service.instance.id") - - // ServiceVersionKey is the attribute Key conforming to the - // "service.version" semantic conventions. It represents the version string - // of the service API or implementation. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '2.0.0' - ServiceVersionKey = attribute.Key("service.version") -) - -// ServiceName returns an attribute KeyValue conforming to the -// "service.name" semantic conventions. It represents the logical name of the -// service. 
-func ServiceName(val string) attribute.KeyValue { - return ServiceNameKey.String(val) -} - -// ServiceNamespace returns an attribute KeyValue conforming to the -// "service.namespace" semantic conventions. It represents a namespace for -// `service.name`. -func ServiceNamespace(val string) attribute.KeyValue { - return ServiceNamespaceKey.String(val) -} - -// ServiceInstanceID returns an attribute KeyValue conforming to the -// "service.instance.id" semantic conventions. It represents the string ID of -// the service instance. -func ServiceInstanceID(val string) attribute.KeyValue { - return ServiceInstanceIDKey.String(val) -} - -// ServiceVersion returns an attribute KeyValue conforming to the -// "service.version" semantic conventions. It represents the version string of -// the service API or implementation. -func ServiceVersion(val string) attribute.KeyValue { - return ServiceVersionKey.String(val) -} - -// The telemetry SDK used to capture data recorded by the instrumentation -// libraries. -const ( - // TelemetrySDKNameKey is the attribute Key conforming to the - // "telemetry.sdk.name" semantic conventions. It represents the name of the - // telemetry SDK as defined above. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry' - TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") - - // TelemetrySDKLanguageKey is the attribute Key conforming to the - // "telemetry.sdk.language" semantic conventions. It represents the - // language of the telemetry SDK. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") - - // TelemetrySDKVersionKey is the attribute Key conforming to the - // "telemetry.sdk.version" semantic conventions. It represents the version - // string of the telemetry SDK. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '1.2.3' - TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") - - // TelemetryAutoVersionKey is the attribute Key conforming to the - // "telemetry.auto.version" semantic conventions. It represents the version - // string of the auto instrumentation agent, if used. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '1.2.3' - TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") -) - -var ( - // cpp - TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") - // dotnet - TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") - // erlang - TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") - // go - TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") - // java - TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") - // nodejs - TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") - // php - TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") - // python - TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") - // ruby - TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") - // webjs - TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") - // swift - TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") -) - -// TelemetrySDKName returns an attribute KeyValue conforming to the -// "telemetry.sdk.name" semantic conventions. It represents the name of the -// telemetry SDK as defined above. -func TelemetrySDKName(val string) attribute.KeyValue { - return TelemetrySDKNameKey.String(val) -} - -// TelemetrySDKVersion returns an attribute KeyValue conforming to the -// "telemetry.sdk.version" semantic conventions. It represents the version -// string of the telemetry SDK. 
-func TelemetrySDKVersion(val string) attribute.KeyValue { - return TelemetrySDKVersionKey.String(val) -} - -// TelemetryAutoVersion returns an attribute KeyValue conforming to the -// "telemetry.auto.version" semantic conventions. It represents the version -// string of the auto instrumentation agent, if used. -func TelemetryAutoVersion(val string) attribute.KeyValue { - return TelemetryAutoVersionKey.String(val) -} - -// Resource describing the packaged software running the application code. Web -// engines are typically executed using process.runtime. -const ( - // WebEngineNameKey is the attribute Key conforming to the "webengine.name" - // semantic conventions. It represents the name of the web engine. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'WildFly' - WebEngineNameKey = attribute.Key("webengine.name") - - // WebEngineVersionKey is the attribute Key conforming to the - // "webengine.version" semantic conventions. It represents the version of - // the web engine. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '21.0.0' - WebEngineVersionKey = attribute.Key("webengine.version") - - // WebEngineDescriptionKey is the attribute Key conforming to the - // "webengine.description" semantic conventions. It represents the - // additional description of the web engine (e.g. detailed version and - // edition information). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - - // 2.2.2.Final' - WebEngineDescriptionKey = attribute.Key("webengine.description") -) - -// WebEngineName returns an attribute KeyValue conforming to the -// "webengine.name" semantic conventions. It represents the name of the web -// engine. 
-func WebEngineName(val string) attribute.KeyValue { - return WebEngineNameKey.String(val) -} - -// WebEngineVersion returns an attribute KeyValue conforming to the -// "webengine.version" semantic conventions. It represents the version of the -// web engine. -func WebEngineVersion(val string) attribute.KeyValue { - return WebEngineVersionKey.String(val) -} - -// WebEngineDescription returns an attribute KeyValue conforming to the -// "webengine.description" semantic conventions. It represents the additional -// description of the web engine (e.g. detailed version and edition -// information). -func WebEngineDescription(val string) attribute.KeyValue { - return WebEngineDescriptionKey.String(val) -} - -// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's -// concepts. -const ( - // OtelScopeNameKey is the attribute Key conforming to the - // "otel.scope.name" semantic conventions. It represents the name of the - // instrumentation scope - (`InstrumentationScope.Name` in OTLP). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'io.opentelemetry.contrib.mongodb' - OtelScopeNameKey = attribute.Key("otel.scope.name") - - // OtelScopeVersionKey is the attribute Key conforming to the - // "otel.scope.version" semantic conventions. It represents the version of - // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '1.0.0' - OtelScopeVersionKey = attribute.Key("otel.scope.version") -) - -// OtelScopeName returns an attribute KeyValue conforming to the -// "otel.scope.name" semantic conventions. It represents the name of the -// instrumentation scope - (`InstrumentationScope.Name` in OTLP). -func OtelScopeName(val string) attribute.KeyValue { - return OtelScopeNameKey.String(val) -} - -// OtelScopeVersion returns an attribute KeyValue conforming to the -// "otel.scope.version" semantic conventions. 
It represents the version of the -// instrumentation scope - (`InstrumentationScope.Version` in OTLP). -func OtelScopeVersion(val string) attribute.KeyValue { - return OtelScopeVersionKey.String(val) -} - -// Span attributes used by non-OTLP exporters to represent OpenTelemetry -// Scope's concepts. -const ( - // OtelLibraryNameKey is the attribute Key conforming to the - // "otel.library.name" semantic conventions. It represents the deprecated, - // use the `otel.scope.name` attribute. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'io.opentelemetry.contrib.mongodb' - OtelLibraryNameKey = attribute.Key("otel.library.name") - - // OtelLibraryVersionKey is the attribute Key conforming to the - // "otel.library.version" semantic conventions. It represents the - // deprecated, use the `otel.scope.version` attribute. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: '1.0.0' - OtelLibraryVersionKey = attribute.Key("otel.library.version") -) - -// OtelLibraryName returns an attribute KeyValue conforming to the -// "otel.library.name" semantic conventions. It represents the deprecated, use -// the `otel.scope.name` attribute. -func OtelLibraryName(val string) attribute.KeyValue { - return OtelLibraryNameKey.String(val) -} - -// OtelLibraryVersion returns an attribute KeyValue conforming to the -// "otel.library.version" semantic conventions. It represents the deprecated, -// use the `otel.scope.version` attribute. 
-func OtelLibraryVersion(val string) attribute.KeyValue { - return OtelLibraryVersionKey.String(val) -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go deleted file mode 100644 index 42fc525d..00000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" - -// SchemaURL is the schema URL that matches the version of the semantic conventions -// that this package defines. Semconv packages starting from v1.4.0 must declare -// non-empty schema URL in the form https://opentelemetry.io/schemas/ -const SchemaURL = "https://opentelemetry.io/schemas/1.17.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go deleted file mode 100644 index 8c4a7299..00000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go +++ /dev/null @@ -1,3375 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" - -import "go.opentelemetry.io/otel/attribute" - -// The shared attributes used to report a single exception associated with a -// span or log. -const ( - // ExceptionTypeKey is the attribute Key conforming to the "exception.type" - // semantic conventions. It represents the type of the exception (its - // fully-qualified class name, if applicable). The dynamic type of the - // exception should be preferred over the static type in languages that - // support it. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'java.net.ConnectException', 'OSError' - ExceptionTypeKey = attribute.Key("exception.type") - - // ExceptionMessageKey is the attribute Key conforming to the - // "exception.message" semantic conventions. It represents the exception - // message. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Division by zero', "Can't convert 'int' object to str - // implicitly" - ExceptionMessageKey = attribute.Key("exception.message") - - // ExceptionStacktraceKey is the attribute Key conforming to the - // "exception.stacktrace" semantic conventions. It represents a stacktrace - // as a string in the natural representation for the language runtime. The - // representation is to be determined and documented by each language SIG. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test - // exception\\n at ' - // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' - // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' - // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' - ExceptionStacktraceKey = attribute.Key("exception.stacktrace") -) - -// ExceptionType returns an attribute KeyValue conforming to the -// "exception.type" semantic conventions. It represents the type of the -// exception (its fully-qualified class name, if applicable). The dynamic type -// of the exception should be preferred over the static type in languages that -// support it. -func ExceptionType(val string) attribute.KeyValue { - return ExceptionTypeKey.String(val) -} - -// ExceptionMessage returns an attribute KeyValue conforming to the -// "exception.message" semantic conventions. It represents the exception -// message. -func ExceptionMessage(val string) attribute.KeyValue { - return ExceptionMessageKey.String(val) -} - -// ExceptionStacktrace returns an attribute KeyValue conforming to the -// "exception.stacktrace" semantic conventions. It represents a stacktrace as a -// string in the natural representation for the language runtime. The -// representation is to be determined and documented by each language SIG. -func ExceptionStacktrace(val string) attribute.KeyValue { - return ExceptionStacktraceKey.String(val) -} - -// Attributes for Events represented using Log Records. -const ( - // EventNameKey is the attribute Key conforming to the "event.name" - // semantic conventions. It represents the name identifies the event. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'click', 'exception' - EventNameKey = attribute.Key("event.name") - - // EventDomainKey is the attribute Key conforming to the "event.domain" - // semantic conventions. 
It represents the domain identifies the business - // context for the events. - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - // Note: Events across different domains may have same `event.name`, yet be - // unrelated events. - EventDomainKey = attribute.Key("event.domain") -) - -var ( - // Events from browser apps - EventDomainBrowser = EventDomainKey.String("browser") - // Events from mobile apps - EventDomainDevice = EventDomainKey.String("device") - // Events from Kubernetes - EventDomainK8S = EventDomainKey.String("k8s") -) - -// EventName returns an attribute KeyValue conforming to the "event.name" -// semantic conventions. It represents the name identifies the event. -func EventName(val string) attribute.KeyValue { - return EventNameKey.String(val) -} - -// Span attributes used by AWS Lambda (in addition to general `faas` -// attributes). -const ( - // AWSLambdaInvokedARNKey is the attribute Key conforming to the - // "aws.lambda.invoked_arn" semantic conventions. It represents the full - // invoked ARN as provided on the `Context` passed to the function - // (`Lambda-Runtime-Invoked-Function-ARN` header on the - // `/runtime/invocation/next` applicable). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' - // Note: This may be different from `faas.id` if an alias is involved. - AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") -) - -// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the -// "aws.lambda.invoked_arn" semantic conventions. It represents the full -// invoked ARN as provided on the `Context` passed to the function -// (`Lambda-Runtime-Invoked-Function-ARN` header on the -// `/runtime/invocation/next` applicable). -func AWSLambdaInvokedARN(val string) attribute.KeyValue { - return AWSLambdaInvokedARNKey.String(val) -} - -// Attributes for CloudEvents. 
CloudEvents is a specification on how to define -// event data in a standard way. These attributes can be attached to spans when -// performing operations with CloudEvents, regardless of the protocol being -// used. -const ( - // CloudeventsEventIDKey is the attribute Key conforming to the - // "cloudevents.event_id" semantic conventions. It represents the - // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) - // uniquely identifies the event. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' - CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") - - // CloudeventsEventSourceKey is the attribute Key conforming to the - // "cloudevents.event_source" semantic conventions. It represents the - // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) - // identifies the context in which an event happened. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'https://github.com/cloudevents', - // '/cloudevents/spec/pull/123', 'my-service' - CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") - - // CloudeventsEventSpecVersionKey is the attribute Key conforming to the - // "cloudevents.event_spec_version" semantic conventions. It represents the - // [version of the CloudEvents - // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) - // which the event uses. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '1.0' - CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") - - // CloudeventsEventTypeKey is the attribute Key conforming to the - // "cloudevents.event_type" semantic conventions. 
It represents the - // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) - // contains a value describing the type of event related to the originating - // occurrence. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'com.github.pull_request.opened', - // 'com.example.object.deleted.v2' - CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") - - // CloudeventsEventSubjectKey is the attribute Key conforming to the - // "cloudevents.event_subject" semantic conventions. It represents the - // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) - // of the event in the context of the event producer (identified by - // source). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'mynewfile.jpg' - CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") -) - -// CloudeventsEventID returns an attribute KeyValue conforming to the -// "cloudevents.event_id" semantic conventions. It represents the -// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) -// uniquely identifies the event. -func CloudeventsEventID(val string) attribute.KeyValue { - return CloudeventsEventIDKey.String(val) -} - -// CloudeventsEventSource returns an attribute KeyValue conforming to the -// "cloudevents.event_source" semantic conventions. It represents the -// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) -// identifies the context in which an event happened. -func CloudeventsEventSource(val string) attribute.KeyValue { - return CloudeventsEventSourceKey.String(val) -} - -// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to -// the "cloudevents.event_spec_version" semantic conventions. 
It represents the -// [version of the CloudEvents -// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) -// which the event uses. -func CloudeventsEventSpecVersion(val string) attribute.KeyValue { - return CloudeventsEventSpecVersionKey.String(val) -} - -// CloudeventsEventType returns an attribute KeyValue conforming to the -// "cloudevents.event_type" semantic conventions. It represents the -// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) -// contains a value describing the type of event related to the originating -// occurrence. -func CloudeventsEventType(val string) attribute.KeyValue { - return CloudeventsEventTypeKey.String(val) -} - -// CloudeventsEventSubject returns an attribute KeyValue conforming to the -// "cloudevents.event_subject" semantic conventions. It represents the -// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) -// of the event in the context of the event producer (identified by source). -func CloudeventsEventSubject(val string) attribute.KeyValue { - return CloudeventsEventSubjectKey.String(val) -} - -// Semantic conventions for the OpenTracing Shim -const ( - // OpentracingRefTypeKey is the attribute Key conforming to the - // "opentracing.ref_type" semantic conventions. It represents the - // parent-child Reference type - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Note: The causal relationship between a child Span and a parent Span. - OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") -) - -var ( - // The parent Span depends on the child Span in some capacity - OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") - // The parent Span does not depend in any way on the result of the child Span - OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") -) - -// The attributes used to perform database client calls. 
-const ( - // DBSystemKey is the attribute Key conforming to the "db.system" semantic - // conventions. It represents an identifier for the database management - // system (DBMS) product being used. See below for a list of well-known - // identifiers. - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - DBSystemKey = attribute.Key("db.system") - - // DBConnectionStringKey is the attribute Key conforming to the - // "db.connection_string" semantic conventions. It represents the - // connection string used to connect to the database. It is recommended to - // remove embedded credentials. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' - DBConnectionStringKey = attribute.Key("db.connection_string") - - // DBUserKey is the attribute Key conforming to the "db.user" semantic - // conventions. It represents the username for accessing the database. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'readonly_user', 'reporting_user' - DBUserKey = attribute.Key("db.user") - - // DBJDBCDriverClassnameKey is the attribute Key conforming to the - // "db.jdbc.driver_classname" semantic conventions. It represents the - // fully-qualified class name of the [Java Database Connectivity - // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) - // driver used to connect. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'org.postgresql.Driver', - // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' - DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") - - // DBNameKey is the attribute Key conforming to the "db.name" semantic - // conventions. It represents the this attribute is used to report the name - // of the database being accessed. 
For commands that switch the database, - // this should be set to the target database (even if the command fails). - // - // Type: string - // RequirementLevel: ConditionallyRequired (If applicable.) - // Stability: stable - // Examples: 'customers', 'main' - // Note: In some SQL databases, the database name to be used is called - // "schema name". In case there are multiple layers that could be - // considered for database name (e.g. Oracle instance name and schema - // name), the database name to be used is the more specific layer (e.g. - // Oracle schema name). - DBNameKey = attribute.Key("db.name") - - // DBStatementKey is the attribute Key conforming to the "db.statement" - // semantic conventions. It represents the database statement being - // executed. - // - // Type: string - // RequirementLevel: ConditionallyRequired (If applicable and not - // explicitly disabled via instrumentation configuration.) - // Stability: stable - // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' - // Note: The value may be sanitized to exclude sensitive information. - DBStatementKey = attribute.Key("db.statement") - - // DBOperationKey is the attribute Key conforming to the "db.operation" - // semantic conventions. It represents the name of the operation being - // executed, e.g. the [MongoDB command - // name](https://docs.mongodb.com/manual/reference/command/#database-operations) - // such as `findAndModify`, or the SQL keyword. - // - // Type: string - // RequirementLevel: ConditionallyRequired (If `db.statement` is not - // applicable.) - // Stability: stable - // Examples: 'findAndModify', 'HMSET', 'SELECT' - // Note: When setting this to an SQL keyword, it is not recommended to - // attempt any client-side parsing of `db.statement` just to get this - // property, but it should be set if the operation name is provided by the - // library being instrumented. 
If the SQL statement has an ambiguous - // operation, or performs more than one operation, this value may be - // omitted. - DBOperationKey = attribute.Key("db.operation") -) - -var ( - // Some other SQL database. Fallback only. See notes - DBSystemOtherSQL = DBSystemKey.String("other_sql") - // Microsoft SQL Server - DBSystemMSSQL = DBSystemKey.String("mssql") - // MySQL - DBSystemMySQL = DBSystemKey.String("mysql") - // Oracle Database - DBSystemOracle = DBSystemKey.String("oracle") - // IBM DB2 - DBSystemDB2 = DBSystemKey.String("db2") - // PostgreSQL - DBSystemPostgreSQL = DBSystemKey.String("postgresql") - // Amazon Redshift - DBSystemRedshift = DBSystemKey.String("redshift") - // Apache Hive - DBSystemHive = DBSystemKey.String("hive") - // Cloudscape - DBSystemCloudscape = DBSystemKey.String("cloudscape") - // HyperSQL DataBase - DBSystemHSQLDB = DBSystemKey.String("hsqldb") - // Progress Database - DBSystemProgress = DBSystemKey.String("progress") - // SAP MaxDB - DBSystemMaxDB = DBSystemKey.String("maxdb") - // SAP HANA - DBSystemHanaDB = DBSystemKey.String("hanadb") - // Ingres - DBSystemIngres = DBSystemKey.String("ingres") - // FirstSQL - DBSystemFirstSQL = DBSystemKey.String("firstsql") - // EnterpriseDB - DBSystemEDB = DBSystemKey.String("edb") - // InterSystems Caché - DBSystemCache = DBSystemKey.String("cache") - // Adabas (Adaptable Database System) - DBSystemAdabas = DBSystemKey.String("adabas") - // Firebird - DBSystemFirebird = DBSystemKey.String("firebird") - // Apache Derby - DBSystemDerby = DBSystemKey.String("derby") - // FileMaker - DBSystemFilemaker = DBSystemKey.String("filemaker") - // Informix - DBSystemInformix = DBSystemKey.String("informix") - // InstantDB - DBSystemInstantDB = DBSystemKey.String("instantdb") - // InterBase - DBSystemInterbase = DBSystemKey.String("interbase") - // MariaDB - DBSystemMariaDB = DBSystemKey.String("mariadb") - // Netezza - DBSystemNetezza = DBSystemKey.String("netezza") - // Pervasive PSQL - 
DBSystemPervasive = DBSystemKey.String("pervasive") - // PointBase - DBSystemPointbase = DBSystemKey.String("pointbase") - // SQLite - DBSystemSqlite = DBSystemKey.String("sqlite") - // Sybase - DBSystemSybase = DBSystemKey.String("sybase") - // Teradata - DBSystemTeradata = DBSystemKey.String("teradata") - // Vertica - DBSystemVertica = DBSystemKey.String("vertica") - // H2 - DBSystemH2 = DBSystemKey.String("h2") - // ColdFusion IMQ - DBSystemColdfusion = DBSystemKey.String("coldfusion") - // Apache Cassandra - DBSystemCassandra = DBSystemKey.String("cassandra") - // Apache HBase - DBSystemHBase = DBSystemKey.String("hbase") - // MongoDB - DBSystemMongoDB = DBSystemKey.String("mongodb") - // Redis - DBSystemRedis = DBSystemKey.String("redis") - // Couchbase - DBSystemCouchbase = DBSystemKey.String("couchbase") - // CouchDB - DBSystemCouchDB = DBSystemKey.String("couchdb") - // Microsoft Azure Cosmos DB - DBSystemCosmosDB = DBSystemKey.String("cosmosdb") - // Amazon DynamoDB - DBSystemDynamoDB = DBSystemKey.String("dynamodb") - // Neo4j - DBSystemNeo4j = DBSystemKey.String("neo4j") - // Apache Geode - DBSystemGeode = DBSystemKey.String("geode") - // Elasticsearch - DBSystemElasticsearch = DBSystemKey.String("elasticsearch") - // Memcached - DBSystemMemcached = DBSystemKey.String("memcached") - // CockroachDB - DBSystemCockroachdb = DBSystemKey.String("cockroachdb") - // OpenSearch - DBSystemOpensearch = DBSystemKey.String("opensearch") - // ClickHouse - DBSystemClickhouse = DBSystemKey.String("clickhouse") -) - -// DBConnectionString returns an attribute KeyValue conforming to the -// "db.connection_string" semantic conventions. It represents the connection -// string used to connect to the database. It is recommended to remove embedded -// credentials. -func DBConnectionString(val string) attribute.KeyValue { - return DBConnectionStringKey.String(val) -} - -// DBUser returns an attribute KeyValue conforming to the "db.user" semantic -// conventions. 
It represents the username for accessing the database. -func DBUser(val string) attribute.KeyValue { - return DBUserKey.String(val) -} - -// DBJDBCDriverClassname returns an attribute KeyValue conforming to the -// "db.jdbc.driver_classname" semantic conventions. It represents the -// fully-qualified class name of the [Java Database Connectivity -// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver -// used to connect. -func DBJDBCDriverClassname(val string) attribute.KeyValue { - return DBJDBCDriverClassnameKey.String(val) -} - -// DBName returns an attribute KeyValue conforming to the "db.name" semantic -// conventions. It represents the this attribute is used to report the name of -// the database being accessed. For commands that switch the database, this -// should be set to the target database (even if the command fails). -func DBName(val string) attribute.KeyValue { - return DBNameKey.String(val) -} - -// DBStatement returns an attribute KeyValue conforming to the -// "db.statement" semantic conventions. It represents the database statement -// being executed. -func DBStatement(val string) attribute.KeyValue { - return DBStatementKey.String(val) -} - -// DBOperation returns an attribute KeyValue conforming to the -// "db.operation" semantic conventions. It represents the name of the operation -// being executed, e.g. the [MongoDB command -// name](https://docs.mongodb.com/manual/reference/command/#database-operations) -// such as `findAndModify`, or the SQL keyword. -func DBOperation(val string) attribute.KeyValue { - return DBOperationKey.String(val) -} - -// Connection-level attributes for Microsoft SQL Server -const ( - // DBMSSQLInstanceNameKey is the attribute Key conforming to the - // "db.mssql.instance_name" semantic conventions. It represents the - // Microsoft SQL Server [instance - // name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) - // connecting to. 
This name is used to determine the port of a named - // instance. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'MSSQLSERVER' - // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no - // longer required (but still recommended if non-standard). - DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") -) - -// DBMSSQLInstanceName returns an attribute KeyValue conforming to the -// "db.mssql.instance_name" semantic conventions. It represents the Microsoft -// SQL Server [instance -// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) -// connecting to. This name is used to determine the port of a named instance. -func DBMSSQLInstanceName(val string) attribute.KeyValue { - return DBMSSQLInstanceNameKey.String(val) -} - -// Call-level attributes for Cassandra -const ( - // DBCassandraPageSizeKey is the attribute Key conforming to the - // "db.cassandra.page_size" semantic conventions. It represents the fetch - // size used for paging, i.e. how many rows will be returned at once. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 5000 - DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") - - // DBCassandraConsistencyLevelKey is the attribute Key conforming to the - // "db.cassandra.consistency_level" semantic conventions. It represents the - // consistency level of the query. Based on consistency values from - // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") - - // DBCassandraTableKey is the attribute Key conforming to the - // "db.cassandra.table" semantic conventions. 
It represents the name of the - // primary table that the operation is acting upon, including the keyspace - // name (if applicable). - // - // Type: string - // RequirementLevel: Recommended - // Stability: stable - // Examples: 'mytable' - // Note: This mirrors the db.sql.table attribute but references cassandra - // rather than sql. It is not recommended to attempt any client-side - // parsing of `db.statement` just to get this property, but it should be - // set if it is provided by the library being instrumented. If the - // operation is acting upon an anonymous table, or more than one table, - // this value MUST NOT be set. - DBCassandraTableKey = attribute.Key("db.cassandra.table") - - // DBCassandraIdempotenceKey is the attribute Key conforming to the - // "db.cassandra.idempotence" semantic conventions. It represents the - // whether or not the query is idempotent. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") - - // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming - // to the "db.cassandra.speculative_execution_count" semantic conventions. - // It represents the number of times a query was speculatively executed. - // Not set or `0` if the query was not executed speculatively. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 0, 2 - DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") - - // DBCassandraCoordinatorIDKey is the attribute Key conforming to the - // "db.cassandra.coordinator.id" semantic conventions. It represents the ID - // of the coordinating node for a query. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' - DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") - - // DBCassandraCoordinatorDCKey is the attribute Key conforming to the - // "db.cassandra.coordinator.dc" semantic conventions. It represents the - // data center of the coordinating node for a query. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'us-west-2' - DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") -) - -var ( - // all - DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") - // each_quorum - DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") - // quorum - DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") - // local_quorum - DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") - // one - DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") - // two - DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") - // three - DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") - // local_one - DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") - // any - DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") - // serial - DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") - // local_serial - DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") -) - -// DBCassandraPageSize returns an attribute KeyValue conforming to the -// "db.cassandra.page_size" semantic conventions. It represents the fetch size -// used for paging, i.e. how many rows will be returned at once. 
-func DBCassandraPageSize(val int) attribute.KeyValue { - return DBCassandraPageSizeKey.Int(val) -} - -// DBCassandraTable returns an attribute KeyValue conforming to the -// "db.cassandra.table" semantic conventions. It represents the name of the -// primary table that the operation is acting upon, including the keyspace name -// (if applicable). -func DBCassandraTable(val string) attribute.KeyValue { - return DBCassandraTableKey.String(val) -} - -// DBCassandraIdempotence returns an attribute KeyValue conforming to the -// "db.cassandra.idempotence" semantic conventions. It represents the whether -// or not the query is idempotent. -func DBCassandraIdempotence(val bool) attribute.KeyValue { - return DBCassandraIdempotenceKey.Bool(val) -} - -// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue -// conforming to the "db.cassandra.speculative_execution_count" semantic -// conventions. It represents the number of times a query was speculatively -// executed. Not set or `0` if the query was not executed speculatively. -func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue { - return DBCassandraSpeculativeExecutionCountKey.Int(val) -} - -// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the -// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of -// the coordinating node for a query. -func DBCassandraCoordinatorID(val string) attribute.KeyValue { - return DBCassandraCoordinatorIDKey.String(val) -} - -// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the -// "db.cassandra.coordinator.dc" semantic conventions. It represents the data -// center of the coordinating node for a query. -func DBCassandraCoordinatorDC(val string) attribute.KeyValue { - return DBCassandraCoordinatorDCKey.String(val) -} - -// Call-level attributes for Redis -const ( - // DBRedisDBIndexKey is the attribute Key conforming to the - // "db.redis.database_index" semantic conventions. 
It represents the index - // of the database being accessed as used in the [`SELECT` - // command](https://redis.io/commands/select), provided as an integer. To - // be used instead of the generic `db.name` attribute. - // - // Type: int - // RequirementLevel: ConditionallyRequired (If other than the default - // database (`0`).) - // Stability: stable - // Examples: 0, 1, 15 - DBRedisDBIndexKey = attribute.Key("db.redis.database_index") -) - -// DBRedisDBIndex returns an attribute KeyValue conforming to the -// "db.redis.database_index" semantic conventions. It represents the index of -// the database being accessed as used in the [`SELECT` -// command](https://redis.io/commands/select), provided as an integer. To be -// used instead of the generic `db.name` attribute. -func DBRedisDBIndex(val int) attribute.KeyValue { - return DBRedisDBIndexKey.Int(val) -} - -// Call-level attributes for MongoDB -const ( - // DBMongoDBCollectionKey is the attribute Key conforming to the - // "db.mongodb.collection" semantic conventions. It represents the - // collection being accessed within the database stated in `db.name`. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'customers', 'products' - DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") -) - -// DBMongoDBCollection returns an attribute KeyValue conforming to the -// "db.mongodb.collection" semantic conventions. It represents the collection -// being accessed within the database stated in `db.name`. -func DBMongoDBCollection(val string) attribute.KeyValue { - return DBMongoDBCollectionKey.String(val) -} - -// Call-level attributes for SQL databases -const ( - // DBSQLTableKey is the attribute Key conforming to the "db.sql.table" - // semantic conventions. It represents the name of the primary table that - // the operation is acting upon, including the database name (if - // applicable). 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: stable - // Examples: 'public.users', 'customers' - // Note: It is not recommended to attempt any client-side parsing of - // `db.statement` just to get this property, but it should be set if it is - // provided by the library being instrumented. If the operation is acting - // upon an anonymous table, or more than one table, this value MUST NOT be - // set. - DBSQLTableKey = attribute.Key("db.sql.table") -) - -// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table" -// semantic conventions. It represents the name of the primary table that the -// operation is acting upon, including the database name (if applicable). -func DBSQLTable(val string) attribute.KeyValue { - return DBSQLTableKey.String(val) -} - -// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's -// concepts. -const ( - // OtelStatusCodeKey is the attribute Key conforming to the - // "otel.status_code" semantic conventions. It represents the name of the - // code, either "OK" or "ERROR". MUST NOT be set if the status code is - // UNSET. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - OtelStatusCodeKey = attribute.Key("otel.status_code") - - // OtelStatusDescriptionKey is the attribute Key conforming to the - // "otel.status_description" semantic conventions. It represents the - // description of the Status if it has a value, otherwise not set. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'resource not found' - OtelStatusDescriptionKey = attribute.Key("otel.status_description") -) - -var ( - // The operation has been validated by an Application developer or Operator to have completed successfully - OtelStatusCodeOk = OtelStatusCodeKey.String("OK") - // The operation contains an error - OtelStatusCodeError = OtelStatusCodeKey.String("ERROR") -) - -// OtelStatusDescription returns an attribute KeyValue conforming to the -// "otel.status_description" semantic conventions. It represents the -// description of the Status if it has a value, otherwise not set. -func OtelStatusDescription(val string) attribute.KeyValue { - return OtelStatusDescriptionKey.String(val) -} - -// This semantic convention describes an instance of a function that runs -// without provisioning or managing of servers (also known as serverless -// functions or Function as a Service (FaaS)) with spans. -const ( - // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" - // semantic conventions. It represents the type of the trigger which caused - // this function execution. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Note: For the server/consumer span on the incoming side, - // `faas.trigger` MUST be set. - // - // Clients invoking FaaS instances usually cannot set `faas.trigger`, - // since they would typically need to look in the payload to determine - // the event type. If clients set it, it should be the same as the - // trigger that corresponding incoming would have (i.e., this has - // nothing to do with the underlying transport used to make the API - // call to invoke the lambda, which is often HTTP). - FaaSTriggerKey = attribute.Key("faas.trigger") - - // FaaSExecutionKey is the attribute Key conforming to the "faas.execution" - // semantic conventions. It represents the execution ID of the current - // function execution. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' - FaaSExecutionKey = attribute.Key("faas.execution") -) - -var ( - // A response to some data source operation such as a database or filesystem read/write - FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") - // To provide an answer to an inbound HTTP request - FaaSTriggerHTTP = FaaSTriggerKey.String("http") - // A function is set to be executed when messages are sent to a messaging system - FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") - // A function is scheduled to be executed regularly - FaaSTriggerTimer = FaaSTriggerKey.String("timer") - // If none of the others apply - FaaSTriggerOther = FaaSTriggerKey.String("other") -) - -// FaaSExecution returns an attribute KeyValue conforming to the -// "faas.execution" semantic conventions. It represents the execution ID of the -// current function execution. -func FaaSExecution(val string) attribute.KeyValue { - return FaaSExecutionKey.String(val) -} - -// Semantic Convention for FaaS triggered as a response to some data source -// operation such as a database or filesystem read/write. -const ( - // FaaSDocumentCollectionKey is the attribute Key conforming to the - // "faas.document.collection" semantic conventions. It represents the name - // of the source on which the triggering operation was performed. For - // example, in Cloud Storage or S3 corresponds to the bucket name, and in - // Cosmos DB to the database name. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'myBucketName', 'myDBName' - FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") - - // FaaSDocumentOperationKey is the attribute Key conforming to the - // "faas.document.operation" semantic conventions. It represents the - // describes the type of the operation that was performed on the data. 
- // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - FaaSDocumentOperationKey = attribute.Key("faas.document.operation") - - // FaaSDocumentTimeKey is the attribute Key conforming to the - // "faas.document.time" semantic conventions. It represents a string - // containing the time when the data was accessed in the [ISO - // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format - // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '2020-01-23T13:47:06Z' - FaaSDocumentTimeKey = attribute.Key("faas.document.time") - - // FaaSDocumentNameKey is the attribute Key conforming to the - // "faas.document.name" semantic conventions. It represents the document - // name/table subjected to the operation. For example, in Cloud Storage or - // S3 is the name of the file, and in Cosmos DB the table name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'myFile.txt', 'myTableName' - FaaSDocumentNameKey = attribute.Key("faas.document.name") -) - -var ( - // When a new object is created - FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") - // When an object is modified - FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") - // When an object is deleted - FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") -) - -// FaaSDocumentCollection returns an attribute KeyValue conforming to the -// "faas.document.collection" semantic conventions. It represents the name of -// the source on which the triggering operation was performed. For example, in -// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the -// database name. 
-func FaaSDocumentCollection(val string) attribute.KeyValue { - return FaaSDocumentCollectionKey.String(val) -} - -// FaaSDocumentTime returns an attribute KeyValue conforming to the -// "faas.document.time" semantic conventions. It represents a string containing -// the time when the data was accessed in the [ISO -// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format -// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). -func FaaSDocumentTime(val string) attribute.KeyValue { - return FaaSDocumentTimeKey.String(val) -} - -// FaaSDocumentName returns an attribute KeyValue conforming to the -// "faas.document.name" semantic conventions. It represents the document -// name/table subjected to the operation. For example, in Cloud Storage or S3 -// is the name of the file, and in Cosmos DB the table name. -func FaaSDocumentName(val string) attribute.KeyValue { - return FaaSDocumentNameKey.String(val) -} - -// Semantic Convention for FaaS scheduled to be executed regularly. -const ( - // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic - // conventions. It represents a string containing the function invocation - // time in the [ISO - // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format - // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '2020-01-23T13:47:06Z' - FaaSTimeKey = attribute.Key("faas.time") - - // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic - // conventions. It represents a string containing the schedule period as - // [Cron - // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '0/5 * * * ? 
*' - FaaSCronKey = attribute.Key("faas.cron") -) - -// FaaSTime returns an attribute KeyValue conforming to the "faas.time" -// semantic conventions. It represents a string containing the function -// invocation time in the [ISO -// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format -// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). -func FaaSTime(val string) attribute.KeyValue { - return FaaSTimeKey.String(val) -} - -// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" -// semantic conventions. It represents a string containing the schedule period -// as [Cron -// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). -func FaaSCron(val string) attribute.KeyValue { - return FaaSCronKey.String(val) -} - -// Contains additional attributes for incoming FaaS spans. -const ( - // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" - // semantic conventions. It represents a boolean that is true if the - // serverless function is executed for the first time (aka cold-start). - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - FaaSColdstartKey = attribute.Key("faas.coldstart") -) - -// FaaSColdstart returns an attribute KeyValue conforming to the -// "faas.coldstart" semantic conventions. It represents a boolean that is true -// if the serverless function is executed for the first time (aka cold-start). -func FaaSColdstart(val bool) attribute.KeyValue { - return FaaSColdstartKey.Bool(val) -} - -// Contains additional attributes for outgoing FaaS spans. -const ( - // FaaSInvokedNameKey is the attribute Key conforming to the - // "faas.invoked_name" semantic conventions. It represents the name of the - // invoked function. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'my-function' - // Note: SHOULD be equal to the `faas.name` resource attribute of the - // invoked function. 
- FaaSInvokedNameKey = attribute.Key("faas.invoked_name") - - // FaaSInvokedProviderKey is the attribute Key conforming to the - // "faas.invoked_provider" semantic conventions. It represents the cloud - // provider of the invoked function. - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - // Note: SHOULD be equal to the `cloud.provider` resource attribute of the - // invoked function. - FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") - - // FaaSInvokedRegionKey is the attribute Key conforming to the - // "faas.invoked_region" semantic conventions. It represents the cloud - // region of the invoked function. - // - // Type: string - // RequirementLevel: ConditionallyRequired (For some cloud providers, like - // AWS or GCP, the region in which a function is hosted is essential to - // uniquely identify the function and also part of its endpoint. Since it's - // part of the endpoint being called, the region is always known to - // clients. In these cases, `faas.invoked_region` MUST be set accordingly. - // If the region is unknown to the client or not required for identifying - // the invoked function, setting `faas.invoked_region` is optional.) - // Stability: stable - // Examples: 'eu-central-1' - // Note: SHOULD be equal to the `cloud.region` resource attribute of the - // invoked function. 
- FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") -) - -var ( - // Alibaba Cloud - FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") - // Amazon Web Services - FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") - // Microsoft Azure - FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") - // Google Cloud Platform - FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") - // Tencent Cloud - FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") -) - -// FaaSInvokedName returns an attribute KeyValue conforming to the -// "faas.invoked_name" semantic conventions. It represents the name of the -// invoked function. -func FaaSInvokedName(val string) attribute.KeyValue { - return FaaSInvokedNameKey.String(val) -} - -// FaaSInvokedRegion returns an attribute KeyValue conforming to the -// "faas.invoked_region" semantic conventions. It represents the cloud region -// of the invoked function. -func FaaSInvokedRegion(val string) attribute.KeyValue { - return FaaSInvokedRegionKey.String(val) -} - -// These attributes may be used for any network related operation. -const ( - // NetTransportKey is the attribute Key conforming to the "net.transport" - // semantic conventions. It represents the transport protocol used. See - // note below. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - NetTransportKey = attribute.Key("net.transport") - - // NetAppProtocolNameKey is the attribute Key conforming to the - // "net.app.protocol.name" semantic conventions. It represents the - // application layer protocol used. The value SHOULD be normalized to - // lowercase. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'amqp', 'http', 'mqtt' - NetAppProtocolNameKey = attribute.Key("net.app.protocol.name") - - // NetAppProtocolVersionKey is the attribute Key conforming to the - // "net.app.protocol.version" semantic conventions. It represents the - // version of the application layer protocol used. See note below. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '3.1.1' - // Note: `net.app.protocol.version` refers to the version of the protocol - // used and might be different from the protocol client's version. If the - // HTTP client used has a version of `0.27.2`, but sends HTTP version - // `1.1`, this attribute should be set to `1.1`. - NetAppProtocolVersionKey = attribute.Key("net.app.protocol.version") - - // NetSockPeerNameKey is the attribute Key conforming to the - // "net.sock.peer.name" semantic conventions. It represents the remote - // socket peer name. - // - // Type: string - // RequirementLevel: Recommended (If available and different from - // `net.peer.name` and if `net.sock.peer.addr` is set.) - // Stability: stable - // Examples: 'proxy.example.com' - NetSockPeerNameKey = attribute.Key("net.sock.peer.name") - - // NetSockPeerAddrKey is the attribute Key conforming to the - // "net.sock.peer.addr" semantic conventions. It represents the remote - // socket peer address: IPv4 or IPv6 for internet protocols, path for local - // communication, - // [etc](https://man7.org/linux/man-pages/man7/address_families.7.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '127.0.0.1', '/tmp/mysql.sock' - NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr") - - // NetSockPeerPortKey is the attribute Key conforming to the - // "net.sock.peer.port" semantic conventions. It represents the remote - // socket peer port. 
- // - // Type: int - // RequirementLevel: Recommended (If defined for the address family and if - // different than `net.peer.port` and if `net.sock.peer.addr` is set.) - // Stability: stable - // Examples: 16456 - NetSockPeerPortKey = attribute.Key("net.sock.peer.port") - - // NetSockFamilyKey is the attribute Key conforming to the - // "net.sock.family" semantic conventions. It represents the protocol - // [address - // family](https://man7.org/linux/man-pages/man7/address_families.7.html) - // which is used for communication. - // - // Type: Enum - // RequirementLevel: ConditionallyRequired (If different than `inet` and if - // any of `net.sock.peer.addr` or `net.sock.host.addr` are set. Consumers - // of telemetry SHOULD accept both IPv4 and IPv6 formats for the address in - // `net.sock.peer.addr` if `net.sock.family` is not set. This is to support - // instrumentations that follow previous versions of this document.) - // Stability: stable - // Examples: 'inet6', 'bluetooth' - NetSockFamilyKey = attribute.Key("net.sock.family") - - // NetPeerNameKey is the attribute Key conforming to the "net.peer.name" - // semantic conventions. It represents the logical remote hostname, see - // note below. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'example.com' - // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an - // extra DNS lookup. - NetPeerNameKey = attribute.Key("net.peer.name") - - // NetPeerPortKey is the attribute Key conforming to the "net.peer.port" - // semantic conventions. It represents the logical remote port number - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 80, 8080, 443 - NetPeerPortKey = attribute.Key("net.peer.port") - - // NetHostNameKey is the attribute Key conforming to the "net.host.name" - // semantic conventions. It represents the logical local hostname or - // similar, see note below. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'localhost' - NetHostNameKey = attribute.Key("net.host.name") - - // NetHostPortKey is the attribute Key conforming to the "net.host.port" - // semantic conventions. It represents the logical local port number, - // preferably the one that the peer used to connect - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 8080 - NetHostPortKey = attribute.Key("net.host.port") - - // NetSockHostAddrKey is the attribute Key conforming to the - // "net.sock.host.addr" semantic conventions. It represents the local - // socket address. Useful in case of a multi-IP host. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '192.168.0.1' - NetSockHostAddrKey = attribute.Key("net.sock.host.addr") - - // NetSockHostPortKey is the attribute Key conforming to the - // "net.sock.host.port" semantic conventions. It represents the local - // socket port number. - // - // Type: int - // RequirementLevel: Recommended (If defined for the address family and if - // different than `net.host.port` and if `net.sock.host.addr` is set.) - // Stability: stable - // Examples: 35555 - NetSockHostPortKey = attribute.Key("net.sock.host.port") - - // NetHostConnectionTypeKey is the attribute Key conforming to the - // "net.host.connection.type" semantic conventions. It represents the - // internet connection type currently being used by the host. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'wifi' - NetHostConnectionTypeKey = attribute.Key("net.host.connection.type") - - // NetHostConnectionSubtypeKey is the attribute Key conforming to the - // "net.host.connection.subtype" semantic conventions. It represents the - // this describes more details regarding the connection.type. 
It may be the - // type of cell technology connection, but it could be used for describing - // details about a wifi connection. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'LTE' - NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype") - - // NetHostCarrierNameKey is the attribute Key conforming to the - // "net.host.carrier.name" semantic conventions. It represents the name of - // the mobile carrier. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'sprint' - NetHostCarrierNameKey = attribute.Key("net.host.carrier.name") - - // NetHostCarrierMccKey is the attribute Key conforming to the - // "net.host.carrier.mcc" semantic conventions. It represents the mobile - // carrier country code. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '310' - NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc") - - // NetHostCarrierMncKey is the attribute Key conforming to the - // "net.host.carrier.mnc" semantic conventions. It represents the mobile - // carrier network code. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '001' - NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc") - - // NetHostCarrierIccKey is the attribute Key conforming to the - // "net.host.carrier.icc" semantic conventions. It represents the ISO - // 3166-1 alpha-2 2-character country code associated with the mobile - // carrier network. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'DE' - NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc") -) - -var ( - // ip_tcp - NetTransportTCP = NetTransportKey.String("ip_tcp") - // ip_udp - NetTransportUDP = NetTransportKey.String("ip_udp") - // Named or anonymous pipe. 
See note below - NetTransportPipe = NetTransportKey.String("pipe") - // In-process communication - NetTransportInProc = NetTransportKey.String("inproc") - // Something else (non IP-based) - NetTransportOther = NetTransportKey.String("other") -) - -var ( - // IPv4 address - NetSockFamilyInet = NetSockFamilyKey.String("inet") - // IPv6 address - NetSockFamilyInet6 = NetSockFamilyKey.String("inet6") - // Unix domain socket path - NetSockFamilyUnix = NetSockFamilyKey.String("unix") -) - -var ( - // wifi - NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi") - // wired - NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired") - // cell - NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell") - // unavailable - NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable") - // unknown - NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown") -) - -var ( - // GPRS - NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs") - // EDGE - NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge") - // UMTS - NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts") - // CDMA - NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma") - // EVDO Rel. 0 - NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0") - // EVDO Rev. A - NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a") - // CDMA2000 1XRTT - NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt") - // HSDPA - NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa") - // HSUPA - NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa") - // HSPA - NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa") - // IDEN - NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden") - // EVDO Rev. 
B - NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b") - // LTE - NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte") - // EHRPD - NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd") - // HSPAP - NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap") - // GSM - NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm") - // TD-SCDMA - NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma") - // IWLAN - NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan") - // 5G NR (New Radio) - NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr") - // 5G NRNSA (New Radio Non-Standalone) - NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa") - // LTE CA - NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca") -) - -// NetAppProtocolName returns an attribute KeyValue conforming to the -// "net.app.protocol.name" semantic conventions. It represents the application -// layer protocol used. The value SHOULD be normalized to lowercase. -func NetAppProtocolName(val string) attribute.KeyValue { - return NetAppProtocolNameKey.String(val) -} - -// NetAppProtocolVersion returns an attribute KeyValue conforming to the -// "net.app.protocol.version" semantic conventions. It represents the version -// of the application layer protocol used. See note below. -func NetAppProtocolVersion(val string) attribute.KeyValue { - return NetAppProtocolVersionKey.String(val) -} - -// NetSockPeerName returns an attribute KeyValue conforming to the -// "net.sock.peer.name" semantic conventions. It represents the remote socket -// peer name. -func NetSockPeerName(val string) attribute.KeyValue { - return NetSockPeerNameKey.String(val) -} - -// NetSockPeerAddr returns an attribute KeyValue conforming to the -// "net.sock.peer.addr" semantic conventions. 
It represents the remote socket -// peer address: IPv4 or IPv6 for internet protocols, path for local -// communication, -// [etc](https://man7.org/linux/man-pages/man7/address_families.7.html). -func NetSockPeerAddr(val string) attribute.KeyValue { - return NetSockPeerAddrKey.String(val) -} - -// NetSockPeerPort returns an attribute KeyValue conforming to the -// "net.sock.peer.port" semantic conventions. It represents the remote socket -// peer port. -func NetSockPeerPort(val int) attribute.KeyValue { - return NetSockPeerPortKey.Int(val) -} - -// NetPeerName returns an attribute KeyValue conforming to the -// "net.peer.name" semantic conventions. It represents the logical remote -// hostname, see note below. -func NetPeerName(val string) attribute.KeyValue { - return NetPeerNameKey.String(val) -} - -// NetPeerPort returns an attribute KeyValue conforming to the -// "net.peer.port" semantic conventions. It represents the logical remote port -// number -func NetPeerPort(val int) attribute.KeyValue { - return NetPeerPortKey.Int(val) -} - -// NetHostName returns an attribute KeyValue conforming to the -// "net.host.name" semantic conventions. It represents the logical local -// hostname or similar, see note below. -func NetHostName(val string) attribute.KeyValue { - return NetHostNameKey.String(val) -} - -// NetHostPort returns an attribute KeyValue conforming to the -// "net.host.port" semantic conventions. It represents the logical local port -// number, preferably the one that the peer used to connect -func NetHostPort(val int) attribute.KeyValue { - return NetHostPortKey.Int(val) -} - -// NetSockHostAddr returns an attribute KeyValue conforming to the -// "net.sock.host.addr" semantic conventions. It represents the local socket -// address. Useful in case of a multi-IP host. 
-func NetSockHostAddr(val string) attribute.KeyValue { - return NetSockHostAddrKey.String(val) -} - -// NetSockHostPort returns an attribute KeyValue conforming to the -// "net.sock.host.port" semantic conventions. It represents the local socket -// port number. -func NetSockHostPort(val int) attribute.KeyValue { - return NetSockHostPortKey.Int(val) -} - -// NetHostCarrierName returns an attribute KeyValue conforming to the -// "net.host.carrier.name" semantic conventions. It represents the name of the -// mobile carrier. -func NetHostCarrierName(val string) attribute.KeyValue { - return NetHostCarrierNameKey.String(val) -} - -// NetHostCarrierMcc returns an attribute KeyValue conforming to the -// "net.host.carrier.mcc" semantic conventions. It represents the mobile -// carrier country code. -func NetHostCarrierMcc(val string) attribute.KeyValue { - return NetHostCarrierMccKey.String(val) -} - -// NetHostCarrierMnc returns an attribute KeyValue conforming to the -// "net.host.carrier.mnc" semantic conventions. It represents the mobile -// carrier network code. -func NetHostCarrierMnc(val string) attribute.KeyValue { - return NetHostCarrierMncKey.String(val) -} - -// NetHostCarrierIcc returns an attribute KeyValue conforming to the -// "net.host.carrier.icc" semantic conventions. It represents the ISO 3166-1 -// alpha-2 2-character country code associated with the mobile carrier network. -func NetHostCarrierIcc(val string) attribute.KeyValue { - return NetHostCarrierIccKey.String(val) -} - -// Operations that access some remote service. -const ( - // PeerServiceKey is the attribute Key conforming to the "peer.service" - // semantic conventions. It represents the - // [`service.name`](../../resource/semantic_conventions/README.md#service) - // of the remote service. SHOULD be equal to the actual `service.name` - // resource attribute of the remote service if any. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'AuthTokenCache' - PeerServiceKey = attribute.Key("peer.service") -) - -// PeerService returns an attribute KeyValue conforming to the -// "peer.service" semantic conventions. It represents the -// [`service.name`](../../resource/semantic_conventions/README.md#service) of -// the remote service. SHOULD be equal to the actual `service.name` resource -// attribute of the remote service if any. -func PeerService(val string) attribute.KeyValue { - return PeerServiceKey.String(val) -} - -// These attributes may be used for any operation with an authenticated and/or -// authorized enduser. -const ( - // EnduserIDKey is the attribute Key conforming to the "enduser.id" - // semantic conventions. It represents the username or client_id extracted - // from the access token or - // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header - // in the inbound request from outside the system. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'username' - EnduserIDKey = attribute.Key("enduser.id") - - // EnduserRoleKey is the attribute Key conforming to the "enduser.role" - // semantic conventions. It represents the actual/assumed role the client - // is making the request under extracted from token or application security - // context. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'admin' - EnduserRoleKey = attribute.Key("enduser.role") - - // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" - // semantic conventions. It represents the scopes or granted authorities - // the client currently possesses extracted from token or application - // security context. 
The value would come from the scope associated with an - // [OAuth 2.0 Access - // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute - // value in a [SAML 2.0 - // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'read:message, write:files' - EnduserScopeKey = attribute.Key("enduser.scope") -) - -// EnduserID returns an attribute KeyValue conforming to the "enduser.id" -// semantic conventions. It represents the username or client_id extracted from -// the access token or -// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in -// the inbound request from outside the system. -func EnduserID(val string) attribute.KeyValue { - return EnduserIDKey.String(val) -} - -// EnduserRole returns an attribute KeyValue conforming to the -// "enduser.role" semantic conventions. It represents the actual/assumed role -// the client is making the request under extracted from token or application -// security context. -func EnduserRole(val string) attribute.KeyValue { - return EnduserRoleKey.String(val) -} - -// EnduserScope returns an attribute KeyValue conforming to the -// "enduser.scope" semantic conventions. It represents the scopes or granted -// authorities the client currently possesses extracted from token or -// application security context. The value would come from the scope associated -// with an [OAuth 2.0 Access -// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute -// value in a [SAML 2.0 -// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). -func EnduserScope(val string) attribute.KeyValue { - return EnduserScopeKey.String(val) -} - -// These attributes may be used for any operation to store information about a -// thread that started a span. 
-const ( - // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic - // conventions. It represents the current "managed" thread ID (as opposed - // to OS thread ID). - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 42 - ThreadIDKey = attribute.Key("thread.id") - - // ThreadNameKey is the attribute Key conforming to the "thread.name" - // semantic conventions. It represents the current thread name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'main' - ThreadNameKey = attribute.Key("thread.name") -) - -// ThreadID returns an attribute KeyValue conforming to the "thread.id" -// semantic conventions. It represents the current "managed" thread ID (as -// opposed to OS thread ID). -func ThreadID(val int) attribute.KeyValue { - return ThreadIDKey.Int(val) -} - -// ThreadName returns an attribute KeyValue conforming to the "thread.name" -// semantic conventions. It represents the current thread name. -func ThreadName(val string) attribute.KeyValue { - return ThreadNameKey.String(val) -} - -// These attributes allow to report this unit of code and therefore to provide -// more context about the span. -const ( - // CodeFunctionKey is the attribute Key conforming to the "code.function" - // semantic conventions. It represents the method or function name, or - // equivalent (usually rightmost part of the code unit's name). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'serveRequest' - CodeFunctionKey = attribute.Key("code.function") - - // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" - // semantic conventions. It represents the "namespace" within which - // `code.function` is defined. Usually the qualified class or module name, - // such that `code.namespace` + some separator + `code.function` form a - // unique identifier for the code unit. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'com.example.MyHTTPService' - CodeNamespaceKey = attribute.Key("code.namespace") - - // CodeFilepathKey is the attribute Key conforming to the "code.filepath" - // semantic conventions. It represents the source code file name that - // identifies the code unit as uniquely as possible (preferably an absolute - // file path). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '/usr/local/MyApplication/content_root/app/index.php' - CodeFilepathKey = attribute.Key("code.filepath") - - // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" - // semantic conventions. It represents the line number in `code.filepath` - // best representing the operation. It SHOULD point within the code unit - // named in `code.function`. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 42 - CodeLineNumberKey = attribute.Key("code.lineno") - - // CodeColumnKey is the attribute Key conforming to the "code.column" - // semantic conventions. It represents the column number in `code.filepath` - // best representing the operation. It SHOULD point within the code unit - // named in `code.function`. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 16 - CodeColumnKey = attribute.Key("code.column") -) - -// CodeFunction returns an attribute KeyValue conforming to the -// "code.function" semantic conventions. It represents the method or function -// name, or equivalent (usually rightmost part of the code unit's name). -func CodeFunction(val string) attribute.KeyValue { - return CodeFunctionKey.String(val) -} - -// CodeNamespace returns an attribute KeyValue conforming to the -// "code.namespace" semantic conventions. It represents the "namespace" within -// which `code.function` is defined. 
Usually the qualified class or module -// name, such that `code.namespace` + some separator + `code.function` form a -// unique identifier for the code unit. -func CodeNamespace(val string) attribute.KeyValue { - return CodeNamespaceKey.String(val) -} - -// CodeFilepath returns an attribute KeyValue conforming to the -// "code.filepath" semantic conventions. It represents the source code file -// name that identifies the code unit as uniquely as possible (preferably an -// absolute file path). -func CodeFilepath(val string) attribute.KeyValue { - return CodeFilepathKey.String(val) -} - -// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" -// semantic conventions. It represents the line number in `code.filepath` best -// representing the operation. It SHOULD point within the code unit named in -// `code.function`. -func CodeLineNumber(val int) attribute.KeyValue { - return CodeLineNumberKey.Int(val) -} - -// CodeColumn returns an attribute KeyValue conforming to the "code.column" -// semantic conventions. It represents the column number in `code.filepath` -// best representing the operation. It SHOULD point within the code unit named -// in `code.function`. -func CodeColumn(val int) attribute.KeyValue { - return CodeColumnKey.Int(val) -} - -// Semantic conventions for HTTP client and server Spans. -const ( - // HTTPMethodKey is the attribute Key conforming to the "http.method" - // semantic conventions. It represents the hTTP request method. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'GET', 'POST', 'HEAD' - HTTPMethodKey = attribute.Key("http.method") - - // HTTPStatusCodeKey is the attribute Key conforming to the - // "http.status_code" semantic conventions. It represents the [HTTP - // response status code](https://tools.ietf.org/html/rfc7231#section-6). - // - // Type: int - // RequirementLevel: ConditionallyRequired (If and only if one was - // received/sent.) 
- // Stability: stable - // Examples: 200 - HTTPStatusCodeKey = attribute.Key("http.status_code") - - // HTTPFlavorKey is the attribute Key conforming to the "http.flavor" - // semantic conventions. It represents the kind of HTTP protocol used. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Note: If `net.transport` is not specified, it can be assumed to be - // `IP.TCP` except if `http.flavor` is `QUIC`, in which case `IP.UDP` is - // assumed. - HTTPFlavorKey = attribute.Key("http.flavor") - - // HTTPUserAgentKey is the attribute Key conforming to the - // "http.user_agent" semantic conventions. It represents the value of the - // [HTTP - // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) - // header sent by the client. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' - HTTPUserAgentKey = attribute.Key("http.user_agent") - - // HTTPRequestContentLengthKey is the attribute Key conforming to the - // "http.request_content_length" semantic conventions. It represents the - // size of the request payload body in bytes. This is the number of bytes - // transferred excluding headers and is often, but not always, present as - // the - // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) - // header. For requests using transport encoding, this should be the - // compressed size. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 3495 - HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") - - // HTTPResponseContentLengthKey is the attribute Key conforming to the - // "http.response_content_length" semantic conventions. It represents the - // size of the response payload body in bytes. 
This is the number of bytes - // transferred excluding headers and is often, but not always, present as - // the - // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) - // header. For requests using transport encoding, this should be the - // compressed size. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 3495 - HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") -) - -var ( - // HTTP/1.0 - HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0") - // HTTP/1.1 - HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1") - // HTTP/2 - HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0") - // HTTP/3 - HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0") - // SPDY protocol - HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY") - // QUIC protocol - HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC") -) - -// HTTPMethod returns an attribute KeyValue conforming to the "http.method" -// semantic conventions. It represents the hTTP request method. -func HTTPMethod(val string) attribute.KeyValue { - return HTTPMethodKey.String(val) -} - -// HTTPStatusCode returns an attribute KeyValue conforming to the -// "http.status_code" semantic conventions. It represents the [HTTP response -// status code](https://tools.ietf.org/html/rfc7231#section-6). -func HTTPStatusCode(val int) attribute.KeyValue { - return HTTPStatusCodeKey.Int(val) -} - -// HTTPUserAgent returns an attribute KeyValue conforming to the -// "http.user_agent" semantic conventions. It represents the value of the [HTTP -// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) -// header sent by the client. -func HTTPUserAgent(val string) attribute.KeyValue { - return HTTPUserAgentKey.String(val) -} - -// HTTPRequestContentLength returns an attribute KeyValue conforming to the -// "http.request_content_length" semantic conventions. It represents the size -// of the request payload body in bytes. 
This is the number of bytes -// transferred excluding headers and is often, but not always, present as the -// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) -// header. For requests using transport encoding, this should be the compressed -// size. -func HTTPRequestContentLength(val int) attribute.KeyValue { - return HTTPRequestContentLengthKey.Int(val) -} - -// HTTPResponseContentLength returns an attribute KeyValue conforming to the -// "http.response_content_length" semantic conventions. It represents the size -// of the response payload body in bytes. This is the number of bytes -// transferred excluding headers and is often, but not always, present as the -// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) -// header. For requests using transport encoding, this should be the compressed -// size. -func HTTPResponseContentLength(val int) attribute.KeyValue { - return HTTPResponseContentLengthKey.Int(val) -} - -// Semantic Convention for HTTP Client -const ( - // HTTPURLKey is the attribute Key conforming to the "http.url" semantic - // conventions. It represents the full HTTP request URL in the form - // `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is - // not transmitted over HTTP, but if it is known, it should be included - // nevertheless. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' - // Note: `http.url` MUST NOT contain credentials passed via URL in form of - // `https://username:password@www.example.com/`. In such case the - // attribute's value should be `https://www.example.com/`. - HTTPURLKey = attribute.Key("http.url") - - // HTTPResendCountKey is the attribute Key conforming to the - // "http.resend_count" semantic conventions. It represents the ordinal - // number of request resending attempt (for any reason, including - // redirects). 
- // - // Type: int - // RequirementLevel: Recommended (if and only if request was retried.) - // Stability: stable - // Examples: 3 - // Note: The resend count SHOULD be updated each time an HTTP request gets - // resent by the client, regardless of what was the cause of the resending - // (e.g. redirection, authorization failure, 503 Server Unavailable, - // network issues, or any other). - HTTPResendCountKey = attribute.Key("http.resend_count") -) - -// HTTPURL returns an attribute KeyValue conforming to the "http.url" -// semantic conventions. It represents the full HTTP request URL in the form -// `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is not -// transmitted over HTTP, but if it is known, it should be included -// nevertheless. -func HTTPURL(val string) attribute.KeyValue { - return HTTPURLKey.String(val) -} - -// HTTPResendCount returns an attribute KeyValue conforming to the -// "http.resend_count" semantic conventions. It represents the ordinal number -// of request resending attempt (for any reason, including redirects). -func HTTPResendCount(val int) attribute.KeyValue { - return HTTPResendCountKey.Int(val) -} - -// Semantic Convention for HTTP Server -const ( - // HTTPSchemeKey is the attribute Key conforming to the "http.scheme" - // semantic conventions. It represents the URI scheme identifying the used - // protocol. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'http', 'https' - HTTPSchemeKey = attribute.Key("http.scheme") - - // HTTPTargetKey is the attribute Key conforming to the "http.target" - // semantic conventions. It represents the full request target as passed in - // a HTTP request line or equivalent. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: '/path/12314/?q=ddds' - HTTPTargetKey = attribute.Key("http.target") - - // HTTPRouteKey is the attribute Key conforming to the "http.route" - // semantic conventions. 
It represents the matched route (path template in - // the format used by the respective server framework). See note below - // - // Type: string - // RequirementLevel: ConditionallyRequired (If and only if it's available) - // Stability: stable - // Examples: '/users/:userID?', '{controller}/{action}/{id?}' - // Note: 'http.route' MUST NOT be populated when this is not supported by - // the HTTP server framework as the route attribute should have - // low-cardinality and the URI path can NOT substitute it. - HTTPRouteKey = attribute.Key("http.route") - - // HTTPClientIPKey is the attribute Key conforming to the "http.client_ip" - // semantic conventions. It represents the IP address of the original - // client behind all proxies, if known (e.g. from - // [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '83.164.160.102' - // Note: This is not necessarily the same as `net.sock.peer.addr`, which - // would - // identify the network-level peer, which may be a proxy. - // - // This attribute should be set when a source of information different - // from the one used for `net.sock.peer.addr`, is available even if that - // other - // source just confirms the same value as `net.sock.peer.addr`. - // Rationale: For `net.sock.peer.addr`, one typically does not know if it - // comes from a proxy, reverse proxy, or the actual client. Setting - // `http.client_ip` when it's the same as `net.sock.peer.addr` means that - // one is at least somewhat confident that the address is not that of - // the closest proxy. - HTTPClientIPKey = attribute.Key("http.client_ip") -) - -// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme" -// semantic conventions. It represents the URI scheme identifying the used -// protocol. 
-func HTTPScheme(val string) attribute.KeyValue { - return HTTPSchemeKey.String(val) -} - -// HTTPTarget returns an attribute KeyValue conforming to the "http.target" -// semantic conventions. It represents the full request target as passed in a -// HTTP request line or equivalent. -func HTTPTarget(val string) attribute.KeyValue { - return HTTPTargetKey.String(val) -} - -// HTTPRoute returns an attribute KeyValue conforming to the "http.route" -// semantic conventions. It represents the matched route (path template in the -// format used by the respective server framework). See note below -func HTTPRoute(val string) attribute.KeyValue { - return HTTPRouteKey.String(val) -} - -// HTTPClientIP returns an attribute KeyValue conforming to the -// "http.client_ip" semantic conventions. It represents the IP address of the -// original client behind all proxies, if known (e.g. from -// [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)). -func HTTPClientIP(val string) attribute.KeyValue { - return HTTPClientIPKey.String(val) -} - -// Attributes that exist for multiple DynamoDB request types. -const ( - // AWSDynamoDBTableNamesKey is the attribute Key conforming to the - // "aws.dynamodb.table_names" semantic conventions. It represents the keys - // in the `RequestItems` object field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Users', 'Cats' - AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") - - // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the - // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the - // JSON-serialized value of each item in the `ConsumedCapacity` response - // field. 
- // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { - // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : - // { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": - // { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number }, "TableName": "string", - // "WriteCapacityUnits": number }' - AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") - - // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to - // the "aws.dynamodb.item_collection_metrics" semantic conventions. It - // represents the JSON-serialized value of the `ItemCollectionMetrics` - // response field. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": - // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { - // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], - // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, - // "SizeEstimateRangeGB": [ number ] } ] }' - AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") - - // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to - // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It - // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` - // request parameter. 
- // - // Type: double - // RequirementLevel: Optional - // Stability: stable - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") - - // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming - // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. - // It represents the value of the - // `ProvisionedThroughput.WriteCapacityUnits` request parameter. - // - // Type: double - // RequirementLevel: Optional - // Stability: stable - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") - - // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the - // "aws.dynamodb.consistent_read" semantic conventions. It represents the - // value of the `ConsistentRead` request parameter. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") - - // AWSDynamoDBProjectionKey is the attribute Key conforming to the - // "aws.dynamodb.projection" semantic conventions. It represents the value - // of the `ProjectionExpression` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Title', 'Title, Price, Color', 'Title, Description, - // RelatedItems, ProductReviews' - AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") - - // AWSDynamoDBLimitKey is the attribute Key conforming to the - // "aws.dynamodb.limit" semantic conventions. It represents the value of - // the `Limit` request parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 10 - AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") - - // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the - // "aws.dynamodb.attributes_to_get" semantic conventions. 
It represents the - // value of the `AttributesToGet` request parameter. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: 'lives', 'id' - AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") - - // AWSDynamoDBIndexNameKey is the attribute Key conforming to the - // "aws.dynamodb.index_name" semantic conventions. It represents the value - // of the `IndexName` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'name_to_group' - AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") - - // AWSDynamoDBSelectKey is the attribute Key conforming to the - // "aws.dynamodb.select" semantic conventions. It represents the value of - // the `Select` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'ALL_ATTRIBUTES', 'COUNT' - AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") -) - -// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the -// "aws.dynamodb.table_names" semantic conventions. It represents the keys in -// the `RequestItems` object field. -func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { - return AWSDynamoDBTableNamesKey.StringSlice(val) -} - -// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to -// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the -// JSON-serialized value of each item in the `ConsumedCapacity` response field. -func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { - return AWSDynamoDBConsumedCapacityKey.StringSlice(val) -} - -// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming -// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It -// represents the JSON-serialized value of the `ItemCollectionMetrics` response -// field. 
-func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { - return AWSDynamoDBItemCollectionMetricsKey.String(val) -} - -// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue -// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic -// conventions. It represents the value of the -// `ProvisionedThroughput.ReadCapacityUnits` request parameter. -func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { - return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) -} - -// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue -// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic -// conventions. It represents the value of the -// `ProvisionedThroughput.WriteCapacityUnits` request parameter. -func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { - return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) -} - -// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the -// "aws.dynamodb.consistent_read" semantic conventions. It represents the value -// of the `ConsistentRead` request parameter. -func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { - return AWSDynamoDBConsistentReadKey.Bool(val) -} - -// AWSDynamoDBProjection returns an attribute KeyValue conforming to the -// "aws.dynamodb.projection" semantic conventions. It represents the value of -// the `ProjectionExpression` request parameter. -func AWSDynamoDBProjection(val string) attribute.KeyValue { - return AWSDynamoDBProjectionKey.String(val) -} - -// AWSDynamoDBLimit returns an attribute KeyValue conforming to the -// "aws.dynamodb.limit" semantic conventions. It represents the value of the -// `Limit` request parameter. -func AWSDynamoDBLimit(val int) attribute.KeyValue { - return AWSDynamoDBLimitKey.Int(val) -} - -// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to -// the "aws.dynamodb.attributes_to_get" semantic conventions. 
It represents the -// value of the `AttributesToGet` request parameter. -func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { - return AWSDynamoDBAttributesToGetKey.StringSlice(val) -} - -// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the -// "aws.dynamodb.index_name" semantic conventions. It represents the value of -// the `IndexName` request parameter. -func AWSDynamoDBIndexName(val string) attribute.KeyValue { - return AWSDynamoDBIndexNameKey.String(val) -} - -// AWSDynamoDBSelect returns an attribute KeyValue conforming to the -// "aws.dynamodb.select" semantic conventions. It represents the value of the -// `Select` request parameter. -func AWSDynamoDBSelect(val string) attribute.KeyValue { - return AWSDynamoDBSelectKey.String(val) -} - -// DynamoDB.CreateTable -const ( - // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to - // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It - // represents the JSON-serialized value of each item of the - // `GlobalSecondaryIndexes` request field - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": - // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ - // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { - // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' - AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") - - // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to - // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It - // represents the JSON-serialized value of each item of the - // `LocalSecondaryIndexes` request field. 
- // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: '{ "IndexARN": "string", "IndexName": "string", - // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { - // "AttributeName": "string", "KeyType": "string" } ], "Projection": { - // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' - AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") -) - -// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue -// conforming to the "aws.dynamodb.global_secondary_indexes" semantic -// conventions. It represents the JSON-serialized value of each item of the -// `GlobalSecondaryIndexes` request field -func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { - return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) -} - -// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming -// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It -// represents the JSON-serialized value of each item of the -// `LocalSecondaryIndexes` request field. -func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { - return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) -} - -// DynamoDB.ListTables -const ( - // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the - // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents - // the value of the `ExclusiveStartTableName` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Users', 'CatsTable' - AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") - - // AWSDynamoDBTableCountKey is the attribute Key conforming to the - // "aws.dynamodb.table_count" semantic conventions. It represents the the - // number of items in the `TableNames` response parameter. 
- // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 20 - AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") -) - -// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming -// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It -// represents the value of the `ExclusiveStartTableName` request parameter. -func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { - return AWSDynamoDBExclusiveStartTableKey.String(val) -} - -// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.table_count" semantic conventions. It represents the the -// number of items in the `TableNames` response parameter. -func AWSDynamoDBTableCount(val int) attribute.KeyValue { - return AWSDynamoDBTableCountKey.Int(val) -} - -// DynamoDB.Query -const ( - // AWSDynamoDBScanForwardKey is the attribute Key conforming to the - // "aws.dynamodb.scan_forward" semantic conventions. It represents the - // value of the `ScanIndexForward` request parameter. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") -) - -// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the -// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of -// the `ScanIndexForward` request parameter. -func AWSDynamoDBScanForward(val bool) attribute.KeyValue { - return AWSDynamoDBScanForwardKey.Bool(val) -} - -// DynamoDB.Scan -const ( - // AWSDynamoDBSegmentKey is the attribute Key conforming to the - // "aws.dynamodb.segment" semantic conventions. It represents the value of - // the `Segment` request parameter. 
- // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 10 - AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") - - // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the - // "aws.dynamodb.total_segments" semantic conventions. It represents the - // value of the `TotalSegments` request parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 100 - AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") - - // AWSDynamoDBCountKey is the attribute Key conforming to the - // "aws.dynamodb.count" semantic conventions. It represents the value of - // the `Count` response parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 10 - AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") - - // AWSDynamoDBScannedCountKey is the attribute Key conforming to the - // "aws.dynamodb.scanned_count" semantic conventions. It represents the - // value of the `ScannedCount` response parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 50 - AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") -) - -// AWSDynamoDBSegment returns an attribute KeyValue conforming to the -// "aws.dynamodb.segment" semantic conventions. It represents the value of the -// `Segment` request parameter. -func AWSDynamoDBSegment(val int) attribute.KeyValue { - return AWSDynamoDBSegmentKey.Int(val) -} - -// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the -// "aws.dynamodb.total_segments" semantic conventions. It represents the value -// of the `TotalSegments` request parameter. -func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { - return AWSDynamoDBTotalSegmentsKey.Int(val) -} - -// AWSDynamoDBCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.count" semantic conventions. 
It represents the value of the -// `Count` response parameter. -func AWSDynamoDBCount(val int) attribute.KeyValue { - return AWSDynamoDBCountKey.Int(val) -} - -// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.scanned_count" semantic conventions. It represents the value -// of the `ScannedCount` response parameter. -func AWSDynamoDBScannedCount(val int) attribute.KeyValue { - return AWSDynamoDBScannedCountKey.Int(val) -} - -// DynamoDB.UpdateTable -const ( - // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to - // the "aws.dynamodb.attribute_definitions" semantic conventions. It - // represents the JSON-serialized value of each item in the - // `AttributeDefinitions` request field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' - AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") - - // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key - // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic - // conventions. It represents the JSON-serialized value of each item in the - // the `GlobalSecondaryIndexUpdates` request field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { - // "AttributeName": "string", "KeyType": "string" } ], "Projection": { - // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, - // "ProvisionedThroughput": { "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }' - AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") -) - -// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming -// to the "aws.dynamodb.attribute_definitions" semantic conventions. 
It -// represents the JSON-serialized value of each item in the -// `AttributeDefinitions` request field. -func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { - return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) -} - -// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue -// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic -// conventions. It represents the JSON-serialized value of each item in the the -// `GlobalSecondaryIndexUpdates` request field. -func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { - return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) -} - -// Semantic conventions to apply when instrumenting the GraphQL implementation. -// They map GraphQL operations to attributes on a Span. -const ( - // GraphqlOperationNameKey is the attribute Key conforming to the - // "graphql.operation.name" semantic conventions. It represents the name of - // the operation being executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'findBookByID' - GraphqlOperationNameKey = attribute.Key("graphql.operation.name") - - // GraphqlOperationTypeKey is the attribute Key conforming to the - // "graphql.operation.type" semantic conventions. It represents the type of - // the operation being executed. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'query', 'mutation', 'subscription' - GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") - - // GraphqlDocumentKey is the attribute Key conforming to the - // "graphql.document" semantic conventions. It represents the GraphQL - // document being executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'query findBookByID { bookByID(id: ?) { name } }' - // Note: The value may be sanitized to exclude sensitive information. 
- GraphqlDocumentKey = attribute.Key("graphql.document") -) - -var ( - // GraphQL query - GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") - // GraphQL mutation - GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") - // GraphQL subscription - GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") -) - -// GraphqlOperationName returns an attribute KeyValue conforming to the -// "graphql.operation.name" semantic conventions. It represents the name of the -// operation being executed. -func GraphqlOperationName(val string) attribute.KeyValue { - return GraphqlOperationNameKey.String(val) -} - -// GraphqlDocument returns an attribute KeyValue conforming to the -// "graphql.document" semantic conventions. It represents the GraphQL document -// being executed. -func GraphqlDocument(val string) attribute.KeyValue { - return GraphqlDocumentKey.String(val) -} - -// Semantic convention describing per-message attributes populated on messaging -// spans or links. -const ( - // MessagingMessageIDKey is the attribute Key conforming to the - // "messaging.message.id" semantic conventions. It represents a value used - // by the messaging system as an identifier for the message, represented as - // a string. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '452a7c7c7c7048c2f887f61572b18fc2' - MessagingMessageIDKey = attribute.Key("messaging.message.id") - - // MessagingMessageConversationIDKey is the attribute Key conforming to the - // "messaging.message.conversation_id" semantic conventions. It represents - // the [conversation ID](#conversations) identifying the conversation to - // which the message belongs, represented as a string. Sometimes called - // "Correlation ID". 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'MyConversationID' - MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") - - // MessagingMessagePayloadSizeBytesKey is the attribute Key conforming to - // the "messaging.message.payload_size_bytes" semantic conventions. It - // represents the (uncompressed) size of the message payload in bytes. Also - // use this attribute if it is unknown whether the compressed or - // uncompressed payload size is reported. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 2738 - MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes") - - // MessagingMessagePayloadCompressedSizeBytesKey is the attribute Key - // conforming to the "messaging.message.payload_compressed_size_bytes" - // semantic conventions. It represents the compressed size of the message - // payload in bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 2048 - MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes") -) - -// MessagingMessageID returns an attribute KeyValue conforming to the -// "messaging.message.id" semantic conventions. It represents a value used by -// the messaging system as an identifier for the message, represented as a -// string. -func MessagingMessageID(val string) attribute.KeyValue { - return MessagingMessageIDKey.String(val) -} - -// MessagingMessageConversationID returns an attribute KeyValue conforming -// to the "messaging.message.conversation_id" semantic conventions. It -// represents the [conversation ID](#conversations) identifying the -// conversation to which the message belongs, represented as a string. -// Sometimes called "Correlation ID". 
-func MessagingMessageConversationID(val string) attribute.KeyValue { - return MessagingMessageConversationIDKey.String(val) -} - -// MessagingMessagePayloadSizeBytes returns an attribute KeyValue conforming -// to the "messaging.message.payload_size_bytes" semantic conventions. It -// represents the (uncompressed) size of the message payload in bytes. Also use -// this attribute if it is unknown whether the compressed or uncompressed -// payload size is reported. -func MessagingMessagePayloadSizeBytes(val int) attribute.KeyValue { - return MessagingMessagePayloadSizeBytesKey.Int(val) -} - -// MessagingMessagePayloadCompressedSizeBytes returns an attribute KeyValue -// conforming to the "messaging.message.payload_compressed_size_bytes" semantic -// conventions. It represents the compressed size of the message payload in -// bytes. -func MessagingMessagePayloadCompressedSizeBytes(val int) attribute.KeyValue { - return MessagingMessagePayloadCompressedSizeBytesKey.Int(val) -} - -// Semantic convention for attributes that describe messaging destination on -// broker -const ( - // MessagingDestinationNameKey is the attribute Key conforming to the - // "messaging.destination.name" semantic conventions. It represents the - // message destination name - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'MyQueue', 'MyTopic' - // Note: Destination name SHOULD uniquely identify a specific queue, topic - // or other entity within the broker. If - // the broker does not have such notion, the destination name SHOULD - // uniquely identify the broker. - MessagingDestinationNameKey = attribute.Key("messaging.destination.name") - - // MessagingDestinationKindKey is the attribute Key conforming to the - // "messaging.destination.kind" semantic conventions. 
It represents the - // kind of message destination - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - MessagingDestinationKindKey = attribute.Key("messaging.destination.kind") - - // MessagingDestinationTemplateKey is the attribute Key conforming to the - // "messaging.destination.template" semantic conventions. It represents the - // low cardinality representation of the messaging destination name - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '/customers/{customerID}' - // Note: Destination names could be constructed from templates. An example - // would be a destination name involving a user name or product id. - // Although the destination name in this case is of high cardinality, the - // underlying template is of low cardinality and can be effectively used - // for grouping and aggregation. - MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") - - // MessagingDestinationTemporaryKey is the attribute Key conforming to the - // "messaging.destination.temporary" semantic conventions. It represents a - // boolean that is true if the message destination is temporary and might - // not exist anymore after messages are processed. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") - - // MessagingDestinationAnonymousKey is the attribute Key conforming to the - // "messaging.destination.anonymous" semantic conventions. It represents a - // boolean that is true if the message destination is anonymous (could be - // unnamed or have auto-generated name). 
- // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") -) - -var ( - // A message sent to a queue - MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue") - // A message sent to a topic - MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic") -) - -// MessagingDestinationName returns an attribute KeyValue conforming to the -// "messaging.destination.name" semantic conventions. It represents the message -// destination name -func MessagingDestinationName(val string) attribute.KeyValue { - return MessagingDestinationNameKey.String(val) -} - -// MessagingDestinationTemplate returns an attribute KeyValue conforming to -// the "messaging.destination.template" semantic conventions. It represents the -// low cardinality representation of the messaging destination name -func MessagingDestinationTemplate(val string) attribute.KeyValue { - return MessagingDestinationTemplateKey.String(val) -} - -// MessagingDestinationTemporary returns an attribute KeyValue conforming to -// the "messaging.destination.temporary" semantic conventions. It represents a -// boolean that is true if the message destination is temporary and might not -// exist anymore after messages are processed. -func MessagingDestinationTemporary(val bool) attribute.KeyValue { - return MessagingDestinationTemporaryKey.Bool(val) -} - -// MessagingDestinationAnonymous returns an attribute KeyValue conforming to -// the "messaging.destination.anonymous" semantic conventions. It represents a -// boolean that is true if the message destination is anonymous (could be -// unnamed or have auto-generated name). 
-func MessagingDestinationAnonymous(val bool) attribute.KeyValue { - return MessagingDestinationAnonymousKey.Bool(val) -} - -// Semantic convention for attributes that describe messaging source on broker -const ( - // MessagingSourceNameKey is the attribute Key conforming to the - // "messaging.source.name" semantic conventions. It represents the message - // source name - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'MyQueue', 'MyTopic' - // Note: Source name SHOULD uniquely identify a specific queue, topic, or - // other entity within the broker. If - // the broker does not have such notion, the source name SHOULD uniquely - // identify the broker. - MessagingSourceNameKey = attribute.Key("messaging.source.name") - - // MessagingSourceKindKey is the attribute Key conforming to the - // "messaging.source.kind" semantic conventions. It represents the kind of - // message source - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - MessagingSourceKindKey = attribute.Key("messaging.source.kind") - - // MessagingSourceTemplateKey is the attribute Key conforming to the - // "messaging.source.template" semantic conventions. It represents the low - // cardinality representation of the messaging source name - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '/customers/{customerID}' - // Note: Source names could be constructed from templates. An example would - // be a source name involving a user name or product id. Although the - // source name in this case is of high cardinality, the underlying template - // is of low cardinality and can be effectively used for grouping and - // aggregation. - MessagingSourceTemplateKey = attribute.Key("messaging.source.template") - - // MessagingSourceTemporaryKey is the attribute Key conforming to the - // "messaging.source.temporary" semantic conventions. 
It represents a - // boolean that is true if the message source is temporary and might not - // exist anymore after messages are processed. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - MessagingSourceTemporaryKey = attribute.Key("messaging.source.temporary") - - // MessagingSourceAnonymousKey is the attribute Key conforming to the - // "messaging.source.anonymous" semantic conventions. It represents a - // boolean that is true if the message source is anonymous (could be - // unnamed or have auto-generated name). - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - MessagingSourceAnonymousKey = attribute.Key("messaging.source.anonymous") -) - -var ( - // A message received from a queue - MessagingSourceKindQueue = MessagingSourceKindKey.String("queue") - // A message received from a topic - MessagingSourceKindTopic = MessagingSourceKindKey.String("topic") -) - -// MessagingSourceName returns an attribute KeyValue conforming to the -// "messaging.source.name" semantic conventions. It represents the message -// source name -func MessagingSourceName(val string) attribute.KeyValue { - return MessagingSourceNameKey.String(val) -} - -// MessagingSourceTemplate returns an attribute KeyValue conforming to the -// "messaging.source.template" semantic conventions. It represents the low -// cardinality representation of the messaging source name -func MessagingSourceTemplate(val string) attribute.KeyValue { - return MessagingSourceTemplateKey.String(val) -} - -// MessagingSourceTemporary returns an attribute KeyValue conforming to the -// "messaging.source.temporary" semantic conventions. It represents a boolean -// that is true if the message source is temporary and might not exist anymore -// after messages are processed. 
-func MessagingSourceTemporary(val bool) attribute.KeyValue { - return MessagingSourceTemporaryKey.Bool(val) -} - -// MessagingSourceAnonymous returns an attribute KeyValue conforming to the -// "messaging.source.anonymous" semantic conventions. It represents a boolean -// that is true if the message source is anonymous (could be unnamed or have -// auto-generated name). -func MessagingSourceAnonymous(val bool) attribute.KeyValue { - return MessagingSourceAnonymousKey.Bool(val) -} - -// General attributes used in messaging systems. -const ( - // MessagingSystemKey is the attribute Key conforming to the - // "messaging.system" semantic conventions. It represents a string - // identifying the messaging system. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS' - MessagingSystemKey = attribute.Key("messaging.system") - - // MessagingOperationKey is the attribute Key conforming to the - // "messaging.operation" semantic conventions. It represents a string - // identifying the kind of messaging operation as defined in the [Operation - // names](#operation-names) section above. - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - // Note: If a custom value is used, it MUST be of low cardinality. - MessagingOperationKey = attribute.Key("messaging.operation") - - // MessagingBatchMessageCountKey is the attribute Key conforming to the - // "messaging.batch.message_count" semantic conventions. It represents the - // number of messages sent, received, or processed in the scope of the - // batching operation. - // - // Type: int - // RequirementLevel: ConditionallyRequired (If the span describes an - // operation on a batch of messages.) - // Stability: stable - // Examples: 0, 1, 2 - // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on - // spans that operate with a single message. 
When a messaging client - // library supports both batch and single-message API for the same - // operation, instrumentations SHOULD use `messaging.batch.message_count` - // for batching APIs and SHOULD NOT use it for single-message APIs. - MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") -) - -var ( - // publish - MessagingOperationPublish = MessagingOperationKey.String("publish") - // receive - MessagingOperationReceive = MessagingOperationKey.String("receive") - // process - MessagingOperationProcess = MessagingOperationKey.String("process") -) - -// MessagingSystem returns an attribute KeyValue conforming to the -// "messaging.system" semantic conventions. It represents a string identifying -// the messaging system. -func MessagingSystem(val string) attribute.KeyValue { - return MessagingSystemKey.String(val) -} - -// MessagingBatchMessageCount returns an attribute KeyValue conforming to -// the "messaging.batch.message_count" semantic conventions. It represents the -// number of messages sent, received, or processed in the scope of the batching -// operation. -func MessagingBatchMessageCount(val int) attribute.KeyValue { - return MessagingBatchMessageCountKey.Int(val) -} - -// Semantic convention for a consumer of messages received from a messaging -// system -const ( - // MessagingConsumerIDKey is the attribute Key conforming to the - // "messaging.consumer.id" semantic conventions. It represents the - // identifier for the consumer receiving a message. For Kafka, set it to - // `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if - // both are present, or only `messaging.kafka.consumer.group`. For brokers, - // such as RabbitMQ and Artemis, set it to the `client_id` of the client - // consuming the message. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'mygroup - client-6' - MessagingConsumerIDKey = attribute.Key("messaging.consumer.id") -) - -// MessagingConsumerID returns an attribute KeyValue conforming to the -// "messaging.consumer.id" semantic conventions. It represents the identifier -// for the consumer receiving a message. For Kafka, set it to -// `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if both -// are present, or only `messaging.kafka.consumer.group`. For brokers, such as -// RabbitMQ and Artemis, set it to the `client_id` of the client consuming the -// message. -func MessagingConsumerID(val string) attribute.KeyValue { - return MessagingConsumerIDKey.String(val) -} - -// Attributes for RabbitMQ -const ( - // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key - // conforming to the "messaging.rabbitmq.destination.routing_key" semantic - // conventions. It represents the rabbitMQ message routing key. - // - // Type: string - // RequirementLevel: ConditionallyRequired (If not empty.) - // Stability: stable - // Examples: 'myKey' - MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") -) - -// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue -// conforming to the "messaging.rabbitmq.destination.routing_key" semantic -// conventions. It represents the rabbitMQ message routing key. -func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue { - return MessagingRabbitmqDestinationRoutingKeyKey.String(val) -} - -// Attributes for Apache Kafka -const ( - // MessagingKafkaMessageKeyKey is the attribute Key conforming to the - // "messaging.kafka.message.key" semantic conventions. It represents the - // message keys in Kafka are used for grouping alike messages to ensure - // they're processed on the same partition. They differ from - // `messaging.message.id` in that they're not unique. 
If the key is `null`, - // the attribute MUST NOT be set. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'myKey' - // Note: If the key type is not string, it's string representation has to - // be supplied for the attribute. If the key has no unambiguous, canonical - // string form, don't include its value. - MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") - - // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the - // "messaging.kafka.consumer.group" semantic conventions. It represents the - // name of the Kafka Consumer Group that is handling the message. Only - // applies to consumers, not producers. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'my-group' - MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group") - - // MessagingKafkaClientIDKey is the attribute Key conforming to the - // "messaging.kafka.client_id" semantic conventions. It represents the - // client ID for the Consumer or Producer that is handling the message. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'client-5' - MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id") - - // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to - // the "messaging.kafka.destination.partition" semantic conventions. It - // represents the partition the message is sent to. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 2 - MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition") - - // MessagingKafkaSourcePartitionKey is the attribute Key conforming to the - // "messaging.kafka.source.partition" semantic conventions. It represents - // the partition the message is received from. 
- // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 2 - MessagingKafkaSourcePartitionKey = attribute.Key("messaging.kafka.source.partition") - - // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the - // "messaging.kafka.message.offset" semantic conventions. It represents the - // offset of a record in the corresponding Kafka partition. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 42 - MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset") - - // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the - // "messaging.kafka.message.tombstone" semantic conventions. It represents - // a boolean that is true if the message is a tombstone. - // - // Type: boolean - // RequirementLevel: ConditionallyRequired (If value is `true`. When - // missing, the value is assumed to be `false`.) - // Stability: stable - MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") -) - -// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the -// "messaging.kafka.message.key" semantic conventions. It represents the -// message keys in Kafka are used for grouping alike messages to ensure they're -// processed on the same partition. They differ from `messaging.message.id` in -// that they're not unique. If the key is `null`, the attribute MUST NOT be -// set. -func MessagingKafkaMessageKey(val string) attribute.KeyValue { - return MessagingKafkaMessageKeyKey.String(val) -} - -// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to -// the "messaging.kafka.consumer.group" semantic conventions. It represents the -// name of the Kafka Consumer Group that is handling the message. Only applies -// to consumers, not producers. 
-func MessagingKafkaConsumerGroup(val string) attribute.KeyValue { - return MessagingKafkaConsumerGroupKey.String(val) -} - -// MessagingKafkaClientID returns an attribute KeyValue conforming to the -// "messaging.kafka.client_id" semantic conventions. It represents the client -// ID for the Consumer or Producer that is handling the message. -func MessagingKafkaClientID(val string) attribute.KeyValue { - return MessagingKafkaClientIDKey.String(val) -} - -// MessagingKafkaDestinationPartition returns an attribute KeyValue -// conforming to the "messaging.kafka.destination.partition" semantic -// conventions. It represents the partition the message is sent to. -func MessagingKafkaDestinationPartition(val int) attribute.KeyValue { - return MessagingKafkaDestinationPartitionKey.Int(val) -} - -// MessagingKafkaSourcePartition returns an attribute KeyValue conforming to -// the "messaging.kafka.source.partition" semantic conventions. It represents -// the partition the message is received from. -func MessagingKafkaSourcePartition(val int) attribute.KeyValue { - return MessagingKafkaSourcePartitionKey.Int(val) -} - -// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to -// the "messaging.kafka.message.offset" semantic conventions. It represents the -// offset of a record in the corresponding Kafka partition. -func MessagingKafkaMessageOffset(val int) attribute.KeyValue { - return MessagingKafkaMessageOffsetKey.Int(val) -} - -// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming -// to the "messaging.kafka.message.tombstone" semantic conventions. It -// represents a boolean that is true if the message is a tombstone. -func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { - return MessagingKafkaMessageTombstoneKey.Bool(val) -} - -// Attributes for Apache RocketMQ -const ( - // MessagingRocketmqNamespaceKey is the attribute Key conforming to the - // "messaging.rocketmq.namespace" semantic conventions. 
It represents the - // namespace of RocketMQ resources, resources in different namespaces are - // individual. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'myNamespace' - MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") - - // MessagingRocketmqClientGroupKey is the attribute Key conforming to the - // "messaging.rocketmq.client_group" semantic conventions. It represents - // the name of the RocketMQ producer/consumer group that is handling the - // message. The client type is identified by the SpanKind. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'myConsumerGroup' - MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") - - // MessagingRocketmqClientIDKey is the attribute Key conforming to the - // "messaging.rocketmq.client_id" semantic conventions. It represents the - // unique identifier for each client. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'myhost@8742@s8083jm' - MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id") - - // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key - // conforming to the "messaging.rocketmq.message.delivery_timestamp" - // semantic conventions. It represents the timestamp in milliseconds that - // the delay message is expected to be delivered to consumer. - // - // Type: int - // RequirementLevel: ConditionallyRequired (If the message type is delay - // and delay time level is not specified.) - // Stability: stable - // Examples: 1665987217045 - MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") - - // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key - // conforming to the "messaging.rocketmq.message.delay_time_level" semantic - // conventions. 
It represents the delay time level for delay message, which - // determines the message delay time. - // - // Type: int - // RequirementLevel: ConditionallyRequired (If the message type is delay - // and delivery timestamp is not specified.) - // Stability: stable - // Examples: 3 - MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") - - // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the - // "messaging.rocketmq.message.group" semantic conventions. It represents - // the it is essential for FIFO message. Messages that belong to the same - // message group are always processed one by one within the same consumer - // group. - // - // Type: string - // RequirementLevel: ConditionallyRequired (If the message type is FIFO.) - // Stability: stable - // Examples: 'myMessageGroup' - MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") - - // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the - // "messaging.rocketmq.message.type" semantic conventions. It represents - // the type of message. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") - - // MessagingRocketmqMessageTagKey is the attribute Key conforming to the - // "messaging.rocketmq.message.tag" semantic conventions. It represents the - // secondary classifier of message besides topic. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'tagA' - MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") - - // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the - // "messaging.rocketmq.message.keys" semantic conventions. It represents - // the key(s) of message, another way to mark message besides message id. 
- // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: 'keyA', 'keyB' - MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") - - // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to - // the "messaging.rocketmq.consumption_model" semantic conventions. It - // represents the model of message consumption. This only applies to - // consumer spans. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") -) - -var ( - // Normal message - MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") - // FIFO message - MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") - // Delay message - MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") - // Transaction message - MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") -) - -var ( - // Clustering consumption model - MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") - // Broadcasting consumption model - MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") -) - -// MessagingRocketmqNamespace returns an attribute KeyValue conforming to -// the "messaging.rocketmq.namespace" semantic conventions. It represents the -// namespace of RocketMQ resources, resources in different namespaces are -// individual. -func MessagingRocketmqNamespace(val string) attribute.KeyValue { - return MessagingRocketmqNamespaceKey.String(val) -} - -// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to -// the "messaging.rocketmq.client_group" semantic conventions. It represents -// the name of the RocketMQ producer/consumer group that is handling the -// message. 
The client type is identified by the SpanKind. -func MessagingRocketmqClientGroup(val string) attribute.KeyValue { - return MessagingRocketmqClientGroupKey.String(val) -} - -// MessagingRocketmqClientID returns an attribute KeyValue conforming to the -// "messaging.rocketmq.client_id" semantic conventions. It represents the -// unique identifier for each client. -func MessagingRocketmqClientID(val string) attribute.KeyValue { - return MessagingRocketmqClientIDKey.String(val) -} - -// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue -// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic -// conventions. It represents the timestamp in milliseconds that the delay -// message is expected to be delivered to consumer. -func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue { - return MessagingRocketmqMessageDeliveryTimestampKey.Int(val) -} - -// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue -// conforming to the "messaging.rocketmq.message.delay_time_level" semantic -// conventions. It represents the delay time level for delay message, which -// determines the message delay time. -func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue { - return MessagingRocketmqMessageDelayTimeLevelKey.Int(val) -} - -// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.group" semantic conventions. It represents -// the it is essential for FIFO message. Messages that belong to the same -// message group are always processed one by one within the same consumer -// group. -func MessagingRocketmqMessageGroup(val string) attribute.KeyValue { - return MessagingRocketmqMessageGroupKey.String(val) -} - -// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.tag" semantic conventions. It represents the -// secondary classifier of message besides topic. 
-func MessagingRocketmqMessageTag(val string) attribute.KeyValue { - return MessagingRocketmqMessageTagKey.String(val) -} - -// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.keys" semantic conventions. It represents -// the key(s) of message, another way to mark message besides message id. -func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue { - return MessagingRocketmqMessageKeysKey.StringSlice(val) -} - -// Semantic conventions for remote procedure calls. -const ( - // RPCSystemKey is the attribute Key conforming to the "rpc.system" - // semantic conventions. It represents a string identifying the remoting - // system. See below for a list of well-known identifiers. - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - RPCSystemKey = attribute.Key("rpc.system") - - // RPCServiceKey is the attribute Key conforming to the "rpc.service" - // semantic conventions. It represents the full (logical) name of the - // service being called, including its package name, if applicable. - // - // Type: string - // RequirementLevel: Recommended - // Stability: stable - // Examples: 'myservice.EchoService' - // Note: This is the logical name of the service from the RPC interface - // perspective, which can be different from the name of any implementing - // class. The `code.namespace` attribute may be used to store the latter - // (despite the attribute name, it may include a class name; e.g., class - // with method actually executing the call on the server side, RPC client - // stub class on the client side). - RPCServiceKey = attribute.Key("rpc.service") - - // RPCMethodKey is the attribute Key conforming to the "rpc.method" - // semantic conventions. It represents the name of the (logical) method - // being called, must be equal to the $method part in the span name. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: stable - // Examples: 'exampleMethod' - // Note: This is the logical name of the method from the RPC interface - // perspective, which can be different from the name of any implementing - // method/function. The `code.function` attribute may be used to store the - // latter (e.g., method actually executing the call on the server side, RPC - // client stub method on the client side). - RPCMethodKey = attribute.Key("rpc.method") -) - -var ( - // gRPC - RPCSystemGRPC = RPCSystemKey.String("grpc") - // Java RMI - RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") - // .NET WCF - RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") - // Apache Dubbo - RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") -) - -// RPCService returns an attribute KeyValue conforming to the "rpc.service" -// semantic conventions. It represents the full (logical) name of the service -// being called, including its package name, if applicable. -func RPCService(val string) attribute.KeyValue { - return RPCServiceKey.String(val) -} - -// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" -// semantic conventions. It represents the name of the (logical) method being -// called, must be equal to the $method part in the span name. -func RPCMethod(val string) attribute.KeyValue { - return RPCMethodKey.String(val) -} - -// Tech-specific attributes for gRPC. -const ( - // RPCGRPCStatusCodeKey is the attribute Key conforming to the - // "rpc.grpc.status_code" semantic conventions. It represents the [numeric - // status - // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of - // the gRPC request. 
- // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") -) - -var ( - // OK - RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) - // CANCELLED - RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) - // UNKNOWN - RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) - // INVALID_ARGUMENT - RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) - // DEADLINE_EXCEEDED - RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) - // NOT_FOUND - RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) - // ALREADY_EXISTS - RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) - // PERMISSION_DENIED - RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) - // RESOURCE_EXHAUSTED - RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) - // FAILED_PRECONDITION - RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) - // ABORTED - RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) - // OUT_OF_RANGE - RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) - // UNIMPLEMENTED - RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) - // INTERNAL - RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) - // UNAVAILABLE - RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) - // DATA_LOSS - RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) - // UNAUTHENTICATED - RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) -) - -// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). -const ( - // RPCJsonrpcVersionKey is the attribute Key conforming to the - // "rpc.jsonrpc.version" semantic conventions. It represents the protocol - // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 - // does not specify this, the value can be omitted. 
- // - // Type: string - // RequirementLevel: ConditionallyRequired (If other than the default - // version (`1.0`)) - // Stability: stable - // Examples: '2.0', '1.0' - RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") - - // RPCJsonrpcRequestIDKey is the attribute Key conforming to the - // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` - // property of request or response. Since protocol allows id to be int, - // string, `null` or missing (for notifications), value is expected to be - // cast to string for simplicity. Use empty string in case of `null` value. - // Omit entirely if this is a notification. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '10', 'request-7', '' - RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") - - // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the - // "rpc.jsonrpc.error_code" semantic conventions. It represents the - // `error.code` property of response if it is an error response. - // - // Type: int - // RequirementLevel: ConditionallyRequired (If response is not successful.) - // Stability: stable - // Examples: -32700, 100 - RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") - - // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the - // "rpc.jsonrpc.error_message" semantic conventions. It represents the - // `error.message` property of response if it is an error response. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Parse error', 'User already exists' - RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") -) - -// RPCJsonrpcVersion returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.version" semantic conventions. It represents the protocol -// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 -// does not specify this, the value can be omitted. 
-func RPCJsonrpcVersion(val string) attribute.KeyValue { - return RPCJsonrpcVersionKey.String(val) -} - -// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` -// property of request or response. Since protocol allows id to be int, string, -// `null` or missing (for notifications), value is expected to be cast to -// string for simplicity. Use empty string in case of `null` value. Omit -// entirely if this is a notification. -func RPCJsonrpcRequestID(val string) attribute.KeyValue { - return RPCJsonrpcRequestIDKey.String(val) -} - -// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.error_code" semantic conventions. It represents the -// `error.code` property of response if it is an error response. -func RPCJsonrpcErrorCode(val int) attribute.KeyValue { - return RPCJsonrpcErrorCodeKey.Int(val) -} - -// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.error_message" semantic conventions. It represents the -// `error.message` property of response if it is an error response. -func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { - return RPCJsonrpcErrorMessageKey.String(val) -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/doc.go deleted file mode 100644 index c0b1723f..00000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package semconv implements OpenTelemetry semantic conventions. -// -// OpenTelemetry semantic conventions are agreed standardized naming -// patterns for OpenTelemetry things. This package represents the conventions -// as of the v1.4.0 version of the OpenTelemetry specification. -package semconv // import "go.opentelemetry.io/otel/semconv/v1.4.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/exception.go deleted file mode 100644 index 311cbf21..00000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/exception.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.4.0" - -const ( - // ExceptionEventName is the name of the Span event representing an exception. 
- ExceptionEventName = "exception" -) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/http.go deleted file mode 100644 index 8d814edc..00000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/http.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.4.0" - -import ( - "net/http" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/semconv/internal" - "go.opentelemetry.io/otel/trace" -) - -// HTTP scheme attributes. 
-var ( - HTTPSchemeHTTP = HTTPSchemeKey.String("http") - HTTPSchemeHTTPS = HTTPSchemeKey.String("https") -) - -var sc = &internal.SemanticConventions{ - EnduserIDKey: EnduserIDKey, - HTTPClientIPKey: HTTPClientIPKey, - HTTPFlavorKey: HTTPFlavorKey, - HTTPHostKey: HTTPHostKey, - HTTPMethodKey: HTTPMethodKey, - HTTPRequestContentLengthKey: HTTPRequestContentLengthKey, - HTTPRouteKey: HTTPRouteKey, - HTTPSchemeHTTP: HTTPSchemeHTTP, - HTTPSchemeHTTPS: HTTPSchemeHTTPS, - HTTPServerNameKey: HTTPServerNameKey, - HTTPStatusCodeKey: HTTPStatusCodeKey, - HTTPTargetKey: HTTPTargetKey, - HTTPURLKey: HTTPURLKey, - HTTPUserAgentKey: HTTPUserAgentKey, - NetHostIPKey: NetHostIPKey, - NetHostNameKey: NetHostNameKey, - NetHostPortKey: NetHostPortKey, - NetPeerIPKey: NetPeerIPKey, - NetPeerNameKey: NetPeerNameKey, - NetPeerPortKey: NetPeerPortKey, - NetTransportIP: NetTransportIP, - NetTransportOther: NetTransportOther, - NetTransportTCP: NetTransportTCP, - NetTransportUDP: NetTransportUDP, - NetTransportUnix: NetTransportUnix, -} - -// NetAttributesFromHTTPRequest generates attributes of the net -// namespace as specified by the OpenTelemetry specification for a -// span. The network parameter is a string that net.Dial function -// from standard library can understand. -func NetAttributesFromHTTPRequest(network string, request *http.Request) []attribute.KeyValue { - return sc.NetAttributesFromHTTPRequest(network, request) -} - -// EndUserAttributesFromHTTPRequest generates attributes of the -// enduser namespace as specified by the OpenTelemetry specification -// for a span. -func EndUserAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { - return sc.EndUserAttributesFromHTTPRequest(request) -} - -// HTTPClientAttributesFromHTTPRequest generates attributes of the -// http namespace as specified by the OpenTelemetry specification for -// a span on the client side. 
-func HTTPClientAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { - return sc.HTTPClientAttributesFromHTTPRequest(request) -} - -// HTTPServerMetricAttributesFromHTTPRequest generates low-cardinality attributes -// to be used with server-side HTTP metrics. -func HTTPServerMetricAttributesFromHTTPRequest(serverName string, request *http.Request) []attribute.KeyValue { - return sc.HTTPServerMetricAttributesFromHTTPRequest(serverName, request) -} - -// HTTPServerAttributesFromHTTPRequest generates attributes of the -// http namespace as specified by the OpenTelemetry specification for -// a span on the server side. Currently, only basic authentication is -// supported. -func HTTPServerAttributesFromHTTPRequest(serverName, route string, request *http.Request) []attribute.KeyValue { - return sc.HTTPServerAttributesFromHTTPRequest(serverName, route, request) -} - -// HTTPAttributesFromHTTPStatusCode generates attributes of the http -// namespace as specified by the OpenTelemetry specification for a -// span. -func HTTPAttributesFromHTTPStatusCode(code int) []attribute.KeyValue { - return sc.HTTPAttributesFromHTTPStatusCode(code) -} - -// SpanStatusFromHTTPStatusCode generates a status code and a message -// as specified by the OpenTelemetry specification for a span. -func SpanStatusFromHTTPStatusCode(code int) (codes.Code, string) { - return internal.SpanStatusFromHTTPStatusCode(code) -} - -// SpanStatusFromHTTPStatusCodeAndSpanKind generates a status code and a message -// as specified by the OpenTelemetry specification for a span. -// Exclude 4xx for SERVER to set the appropriate status. 
-func SpanStatusFromHTTPStatusCodeAndSpanKind(code int, spanKind trace.SpanKind) (codes.Code, string) { - return internal.SpanStatusFromHTTPStatusCodeAndSpanKind(code, spanKind) -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/resource.go deleted file mode 100644 index 404bd4e7..00000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/resource.go +++ /dev/null @@ -1,906 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.4.0" - -import "go.opentelemetry.io/otel/attribute" - -// A cloud environment (e.g. GCP, Azure, AWS) -const ( - // Name of the cloud provider. - // - // Type: Enum - // Required: No - // Stability: stable - // Examples: 'gcp' - CloudProviderKey = attribute.Key("cloud.provider") - // The cloud account ID the resource is assigned to. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '111111111111', 'opentelemetry' - CloudAccountIDKey = attribute.Key("cloud.account.id") - // The geographical region the resource is running. 
Refer to your provider's docs - // to see the available regions, for example [AWS - // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), - // [Azure regions](https://azure.microsoft.com/en-us/global- - // infrastructure/geographies/), or [Google Cloud - // regions](https://cloud.google.com/about/locations). - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'us-central1', 'us-east-1' - CloudRegionKey = attribute.Key("cloud.region") - // Cloud regions often have multiple, isolated locations known as zones to - // increase availability. Availability zone represents the zone where the resource - // is running. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'us-east-1c' - // Note: Availability zones are called "zones" on Google Cloud. - CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") - // The cloud platform in use. - // - // Type: Enum - // Required: No - // Stability: stable - // Examples: 'aws_ec2', 'azure_vm', 'gcp_compute_engine' - // Note: The prefix of the service SHOULD match the one specified in - // `cloud.provider`. 
- CloudPlatformKey = attribute.Key("cloud.platform") -) - -var ( - // Amazon Web Services - CloudProviderAWS = CloudProviderKey.String("aws") - // Microsoft Azure - CloudProviderAzure = CloudProviderKey.String("azure") - // Google Cloud Platform - CloudProviderGCP = CloudProviderKey.String("gcp") -) - -var ( - // AWS Elastic Compute Cloud - CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") - // AWS Elastic Container Service - CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") - // AWS Elastic Kubernetes Service - CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") - // AWS Lambda - CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") - // AWS Elastic Beanstalk - CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") - // Azure Virtual Machines - CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") - // Azure Container Instances - CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") - // Azure Kubernetes Service - CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") - // Azure Functions - CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") - // Azure App Service - CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") - // Google Cloud Compute Engine (GCE) - CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") - // Google Cloud Run - CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") - // Google Cloud Kubernetes Engine (GKE) - CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") - // Google Cloud Functions (GCF) - CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") - // Google Cloud App Engine (GAE) - CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") -) - -// Resources used by AWS Elastic Container Service (ECS). 
-const ( - // The Amazon Resource Name (ARN) of an [ECS container instance](https://docs.aws. - // amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'arn:aws:ecs:us- - // west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' - AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") - // The ARN of an [ECS cluster](https://docs.aws.amazon.com/AmazonECS/latest/develo - // perguide/clusters.html). - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' - AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") - // The [launch type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/l - // aunch_types.html) for an ECS task. - // - // Type: Enum - // Required: No - // Stability: stable - // Examples: 'ec2', 'fargate' - AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") - // The ARN of an [ECS task definition](https://docs.aws.amazon.com/AmazonECS/lates - // t/developerguide/task_definitions.html). - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'arn:aws:ecs:us- - // west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' - AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") - // The task definition family this task definition is a member of. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry-family' - AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") - // The revision for this task definition. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '8', '26' - AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") -) - -var ( - // ec2 - AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") - // fargate - AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") -) - -// Resources used by AWS Elastic Kubernetes Service (EKS). 
-const ( - // The ARN of an EKS cluster. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' - AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") -) - -// Resources specific to Amazon Web Services. -const ( - // The name(s) of the AWS log group(s) an application is writing to. - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: '/aws/lambda/my-function', 'opentelemetry-service' - // Note: Multiple log groups must be supported for cases like multi-container - // applications, where a single application has sidecar containers, and each write - // to their own log group. - AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") - // The Amazon Resource Name(s) (ARN) of the AWS log group(s). - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' - // Note: See the [log group ARN format - // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam- - // access-control-overview-cwl.html#CWL_ARN_Format). - AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") - // The name(s) of the AWS log stream(s) an application is writing to. - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' - AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") - // The ARN(s) of the AWS log stream(s). - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log- - // stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' - // Note: See the [log stream ARN format - // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam- - // access-control-overview-cwl.html#CWL_ARN_Format). 
One log group can contain - // several log streams, so these ARNs necessarily identify both a log group and a - // log stream. - AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") -) - -// A container instance. -const ( - // Container name. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry-autoconf' - ContainerNameKey = attribute.Key("container.name") - // Container ID. Usually a UUID, as for example used to [identify Docker - // containers](https://docs.docker.com/engine/reference/run/#container- - // identification). The UUID might be abbreviated. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'a3bf90e006b2' - ContainerIDKey = attribute.Key("container.id") - // The container runtime managing this container. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'docker', 'containerd', 'rkt' - ContainerRuntimeKey = attribute.Key("container.runtime") - // Name of the image the container was built on. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'gcr.io/opentelemetry/operator' - ContainerImageNameKey = attribute.Key("container.image.name") - // Container image tag. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '0.1' - ContainerImageTagKey = attribute.Key("container.image.tag") -) - -// The software deployment. -const ( - // Name of the [deployment - // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka - // deployment tier). - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'staging', 'production' - DeploymentEnvironmentKey = attribute.Key("deployment.environment") -) - -// The device on which the process represented by this resource is running. 
-const ( - // A unique identifier representing the device - // - // Type: string - // Required: No - // Stability: stable - // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' - // Note: The device identifier MUST only be defined using the values outlined - // below. This value is not an advertising identifier and MUST NOT be used as - // such. On iOS (Swift or Objective-C), this value MUST be equal to the [vendor id - // entifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-iden - // tifierforvendor). On Android (Java or Kotlin), this value MUST be equal to the - // Firebase Installation ID or a globally unique UUID which is persisted across - // sessions in your application. More information can be found - // [here](https://developer.android.com/training/articles/user-data-ids) on best - // practices and exact implementation details. Caution should be taken when - // storing personal data or anything which can identify a user. GDPR and data - // protection laws may apply, ensure you do your own due diligence. - DeviceIDKey = attribute.Key("device.id") - // The model identifier for the device - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'iPhone3,4', 'SM-G920F' - // Note: It's recommended this value represents a machine readable version of the - // model identifier rather than the market or consumer-friendly name of the - // device. - DeviceModelIdentifierKey = attribute.Key("device.model.identifier") - // The marketing name for the device model - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' - // Note: It's recommended this value represents a human readable version of the - // device model rather than a machine readable alternative. - DeviceModelNameKey = attribute.Key("device.model.name") -) - -// A serverless instance. -const ( - // The name of the function being executed. 
- // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'my-function' - FaaSNameKey = attribute.Key("faas.name") - // The unique ID of the function being executed. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function' - // Note: For example, in AWS Lambda this field corresponds to the - // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and- - // namespaces.html) value, in GCP to the URI of the resource, and in Azure to the - // [FunctionDirectory](https://github.com/Azure/azure-functions- - // host/wiki/Retrieving-information-about-the-currently-running-function) field. - FaaSIDKey = attribute.Key("faas.id") - // The version string of the function being executed as defined in [Version - // Attributes](../../resource/semantic_conventions/README.md#version-attributes). - // - // Type: string - // Required: No - // Stability: stable - // Examples: '2.0.0' - FaaSVersionKey = attribute.Key("faas.version") - // The execution environment ID as a string. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'my-function:instance-0001' - FaaSInstanceKey = attribute.Key("faas.instance") - // The amount of memory available to the serverless function in MiB. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 128 - // Note: It's recommended to set this attribute since e.g. too little memory can - // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, - // the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this - // information. - FaaSMaxMemoryKey = attribute.Key("faas.max_memory") -) - -// A host is defined as a general computing instance. -const ( - // Unique host ID. For Cloud, this must be the instance_id assigned by the cloud - // provider. 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry-test' - HostIDKey = attribute.Key("host.id") - // Name of the host. On Unix systems, it may contain what the hostname command - // returns, or the fully qualified hostname, or another name specified by the - // user. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry-test' - HostNameKey = attribute.Key("host.name") - // Type of host. For Cloud, this must be the machine type. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'n1-standard-1' - HostTypeKey = attribute.Key("host.type") - // The CPU architecture the host system is running on. - // - // Type: Enum - // Required: No - // Stability: stable - HostArchKey = attribute.Key("host.arch") - // Name of the VM image or OS install the host was instantiated from. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' - HostImageNameKey = attribute.Key("host.image.name") - // VM image ID. For Cloud, this value is from the provider. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'ami-07b06b442921831e5' - HostImageIDKey = attribute.Key("host.image.id") - // The version string of the VM image as defined in [Version - // Attributes](README.md#version-attributes). 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: '0.1' - HostImageVersionKey = attribute.Key("host.image.version") -) - -var ( - // AMD64 - HostArchAMD64 = HostArchKey.String("amd64") - // ARM32 - HostArchARM32 = HostArchKey.String("arm32") - // ARM64 - HostArchARM64 = HostArchKey.String("arm64") - // Itanium - HostArchIA64 = HostArchKey.String("ia64") - // 32-bit PowerPC - HostArchPPC32 = HostArchKey.String("ppc32") - // 64-bit PowerPC - HostArchPPC64 = HostArchKey.String("ppc64") - // 32-bit x86 - HostArchX86 = HostArchKey.String("x86") -) - -// A Kubernetes Cluster. -const ( - // The name of the cluster. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry-cluster' - K8SClusterNameKey = attribute.Key("k8s.cluster.name") -) - -// A Kubernetes Node object. -const ( - // The name of the Node. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'node-1' - K8SNodeNameKey = attribute.Key("k8s.node.name") - // The UID of the Node. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' - K8SNodeUIDKey = attribute.Key("k8s.node.uid") -) - -// A Kubernetes Namespace. -const ( - // The name of the namespace that the pod is running in. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'default' - K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") -) - -// A Kubernetes Pod object. -const ( - // The UID of the Pod. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SPodUIDKey = attribute.Key("k8s.pod.uid") - // The name of the Pod. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry-pod-autoconf' - K8SPodNameKey = attribute.Key("k8s.pod.name") -) - -// A container in a [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). 
-const ( - // The name of the Container in a Pod template. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'redis' - K8SContainerNameKey = attribute.Key("k8s.container.name") -) - -// A Kubernetes ReplicaSet object. -const ( - // The UID of the ReplicaSet. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SReplicasetUIDKey = attribute.Key("k8s.replicaset.uid") - // The name of the ReplicaSet. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry' - K8SReplicasetNameKey = attribute.Key("k8s.replicaset.name") -) - -// A Kubernetes Deployment object. -const ( - // The UID of the Deployment. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") - // The name of the Deployment. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry' - K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") -) - -// A Kubernetes StatefulSet object. -const ( - // The UID of the StatefulSet. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SStatefulsetUIDKey = attribute.Key("k8s.statefulset.uid") - // The name of the StatefulSet. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry' - K8SStatefulsetNameKey = attribute.Key("k8s.statefulset.name") -) - -// A Kubernetes DaemonSet object. -const ( - // The UID of the DaemonSet. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SDaemonsetUIDKey = attribute.Key("k8s.daemonset.uid") - // The name of the DaemonSet. 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry' - K8SDaemonsetNameKey = attribute.Key("k8s.daemonset.name") -) - -// A Kubernetes Job object. -const ( - // The UID of the Job. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SJobUIDKey = attribute.Key("k8s.job.uid") - // The name of the Job. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry' - K8SJobNameKey = attribute.Key("k8s.job.name") -) - -// A Kubernetes CronJob object. -const ( - // The UID of the CronJob. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") - // The name of the CronJob. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry' - K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") -) - -// The operating system (OS) on which the process represented by this resource is running. -const ( - // The operating system type. - // - // Type: Enum - // Required: Always - // Stability: stable - OSTypeKey = attribute.Key("os.type") - // Human readable (not intended to be parsed) OS version information, like e.g. - // reported by `ver` or `lsb_release -a` commands. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 LTS' - OSDescriptionKey = attribute.Key("os.description") - // Human readable operating system name. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'iOS', 'Android', 'Ubuntu' - OSNameKey = attribute.Key("os.name") - // The version string of the operating system as defined in [Version - // Attributes](../../resource/semantic_conventions/README.md#version-attributes). 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: '14.2.1', '18.04.1' - OSVersionKey = attribute.Key("os.version") -) - -var ( - // Microsoft Windows - OSTypeWindows = OSTypeKey.String("windows") - // Linux - OSTypeLinux = OSTypeKey.String("linux") - // Apple Darwin - OSTypeDarwin = OSTypeKey.String("darwin") - // FreeBSD - OSTypeFreeBSD = OSTypeKey.String("freebsd") - // NetBSD - OSTypeNetBSD = OSTypeKey.String("netbsd") - // OpenBSD - OSTypeOpenBSD = OSTypeKey.String("openbsd") - // DragonFly BSD - OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") - // HP-UX (Hewlett Packard Unix) - OSTypeHPUX = OSTypeKey.String("hpux") - // AIX (Advanced Interactive eXecutive) - OSTypeAIX = OSTypeKey.String("aix") - // Oracle Solaris - OSTypeSolaris = OSTypeKey.String("solaris") - // IBM z/OS - OSTypeZOS = OSTypeKey.String("z_os") -) - -// An operating system process. -const ( - // Process identifier (PID). - // - // Type: int - // Required: No - // Stability: stable - // Examples: 1234 - ProcessPIDKey = attribute.Key("process.pid") - // The name of the process executable. On Linux based systems, can be set to the - // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name of - // `GetProcessImageFileNameW`. - // - // Type: string - // Required: See below - // Stability: stable - // Examples: 'otelcol' - ProcessExecutableNameKey = attribute.Key("process.executable.name") - // The full path to the process executable. On Linux based systems, can be set to - // the target of `proc/[pid]/exe`. On Windows, can be set to the result of - // `GetProcessImageFileNameW`. - // - // Type: string - // Required: See below - // Stability: stable - // Examples: '/usr/bin/cmd/otelcol' - ProcessExecutablePathKey = attribute.Key("process.executable.path") - // The command used to launch the process (i.e. the command name). On Linux based - // systems, can be set to the zeroth string in `proc/[pid]/cmdline`. 
On Windows, - // can be set to the first parameter extracted from `GetCommandLineW`. - // - // Type: string - // Required: See below - // Stability: stable - // Examples: 'cmd/otelcol' - ProcessCommandKey = attribute.Key("process.command") - // The full command used to launch the process as a single string representing the - // full command. On Windows, can be set to the result of `GetCommandLineW`. Do not - // set this if you have to assemble it just for monitoring; use - // `process.command_args` instead. - // - // Type: string - // Required: See below - // Stability: stable - // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' - ProcessCommandLineKey = attribute.Key("process.command_line") - // All the command arguments (including the command/executable itself) as received - // by the process. On Linux-based systems (and some other Unixoid systems - // supporting procfs), can be set according to the list of null-delimited strings - // extracted from `proc/[pid]/cmdline`. For libc-based executables, this would be - // the full argv vector passed to `main`. - // - // Type: string[] - // Required: See below - // Stability: stable - // Examples: 'cmd/otecol', '--config=config.yaml' - ProcessCommandArgsKey = attribute.Key("process.command_args") - // The username of the user that owns the process. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'root' - ProcessOwnerKey = attribute.Key("process.owner") -) - -// The single (language) runtime instance which is monitored. -const ( - // The name of the runtime of this process. For compiled native binaries, this - // SHOULD be the name of the compiler. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'OpenJDK Runtime Environment' - ProcessRuntimeNameKey = attribute.Key("process.runtime.name") - // The version of the runtime of this process, as returned by the runtime without - // modification. 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: '14.0.2' - ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") - // An additional description about the runtime of the process, for example a - // specific vendor customization of the runtime environment. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' - ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") -) - -// A service instance. -const ( - // Logical name of the service. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'shoppingcart' - // Note: MUST be the same for all instances of horizontally scaled services. If - // the value was not specified, SDKs MUST fallback to `unknown_service:` - // concatenated with [`process.executable.name`](process.md#process), e.g. - // `unknown_service:bash`. If `process.executable.name` is not available, the - // value MUST be set to `unknown_service`. - ServiceNameKey = attribute.Key("service.name") - // A namespace for `service.name`. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Shop' - // Note: A string value having a meaning that helps to distinguish a group of - // services, for example the team name that owns a group of services. - // `service.name` is expected to be unique within the same namespace. If - // `service.namespace` is not specified in the Resource then `service.name` is - // expected to be unique for all services that have no explicit namespace defined - // (so the empty/unspecified namespace is simply one more valid namespace). Zero- - // length namespace string is assumed equal to unspecified namespace. - ServiceNamespaceKey = attribute.Key("service.namespace") - // The string ID of the service instance. 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: '627cc493-f310-47de-96bd-71410b7dec09' - // Note: MUST be unique for each instance of the same - // `service.namespace,service.name` pair (in other words - // `service.namespace,service.name,service.instance.id` triplet MUST be globally - // unique). The ID helps to distinguish instances of the same service that exist - // at the same time (e.g. instances of a horizontally scaled service). It is - // preferable for the ID to be persistent and stay the same for the lifetime of - // the service instance, however it is acceptable that the ID is ephemeral and - // changes during important lifetime events for the service (e.g. service - // restarts). If the service has no inherent unique ID that can be used as the - // value of this attribute it is recommended to generate a random Version 1 or - // Version 4 RFC 4122 UUID (services aiming for reproducible UUIDs may also use - // Version 5, see RFC 4122 for more recommendations). - ServiceInstanceIDKey = attribute.Key("service.instance.id") - // The version string of the service API or implementation. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '2.0.0' - ServiceVersionKey = attribute.Key("service.version") -) - -// The telemetry SDK used to capture data recorded by the instrumentation libraries. -const ( - // The name of the telemetry SDK as defined above. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry' - TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") - // The language of the telemetry SDK. - // - // Type: Enum - // Required: No - // Stability: stable - TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") - // The version string of the telemetry SDK. 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: '1.2.3' - TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") - // The version string of the auto instrumentation agent, if used. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '1.2.3' - TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") -) - -var ( - // cpp - TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") - // dotnet - TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") - // erlang - TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") - // go - TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") - // java - TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") - // nodejs - TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") - // php - TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") - // python - TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") - // ruby - TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") - // webjs - TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") -) - -// Resource describing the packaged software running the application code. Web engines are typically executed using process.runtime. -const ( - // The name of the web engine. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'WildFly' - WebEngineNameKey = attribute.Key("webengine.name") - // The version of the web engine. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '21.0.0' - WebEngineVersionKey = attribute.Key("webengine.version") - // Additional description of the web engine (e.g. detailed version and edition - // information). 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - 2.2.2.Final' - WebEngineDescriptionKey = attribute.Key("webengine.description") -) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/schema.go deleted file mode 100644 index a78f1bf4..00000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/schema.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.4.0" - -// SchemaURL is the schema URL that matches the version of the semantic conventions -// that this package defines. Semconv packages starting from v1.4.0 must declare -// non-empty schema URL in the form https://opentelemetry.io/schemas/ -const SchemaURL = "https://opentelemetry.io/schemas/1.4.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/trace.go deleted file mode 100644 index 805eadc9..00000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/trace.go +++ /dev/null @@ -1,1378 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.4.0" - -import "go.opentelemetry.io/otel/attribute" - -// This document defines the attributes used to perform database client calls. -const ( - // An identifier for the database management system (DBMS) product being used. See - // below for a list of well-known identifiers. - // - // Type: Enum - // Required: Always - // Stability: stable - DBSystemKey = attribute.Key("db.system") - // The connection string used to connect to the database. It is recommended to - // remove embedded credentials. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' - DBConnectionStringKey = attribute.Key("db.connection_string") - // Username for accessing the database. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'readonly_user', 'reporting_user' - DBUserKey = attribute.Key("db.user") - // The fully-qualified class name of the [Java Database Connectivity - // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver - // used to connect. 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: 'org.postgresql.Driver', - // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' - DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") - // If no [tech-specific attribute](#call-level-attributes-for-specific- - // technologies) is defined, this attribute is used to report the name of the - // database being accessed. For commands that switch the database, this should be - // set to the target database (even if the command fails). - // - // Type: string - // Required: Required, if applicable and no more-specific attribute is defined. - // Stability: stable - // Examples: 'customers', 'main' - // Note: In some SQL databases, the database name to be used is called "schema - // name". - DBNameKey = attribute.Key("db.name") - // The database statement being executed. - // - // Type: string - // Required: Required if applicable and not explicitly disabled via - // instrumentation configuration. - // Stability: stable - // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' - // Note: The value may be sanitized to exclude sensitive information. - DBStatementKey = attribute.Key("db.statement") - // The name of the operation being executed, e.g. the [MongoDB command - // name](https://docs.mongodb.com/manual/reference/command/#database-operations) - // such as `findAndModify`, or the SQL keyword. - // - // Type: string - // Required: Required, if `db.statement` is not applicable. - // Stability: stable - // Examples: 'findAndModify', 'HMSET', 'SELECT' - // Note: When setting this to an SQL keyword, it is not recommended to attempt any - // client-side parsing of `db.statement` just to get this property, but it should - // be set if the operation name is provided by the library being instrumented. If - // the SQL statement has an ambiguous operation, or performs more than one - // operation, this value may be omitted. 
- DBOperationKey = attribute.Key("db.operation") -) - -var ( - // Some other SQL database. Fallback only. See notes - DBSystemOtherSQL = DBSystemKey.String("other_sql") - // Microsoft SQL Server - DBSystemMSSQL = DBSystemKey.String("mssql") - // MySQL - DBSystemMySQL = DBSystemKey.String("mysql") - // Oracle Database - DBSystemOracle = DBSystemKey.String("oracle") - // IBM DB2 - DBSystemDB2 = DBSystemKey.String("db2") - // PostgreSQL - DBSystemPostgreSQL = DBSystemKey.String("postgresql") - // Amazon Redshift - DBSystemRedshift = DBSystemKey.String("redshift") - // Apache Hive - DBSystemHive = DBSystemKey.String("hive") - // Cloudscape - DBSystemCloudscape = DBSystemKey.String("cloudscape") - // HyperSQL DataBase - DBSystemHSQLDB = DBSystemKey.String("hsqldb") - // Progress Database - DBSystemProgress = DBSystemKey.String("progress") - // SAP MaxDB - DBSystemMaxDB = DBSystemKey.String("maxdb") - // SAP HANA - DBSystemHanaDB = DBSystemKey.String("hanadb") - // Ingres - DBSystemIngres = DBSystemKey.String("ingres") - // FirstSQL - DBSystemFirstSQL = DBSystemKey.String("firstsql") - // EnterpriseDB - DBSystemEDB = DBSystemKey.String("edb") - // InterSystems Caché - DBSystemCache = DBSystemKey.String("cache") - // Adabas (Adaptable Database System) - DBSystemAdabas = DBSystemKey.String("adabas") - // Firebird - DBSystemFirebird = DBSystemKey.String("firebird") - // Apache Derby - DBSystemDerby = DBSystemKey.String("derby") - // FileMaker - DBSystemFilemaker = DBSystemKey.String("filemaker") - // Informix - DBSystemInformix = DBSystemKey.String("informix") - // InstantDB - DBSystemInstantDB = DBSystemKey.String("instantdb") - // InterBase - DBSystemInterbase = DBSystemKey.String("interbase") - // MariaDB - DBSystemMariaDB = DBSystemKey.String("mariadb") - // Netezza - DBSystemNetezza = DBSystemKey.String("netezza") - // Pervasive PSQL - DBSystemPervasive = DBSystemKey.String("pervasive") - // PointBase - DBSystemPointbase = DBSystemKey.String("pointbase") - // SQLite - 
DBSystemSqlite = DBSystemKey.String("sqlite") - // Sybase - DBSystemSybase = DBSystemKey.String("sybase") - // Teradata - DBSystemTeradata = DBSystemKey.String("teradata") - // Vertica - DBSystemVertica = DBSystemKey.String("vertica") - // H2 - DBSystemH2 = DBSystemKey.String("h2") - // ColdFusion IMQ - DBSystemColdfusion = DBSystemKey.String("coldfusion") - // Apache Cassandra - DBSystemCassandra = DBSystemKey.String("cassandra") - // Apache HBase - DBSystemHBase = DBSystemKey.String("hbase") - // MongoDB - DBSystemMongoDB = DBSystemKey.String("mongodb") - // Redis - DBSystemRedis = DBSystemKey.String("redis") - // Couchbase - DBSystemCouchbase = DBSystemKey.String("couchbase") - // CouchDB - DBSystemCouchDB = DBSystemKey.String("couchdb") - // Microsoft Azure Cosmos DB - DBSystemCosmosDB = DBSystemKey.String("cosmosdb") - // Amazon DynamoDB - DBSystemDynamoDB = DBSystemKey.String("dynamodb") - // Neo4j - DBSystemNeo4j = DBSystemKey.String("neo4j") - // Apache Geode - DBSystemGeode = DBSystemKey.String("geode") - // Elasticsearch - DBSystemElasticsearch = DBSystemKey.String("elasticsearch") - // Memcached - DBSystemMemcached = DBSystemKey.String("memcached") - // CockroachDB - DBSystemCockroachdb = DBSystemKey.String("cockroachdb") -) - -// Connection-level attributes for Microsoft SQL Server -const ( - // The Microsoft SQL Server [instance name](https://docs.microsoft.com/en- - // us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) - // connecting to. This name is used to determine the port of a named instance. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'MSSQLSERVER' - // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no longer - // required (but still recommended if non-standard). - DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") -) - -// Call-level attributes for Cassandra -const ( - // The name of the keyspace being accessed. 
To be used instead of the generic - // `db.name` attribute. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'mykeyspace' - DBCassandraKeyspaceKey = attribute.Key("db.cassandra.keyspace") - // The fetch size used for paging, i.e. how many rows will be returned at once. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 5000 - DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") - // The consistency level of the query. Based on consistency values from - // [CQL](https://docs.datastax.com/en/cassandra- - // oss/3.0/cassandra/dml/dmlConfigConsistency.html). - // - // Type: Enum - // Required: No - // Stability: stable - DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") - // The name of the primary table that the operation is acting upon, including the - // schema name (if applicable). - // - // Type: string - // Required: Recommended if available. - // Stability: stable - // Examples: 'mytable' - // Note: This mirrors the db.sql.table attribute but references cassandra rather - // than sql. It is not recommended to attempt any client-side parsing of - // `db.statement` just to get this property, but it should be set if it is - // provided by the library being instrumented. If the operation is acting upon an - // anonymous table, or more than one table, this value MUST NOT be set. - DBCassandraTableKey = attribute.Key("db.cassandra.table") - // Whether or not the query is idempotent. - // - // Type: boolean - // Required: No - // Stability: stable - DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") - // The number of times a query was speculatively executed. Not set or `0` if the - // query was not executed speculatively. 
- // - // Type: int - // Required: No - // Stability: stable - // Examples: 0, 2 - DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") - // The ID of the coordinating node for a query. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' - DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") - // The data center of the coordinating node for a query. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'us-west-2' - DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") -) - -var ( - // all - DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") - // each_quorum - DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") - // quorum - DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") - // local_quorum - DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") - // one - DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") - // two - DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") - // three - DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") - // local_one - DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") - // any - DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") - // serial - DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") - // local_serial - DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") -) - -// Call-level attributes for Apache HBase -const ( - // The [HBase namespace](https://hbase.apache.org/book.html#_namespace) being - // accessed. To be used instead of the generic `db.name` attribute. 
- // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'default' - DBHBaseNamespaceKey = attribute.Key("db.hbase.namespace") -) - -// Call-level attributes for Redis -const ( - // The index of the database being accessed as used in the [`SELECT` - // command](https://redis.io/commands/select), provided as an integer. To be used - // instead of the generic `db.name` attribute. - // - // Type: int - // Required: Required, if other than the default database (`0`). - // Stability: stable - // Examples: 0, 1, 15 - DBRedisDBIndexKey = attribute.Key("db.redis.database_index") -) - -// Call-level attributes for MongoDB -const ( - // The collection being accessed within the database stated in `db.name`. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'customers', 'products' - DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") -) - -// Call-level attrbiutes for SQL databases -const ( - // The name of the primary table that the operation is acting upon, including the - // schema name (if applicable). - // - // Type: string - // Required: Recommended if available. - // Stability: stable - // Examples: 'public.users', 'customers' - // Note: It is not recommended to attempt any client-side parsing of - // `db.statement` just to get this property, but it should be set if it is - // provided by the library being instrumented. If the operation is acting upon an - // anonymous table, or more than one table, this value MUST NOT be set. - DBSQLTableKey = attribute.Key("db.sql.table") -) - -// This document defines the attributes used to report a single exception associated with a span. -const ( - // The type of the exception (its fully-qualified class name, if applicable). The - // dynamic type of the exception should be preferred over the static type in - // languages that support it. 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: 'java.net.ConnectException', 'OSError' - ExceptionTypeKey = attribute.Key("exception.type") - // The exception message. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Division by zero', "Can't convert 'int' object to str implicitly" - ExceptionMessageKey = attribute.Key("exception.message") - // A stacktrace as a string in the natural representation for the language - // runtime. The representation is to be determined and documented by each language - // SIG. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test - // exception\\n at ' - // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' - // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' - // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' - ExceptionStacktraceKey = attribute.Key("exception.stacktrace") - // SHOULD be set to true if the exception event is recorded at a point where it is - // known that the exception is escaping the scope of the span. - // - // Type: boolean - // Required: No - // Stability: stable - // Note: An exception is considered to have escaped (or left) the scope of a span, - // if that span is ended while the exception is still logically "in flight". - // This may be actually "in flight" in some languages (e.g. if the exception - // is passed to a Context manager's `__exit__` method in Python) but will - // usually be caught at the point of recording the exception in most languages. - - // It is usually not possible to determine at the point where an exception is - // thrown - // whether it will escape the scope of a span. - // However, it is trivial to know that an exception - // will escape, if one checks for an active exception just before ending the span, - // as done in the [example above](#exception-end-example). 
- - // It follows that an exception may still escape the scope of the span - // even if the `exception.escaped` attribute was not set or set to false, - // since the event might have been recorded at a time where it was not - // clear whether the exception will escape. - ExceptionEscapedKey = attribute.Key("exception.escaped") -) - -// This semantic convention describes an instance of a function that runs without provisioning or managing of servers (also known as serverless functions or Function as a Service (FaaS)) with spans. -const ( - // Type of the trigger on which the function is executed. - // - // Type: Enum - // Required: On FaaS instances, faas.trigger MUST be set on incoming invocations. - // Clients invoking FaaS instances MUST set `faas.trigger` on outgoing - // invocations, if it is known to the client. This is, for example, not the case, - // when the transport layer is abstracted in a FaaS client framework without - // access to its configuration. - // Stability: stable - FaaSTriggerKey = attribute.Key("faas.trigger") - // The execution ID of the current function execution. 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' - FaaSExecutionKey = attribute.Key("faas.execution") -) - -var ( - // A response to some data source operation such as a database or filesystem read/write - FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") - // To provide an answer to an inbound HTTP request - FaaSTriggerHTTP = FaaSTriggerKey.String("http") - // A function is set to be executed when messages are sent to a messaging system - FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") - // A function is scheduled to be executed regularly - FaaSTriggerTimer = FaaSTriggerKey.String("timer") - // If none of the others apply - FaaSTriggerOther = FaaSTriggerKey.String("other") -) - -// Semantic Convention for FaaS triggered as a response to some data source operation such as a database or filesystem read/write. -const ( - // The name of the source on which the triggering operation was performed. For - // example, in Cloud Storage or S3 corresponds to the bucket name, and in Cosmos - // DB to the database name. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'myBucketName', 'myDBName' - FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") - // Describes the type of the operation that was performed on the data. - // - // Type: Enum - // Required: Always - // Stability: stable - FaaSDocumentOperationKey = attribute.Key("faas.document.operation") - // A string containing the time when the data was accessed in the [ISO - // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed - // in [UTC](https://www.w3.org/TR/NOTE-datetime). - // - // Type: string - // Required: Always - // Stability: stable - // Examples: '2020-01-23T13:47:06Z' - FaaSDocumentTimeKey = attribute.Key("faas.document.time") - // The document name/table subjected to the operation. 
For example, in Cloud - // Storage or S3 is the name of the file, and in Cosmos DB the table name. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'myFile.txt', 'myTableName' - FaaSDocumentNameKey = attribute.Key("faas.document.name") -) - -var ( - // When a new object is created - FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") - // When an object is modified - FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") - // When an object is deleted - FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") -) - -// Semantic Convention for FaaS scheduled to be executed regularly. -const ( - // A string containing the function invocation time in the [ISO - // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed - // in [UTC](https://www.w3.org/TR/NOTE-datetime). - // - // Type: string - // Required: Always - // Stability: stable - // Examples: '2020-01-23T13:47:06Z' - FaaSTimeKey = attribute.Key("faas.time") - // A string containing the schedule period as [Cron Expression](https://docs.oracl - // e.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). - // - // Type: string - // Required: No - // Stability: stable - // Examples: '0/5 * * * ? *' - FaaSCronKey = attribute.Key("faas.cron") -) - -// Contains additional attributes for incoming FaaS spans. -const ( - // A boolean that is true if the serverless function is executed for the first - // time (aka cold-start). - // - // Type: boolean - // Required: No - // Stability: stable - FaaSColdstartKey = attribute.Key("faas.coldstart") -) - -// Contains additional attributes for outgoing FaaS spans. -const ( - // The name of the invoked function. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'my-function' - // Note: SHOULD be equal to the `faas.name` resource attribute of the invoked - // function. 
- FaaSInvokedNameKey = attribute.Key("faas.invoked_name") - // The cloud provider of the invoked function. - // - // Type: Enum - // Required: Always - // Stability: stable - // Examples: 'aws' - // Note: SHOULD be equal to the `cloud.provider` resource attribute of the invoked - // function. - FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") - // The cloud region of the invoked function. - // - // Type: string - // Required: For some cloud providers, like AWS or GCP, the region in which a - // function is hosted is essential to uniquely identify the function and also part - // of its endpoint. Since it's part of the endpoint being called, the region is - // always known to clients. In these cases, `faas.invoked_region` MUST be set - // accordingly. If the region is unknown to the client or not required for - // identifying the invoked function, setting `faas.invoked_region` is optional. - // Stability: stable - // Examples: 'eu-central-1' - // Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked - // function. - FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") -) - -var ( - // Amazon Web Services - FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") - // Microsoft Azure - FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") - // Google Cloud Platform - FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") -) - -// These attributes may be used for any network related operation. -const ( - // Transport protocol used. See note below. - // - // Type: Enum - // Required: No - // Stability: stable - // Examples: 'ip_tcp' - NetTransportKey = attribute.Key("net.transport") - // Remote address of the peer (dotted decimal for IPv4 or - // [RFC5952](https://tools.ietf.org/html/rfc5952) for IPv6) - // - // Type: string - // Required: No - // Stability: stable - // Examples: '127.0.0.1' - NetPeerIPKey = attribute.Key("net.peer.ip") - // Remote port number. 
- // - // Type: int - // Required: No - // Stability: stable - // Examples: 80, 8080, 443 - NetPeerPortKey = attribute.Key("net.peer.port") - // Remote hostname or similar, see note below. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'example.com' - NetPeerNameKey = attribute.Key("net.peer.name") - // Like `net.peer.ip` but for the host IP. Useful in case of a multi-IP host. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '192.168.0.1' - NetHostIPKey = attribute.Key("net.host.ip") - // Like `net.peer.port` but for the host port. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 35555 - NetHostPortKey = attribute.Key("net.host.port") - // Local hostname or similar, see note below. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'localhost' - NetHostNameKey = attribute.Key("net.host.name") -) - -var ( - // ip_tcp - NetTransportTCP = NetTransportKey.String("ip_tcp") - // ip_udp - NetTransportUDP = NetTransportKey.String("ip_udp") - // Another IP-based protocol - NetTransportIP = NetTransportKey.String("ip") - // Unix Domain socket. See below - NetTransportUnix = NetTransportKey.String("unix") - // Named or anonymous pipe. See note below - NetTransportPipe = NetTransportKey.String("pipe") - // In-process communication - NetTransportInProc = NetTransportKey.String("inproc") - // Something else (non IP-based) - NetTransportOther = NetTransportKey.String("other") -) - -// Operations that access some remote service. -const ( - // The [`service.name`](../../resource/semantic_conventions/README.md#service) of - // the remote service. SHOULD be equal to the actual `service.name` resource - // attribute of the remote service if any. 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: 'AuthTokenCache' - PeerServiceKey = attribute.Key("peer.service") -) - -// These attributes may be used for any operation with an authenticated and/or authorized enduser. -const ( - // Username or client_id extracted from the access token or - // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in the - // inbound request from outside the system. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'username' - EnduserIDKey = attribute.Key("enduser.id") - // Actual/assumed role the client is making the request under extracted from token - // or application security context. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'admin' - EnduserRoleKey = attribute.Key("enduser.role") - // Scopes or granted authorities the client currently possesses extracted from - // token or application security context. The value would come from the scope - // associated with an [OAuth 2.0 Access - // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute value - // in a [SAML 2.0 Assertion](http://docs.oasis- - // open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'read:message, write:files' - EnduserScopeKey = attribute.Key("enduser.scope") -) - -// These attributes may be used for any operation to store information about a thread that started a span. -const ( - // Current "managed" thread ID (as opposed to OS thread ID). - // - // Type: int - // Required: No - // Stability: stable - // Examples: 42 - ThreadIDKey = attribute.Key("thread.id") - // Current thread name. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'main' - ThreadNameKey = attribute.Key("thread.name") -) - -// These attributes allow to report this unit of code and therefore to provide more context about the span. 
-const ( - // The method or function name, or equivalent (usually rightmost part of the code - // unit's name). - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'serveRequest' - CodeFunctionKey = attribute.Key("code.function") - // The "namespace" within which `code.function` is defined. Usually the qualified - // class or module name, such that `code.namespace` + some separator + - // `code.function` form a unique identifier for the code unit. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'com.example.MyHTTPService' - CodeNamespaceKey = attribute.Key("code.namespace") - // The source code file name that identifies the code unit as uniquely as possible - // (preferably an absolute file path). - // - // Type: string - // Required: No - // Stability: stable - // Examples: '/usr/local/MyApplication/content_root/app/index.php' - CodeFilepathKey = attribute.Key("code.filepath") - // The line number in `code.filepath` best representing the operation. It SHOULD - // point within the code unit named in `code.function`. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 42 - CodeLineNumberKey = attribute.Key("code.lineno") -) - -// This document defines semantic conventions for HTTP client and server Spans. -const ( - // HTTP request method. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'GET', 'POST', 'HEAD' - HTTPMethodKey = attribute.Key("http.method") - // Full HTTP request URL in the form `scheme://host[:port]/path?query[#fragment]`. - // Usually the fragment is not transmitted over HTTP, but if it is known, it - // should be included nevertheless. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' - // Note: `http.url` MUST NOT contain credentials passed via URL in form of - // `https://username:password@www.example.com/`. 
In such case the attribute's - // value should be `https://www.example.com/`. - HTTPURLKey = attribute.Key("http.url") - // The full request target as passed in a HTTP request line or equivalent. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '/path/12314/?q=ddds#123' - HTTPTargetKey = attribute.Key("http.target") - // The value of the [HTTP host - // header](https://tools.ietf.org/html/rfc7230#section-5.4). When the header is - // empty or not present, this attribute should be the same. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'www.example.org' - HTTPHostKey = attribute.Key("http.host") - // The URI scheme identifying the used protocol. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'http', 'https' - HTTPSchemeKey = attribute.Key("http.scheme") - // [HTTP response status code](https://tools.ietf.org/html/rfc7231#section-6). - // - // Type: int - // Required: If and only if one was received/sent. - // Stability: stable - // Examples: 200 - HTTPStatusCodeKey = attribute.Key("http.status_code") - // Kind of HTTP protocol used. - // - // Type: Enum - // Required: No - // Stability: stable - // Examples: '1.0' - // Note: If `net.transport` is not specified, it can be assumed to be `IP.TCP` - // except if `http.flavor` is `QUIC`, in which case `IP.UDP` is assumed. - HTTPFlavorKey = attribute.Key("http.flavor") - // Value of the [HTTP User- - // Agent](https://tools.ietf.org/html/rfc7231#section-5.5.3) header sent by the - // client. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' - HTTPUserAgentKey = attribute.Key("http.user_agent") - // The size of the request payload body in bytes. This is the number of bytes - // transferred excluding headers and is often, but not always, present as the - // [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. 
For - // requests using transport encoding, this should be the compressed size. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 3495 - HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") - // The size of the uncompressed request payload body after transport decoding. Not - // set if transport encoding not used. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 5493 - HTTPRequestContentLengthUncompressedKey = attribute.Key("http.request_content_length_uncompressed") - // The size of the response payload body in bytes. This is the number of bytes - // transferred excluding headers and is often, but not always, present as the - // [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. For - // requests using transport encoding, this should be the compressed size. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 3495 - HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") - // The size of the uncompressed response payload body after transport decoding. - // Not set if transport encoding not used. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 5493 - HTTPResponseContentLengthUncompressedKey = attribute.Key("http.response_content_length_uncompressed") -) - -var ( - // HTTP 1.0 - HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0") - // HTTP 1.1 - HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1") - // HTTP 2 - HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0") - // SPDY protocol - HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY") - // QUIC protocol - HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC") -) - -// Semantic Convention for HTTP Server -const ( - // The primary server name of the matched virtual host. This should be obtained - // via configuration. If no such configuration can be obtained, this attribute - // MUST NOT be set ( `net.host.name` should be used instead). 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: 'example.com' - // Note: `http.url` is usually not readily available on the server side but would - // have to be assembled in a cumbersome and sometimes lossy process from other - // information (see e.g. open-telemetry/opentelemetry-python/pull/148). It is thus - // preferred to supply the raw data that is available. - HTTPServerNameKey = attribute.Key("http.server_name") - // The matched route (path template). - // - // Type: string - // Required: No - // Stability: stable - // Examples: '/users/:userID?' - HTTPRouteKey = attribute.Key("http.route") - // The IP address of the original client behind all proxies, if known (e.g. from - // [X-Forwarded-For](https://developer.mozilla.org/en- - // US/docs/Web/HTTP/Headers/X-Forwarded-For)). - // - // Type: string - // Required: No - // Stability: stable - // Examples: '83.164.160.102' - // Note: This is not necessarily the same as `net.peer.ip`, which would identify - // the network-level peer, which may be a proxy. - HTTPClientIPKey = attribute.Key("http.client_ip") -) - -// Attributes that exist for multiple DynamoDB request types. -const ( - // The keys in the `RequestItems` object field. - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: 'Users', 'Cats' - AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") - // The JSON-serialized value of each item in the `ConsumedCapacity` response - // field. 
- // - // Type: string[] - // Required: No - // Stability: stable - // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : { - // "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": - // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, - // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, - // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, - // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": - // "string", "WriteCapacityUnits": number }' - AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") - // The JSON-serialized value of the `ItemCollectionMetrics` response field. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, - // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : - // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": - // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }' - AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") - // The value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter. - // - // Type: double - // Required: No - // Stability: stable - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") - // The value of the `ProvisionedThroughput.WriteCapacityUnits` request parameter. - // - // Type: double - // Required: No - // Stability: stable - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") - // The value of the `ConsistentRead` request parameter. 
- // - // Type: boolean - // Required: No - // Stability: stable - AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") - // The value of the `ProjectionExpression` request parameter. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Title', 'Title, Price, Color', 'Title, Description, RelatedItems, - // ProductReviews' - AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") - // The value of the `Limit` request parameter. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 10 - AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") - // The value of the `AttributesToGet` request parameter. - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: 'lives', 'id' - AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") - // The value of the `IndexName` request parameter. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'name_to_group' - AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") - // The value of the `Select` request parameter. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'ALL_ATTRIBUTES', 'COUNT' - AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") -) - -// DynamoDB.CreateTable -const ( - // The JSON-serialized value of each item of the `GlobalSecondaryIndexes` request - // field - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": "string", - // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], - // "ProjectionType": "string" }, "ProvisionedThroughput": { "ReadCapacityUnits": - // number, "WriteCapacityUnits": number } }' - AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") - // The JSON-serialized value of each item of the `LocalSecondaryIndexes` request - // field. 
- // - // Type: string[] - // Required: No - // Stability: stable - // Examples: '{ "IndexARN": "string", "IndexName": "string", "IndexSizeBytes": - // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string", - // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], - // "ProjectionType": "string" } }' - AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") -) - -// DynamoDB.ListTables -const ( - // The value of the `ExclusiveStartTableName` request parameter. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Users', 'CatsTable' - AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") - // The the number of items in the `TableNames` response parameter. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 20 - AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") -) - -// DynamoDB.Query -const ( - // The value of the `ScanIndexForward` request parameter. - // - // Type: boolean - // Required: No - // Stability: stable - AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") -) - -// DynamoDB.Scan -const ( - // The value of the `Segment` request parameter. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 10 - AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") - // The value of the `TotalSegments` request parameter. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 100 - AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") - // The value of the `Count` response parameter. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 10 - AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") - // The value of the `ScannedCount` response parameter. 
- // - // Type: int - // Required: No - // Stability: stable - // Examples: 50 - AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") -) - -// DynamoDB.UpdateTable -const ( - // The JSON-serialized value of each item in the `AttributeDefinitions` request - // field. - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' - AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") - // The JSON-serialized value of each item in the the `GlobalSecondaryIndexUpdates` - // request field. - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { - // "AttributeName": "string", "KeyType": "string" } ], "Projection": { - // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, - // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": - // number } }' - AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") -) - -// This document defines the attributes used in messaging systems. -const ( - // A string identifying the messaging system. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'kafka', 'rabbitmq', 'activemq', 'AmazonSQS' - MessagingSystemKey = attribute.Key("messaging.system") - // The message destination name. This might be equal to the span name but is - // required nevertheless. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'MyQueue', 'MyTopic' - MessagingDestinationKey = attribute.Key("messaging.destination") - // The kind of message destination - // - // Type: Enum - // Required: Required only if the message destination is either a `queue` or - // `topic`. 
- // Stability: stable - MessagingDestinationKindKey = attribute.Key("messaging.destination_kind") - // A boolean that is true if the message destination is temporary. - // - // Type: boolean - // Required: If missing, it is assumed to be false. - // Stability: stable - MessagingTempDestinationKey = attribute.Key("messaging.temp_destination") - // The name of the transport protocol. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'AMQP', 'MQTT' - MessagingProtocolKey = attribute.Key("messaging.protocol") - // The version of the transport protocol. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '0.9.1' - MessagingProtocolVersionKey = attribute.Key("messaging.protocol_version") - // Connection string. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'tibjmsnaming://localhost:7222', - // 'https://queue.amazonaws.com/80398EXAMPLE/MyQueue' - MessagingURLKey = attribute.Key("messaging.url") - // A value used by the messaging system as an identifier for the message, - // represented as a string. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '452a7c7c7c7048c2f887f61572b18fc2' - MessagingMessageIDKey = attribute.Key("messaging.message_id") - // The [conversation ID](#conversations) identifying the conversation to which the - // message belongs, represented as a string. Sometimes called "Correlation ID". - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'MyConversationID' - MessagingConversationIDKey = attribute.Key("messaging.conversation_id") - // The (uncompressed) size of the message payload in bytes. Also use this - // attribute if it is unknown whether the compressed or uncompressed payload size - // is reported. 
- // - // Type: int - // Required: No - // Stability: stable - // Examples: 2738 - MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message_payload_size_bytes") - // The compressed size of the message payload in bytes. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 2048 - MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message_payload_compressed_size_bytes") -) - -var ( - // A message sent to a queue - MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue") - // A message sent to a topic - MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic") -) - -// Semantic convention for a consumer of messages received from a messaging system -const ( - // A string identifying the kind of message consumption as defined in the - // [Operation names](#operation-names) section above. If the operation is "send", - // this attribute MUST NOT be set, since the operation can be inferred from the - // span kind in that case. - // - // Type: Enum - // Required: No - // Stability: stable - MessagingOperationKey = attribute.Key("messaging.operation") -) - -var ( - // receive - MessagingOperationReceive = MessagingOperationKey.String("receive") - // process - MessagingOperationProcess = MessagingOperationKey.String("process") -) - -// Attributes for RabbitMQ -const ( - // RabbitMQ message routing key. - // - // Type: string - // Required: Unless it is empty. - // Stability: stable - // Examples: 'myKey' - MessagingRabbitmqRoutingKeyKey = attribute.Key("messaging.rabbitmq.routing_key") -) - -// Attributes for Apache Kafka -const ( - // Message keys in Kafka are used for grouping alike messages to ensure they're - // processed on the same partition. They differ from `messaging.message_id` in - // that they're not unique. If the key is `null`, the attribute MUST NOT be set. 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: 'myKey' - // Note: If the key type is not string, it's string representation has to be - // supplied for the attribute. If the key has no unambiguous, canonical string - // form, don't include its value. - MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message_key") - // Name of the Kafka Consumer Group that is handling the message. Only applies to - // consumers, not producers. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'my-group' - MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer_group") - // Client ID for the Consumer or Producer that is handling the message. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'client-5' - MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id") - // Partition the message is sent to. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 2 - MessagingKafkaPartitionKey = attribute.Key("messaging.kafka.partition") - // A boolean that is true if the message is a tombstone. - // - // Type: boolean - // Required: If missing, it is assumed to be false. - // Stability: stable - MessagingKafkaTombstoneKey = attribute.Key("messaging.kafka.tombstone") -) - -// This document defines semantic conventions for remote procedure calls. -const ( - // A string identifying the remoting system. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'grpc', 'java_rmi', 'wcf' - RPCSystemKey = attribute.Key("rpc.system") - // The full name of the service being called, including its package name, if - // applicable. - // - // Type: string - // Required: No, but recommended - // Stability: stable - // Examples: 'myservice.EchoService' - RPCServiceKey = attribute.Key("rpc.service") - // The name of the method being called, must be equal to the $method part in the - // span name. 
- // - // Type: string - // Required: No, but recommended - // Stability: stable - // Examples: 'exampleMethod' - RPCMethodKey = attribute.Key("rpc.method") -) - -// Tech-specific attributes for gRPC. -const ( - // The [numeric status - // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of the gRPC - // request. - // - // Type: Enum - // Required: Always - // Stability: stable - // Examples: 0, 1, 16 - RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") -) - -var ( - // OK - RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) - // CANCELLED - RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) - // UNKNOWN - RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) - // INVALID_ARGUMENT - RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) - // DEADLINE_EXCEEDED - RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) - // NOT_FOUND - RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) - // ALREADY_EXISTS - RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) - // PERMISSION_DENIED - RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) - // RESOURCE_EXHAUSTED - RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) - // FAILED_PRECONDITION - RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) - // ABORTED - RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) - // OUT_OF_RANGE - RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) - // UNIMPLEMENTED - RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) - // INTERNAL - RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) - // UNAVAILABLE - RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) - // DATA_LOSS - RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) - // UNAUTHENTICATED - RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) -) - -// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). 
-const ( - // Protocol version as in `jsonrpc` property of request/response. Since JSON-RPC - // 1.0 does not specify this, the value can be omitted. - // - // Type: string - // Required: If missing, it is assumed to be "1.0". - // Stability: stable - // Examples: '2.0', '1.0' - RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") - // `method` property from request. Unlike `rpc.method`, this may not relate to the - // actual method being called. Useful for client-side traces since client does not - // know what will be called on the server. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'users.create', 'get_users' - RPCJsonrpcMethodKey = attribute.Key("rpc.jsonrpc.method") - // `id` property of request or response. Since protocol allows id to be int, - // string, `null` or missing (for notifications), value is expected to be cast to - // string for simplicity. Use empty string in case of `null` value. Omit entirely - // if this is a notification. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '10', 'request-7', '' - RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") - // `error.code` property of response if it is an error response. - // - // Type: int - // Required: If missing, response is assumed to be successful. - // Stability: stable - // Examples: -32700, 100 - RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") - // `error.message` property of response if it is an error response. 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Parse error', 'User already exists' - RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") -) diff --git a/vendor/go.opentelemetry.io/otel/trace.go b/vendor/go.opentelemetry.io/otel/trace.go deleted file mode 100644 index caf7249d..00000000 --- a/vendor/go.opentelemetry.io/otel/trace.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package otel // import "go.opentelemetry.io/otel" - -import ( - "go.opentelemetry.io/otel/internal/global" - "go.opentelemetry.io/otel/trace" -) - -// Tracer creates a named tracer that implements Tracer interface. -// If the name is an empty string then provider uses default name. -// -// This is short for GetTracerProvider().Tracer(name, opts...) -func Tracer(name string, opts ...trace.TracerOption) trace.Tracer { - return GetTracerProvider().Tracer(name, opts...) -} - -// GetTracerProvider returns the registered global trace provider. -// If none is registered then an instance of NoopTracerProvider is returned. -// -// Use the trace provider to create a named tracer. E.g. -// -// tracer := otel.GetTracerProvider().Tracer("example.com/foo") -// -// or -// -// tracer := otel.Tracer("example.com/foo") -func GetTracerProvider() trace.TracerProvider { - return global.TracerProvider() -} - -// SetTracerProvider registers `tp` as the global trace provider. 
-func SetTracerProvider(tp trace.TracerProvider) { - global.SetTracerProvider(tp) -} diff --git a/vendor/go.opentelemetry.io/otel/trace/LICENSE b/vendor/go.opentelemetry.io/otel/trace/LICENSE deleted file mode 100644 index 261eeb9e..00000000 --- a/vendor/go.opentelemetry.io/otel/trace/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. 
Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative 
Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go deleted file mode 100644 index cb3efbb9..00000000 --- a/vendor/go.opentelemetry.io/otel/trace/config.go +++ /dev/null @@ -1,333 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package trace // import "go.opentelemetry.io/otel/trace" - -import ( - "time" - - "go.opentelemetry.io/otel/attribute" -) - -// TracerConfig is a group of options for a Tracer. -type TracerConfig struct { - instrumentationVersion string - // Schema URL of the telemetry emitted by the Tracer. - schemaURL string - attrs attribute.Set -} - -// InstrumentationVersion returns the version of the library providing instrumentation. -func (t *TracerConfig) InstrumentationVersion() string { - return t.instrumentationVersion -} - -// InstrumentationAttributes returns the attributes associated with the library -// providing instrumentation. -func (t *TracerConfig) InstrumentationAttributes() attribute.Set { - return t.attrs -} - -// SchemaURL returns the Schema URL of the telemetry emitted by the Tracer. -func (t *TracerConfig) SchemaURL() string { - return t.schemaURL -} - -// NewTracerConfig applies all the options to a returned TracerConfig. -func NewTracerConfig(options ...TracerOption) TracerConfig { - var config TracerConfig - for _, option := range options { - config = option.apply(config) - } - return config -} - -// TracerOption applies an option to a TracerConfig. -type TracerOption interface { - apply(TracerConfig) TracerConfig -} - -type tracerOptionFunc func(TracerConfig) TracerConfig - -func (fn tracerOptionFunc) apply(cfg TracerConfig) TracerConfig { - return fn(cfg) -} - -// SpanConfig is a group of options for a Span. -type SpanConfig struct { - attributes []attribute.KeyValue - timestamp time.Time - links []Link - newRoot bool - spanKind SpanKind - stackTrace bool -} - -// Attributes describe the associated qualities of a Span. -func (cfg *SpanConfig) Attributes() []attribute.KeyValue { - return cfg.attributes -} - -// Timestamp is a time in a Span life-cycle. 
-func (cfg *SpanConfig) Timestamp() time.Time { - return cfg.timestamp -} - -// StackTrace checks whether stack trace capturing is enabled. -func (cfg *SpanConfig) StackTrace() bool { - return cfg.stackTrace -} - -// Links are the associations a Span has with other Spans. -func (cfg *SpanConfig) Links() []Link { - return cfg.links -} - -// NewRoot identifies a Span as the root Span for a new trace. This is -// commonly used when an existing trace crosses trust boundaries and the -// remote parent span context should be ignored for security. -func (cfg *SpanConfig) NewRoot() bool { - return cfg.newRoot -} - -// SpanKind is the role a Span has in a trace. -func (cfg *SpanConfig) SpanKind() SpanKind { - return cfg.spanKind -} - -// NewSpanStartConfig applies all the options to a returned SpanConfig. -// No validation is performed on the returned SpanConfig (e.g. no uniqueness -// checking or bounding of data), it is left to the SDK to perform this -// action. -func NewSpanStartConfig(options ...SpanStartOption) SpanConfig { - var c SpanConfig - for _, option := range options { - c = option.applySpanStart(c) - } - return c -} - -// NewSpanEndConfig applies all the options to a returned SpanConfig. -// No validation is performed on the returned SpanConfig (e.g. no uniqueness -// checking or bounding of data), it is left to the SDK to perform this -// action. -func NewSpanEndConfig(options ...SpanEndOption) SpanConfig { - var c SpanConfig - for _, option := range options { - c = option.applySpanEnd(c) - } - return c -} - -// SpanStartOption applies an option to a SpanConfig. These options are applicable -// only when the span is created. -type SpanStartOption interface { - applySpanStart(SpanConfig) SpanConfig -} - -type spanOptionFunc func(SpanConfig) SpanConfig - -func (fn spanOptionFunc) applySpanStart(cfg SpanConfig) SpanConfig { - return fn(cfg) -} - -// SpanEndOption applies an option to a SpanConfig. These options are -// applicable only when the span is ended. 
-type SpanEndOption interface { - applySpanEnd(SpanConfig) SpanConfig -} - -// EventConfig is a group of options for an Event. -type EventConfig struct { - attributes []attribute.KeyValue - timestamp time.Time - stackTrace bool -} - -// Attributes describe the associated qualities of an Event. -func (cfg *EventConfig) Attributes() []attribute.KeyValue { - return cfg.attributes -} - -// Timestamp is a time in an Event life-cycle. -func (cfg *EventConfig) Timestamp() time.Time { - return cfg.timestamp -} - -// StackTrace checks whether stack trace capturing is enabled. -func (cfg *EventConfig) StackTrace() bool { - return cfg.stackTrace -} - -// NewEventConfig applies all the EventOptions to a returned EventConfig. If no -// timestamp option is passed, the returned EventConfig will have a Timestamp -// set to the call time, otherwise no validation is performed on the returned -// EventConfig. -func NewEventConfig(options ...EventOption) EventConfig { - var c EventConfig - for _, option := range options { - c = option.applyEvent(c) - } - if c.timestamp.IsZero() { - c.timestamp = time.Now() - } - return c -} - -// EventOption applies span event options to an EventConfig. -type EventOption interface { - applyEvent(EventConfig) EventConfig -} - -// SpanOption are options that can be used at both the beginning and end of a span. -type SpanOption interface { - SpanStartOption - SpanEndOption -} - -// SpanStartEventOption are options that can be used at the start of a span, or with an event. -type SpanStartEventOption interface { - SpanStartOption - EventOption -} - -// SpanEndEventOption are options that can be used at the end of a span, or with an event. -type SpanEndEventOption interface { - SpanEndOption - EventOption -} - -type attributeOption []attribute.KeyValue - -func (o attributeOption) applySpan(c SpanConfig) SpanConfig { - c.attributes = append(c.attributes, []attribute.KeyValue(o)...) 
- return c -} -func (o attributeOption) applySpanStart(c SpanConfig) SpanConfig { return o.applySpan(c) } -func (o attributeOption) applyEvent(c EventConfig) EventConfig { - c.attributes = append(c.attributes, []attribute.KeyValue(o)...) - return c -} - -var _ SpanStartEventOption = attributeOption{} - -// WithAttributes adds the attributes related to a span life-cycle event. -// These attributes are used to describe the work a Span represents when this -// option is provided to a Span's start or end events. Otherwise, these -// attributes provide additional information about the event being recorded -// (e.g. error, state change, processing progress, system event). -// -// If multiple of these options are passed the attributes of each successive -// option will extend the attributes instead of overwriting. There is no -// guarantee of uniqueness in the resulting attributes. -func WithAttributes(attributes ...attribute.KeyValue) SpanStartEventOption { - return attributeOption(attributes) -} - -// SpanEventOption are options that can be used with an event or a span. -type SpanEventOption interface { - SpanOption - EventOption -} - -type timestampOption time.Time - -func (o timestampOption) applySpan(c SpanConfig) SpanConfig { - c.timestamp = time.Time(o) - return c -} -func (o timestampOption) applySpanStart(c SpanConfig) SpanConfig { return o.applySpan(c) } -func (o timestampOption) applySpanEnd(c SpanConfig) SpanConfig { return o.applySpan(c) } -func (o timestampOption) applyEvent(c EventConfig) EventConfig { - c.timestamp = time.Time(o) - return c -} - -var _ SpanEventOption = timestampOption{} - -// WithTimestamp sets the time of a Span or Event life-cycle moment (e.g. -// started, stopped, errored). 
-func WithTimestamp(t time.Time) SpanEventOption { - return timestampOption(t) -} - -type stackTraceOption bool - -func (o stackTraceOption) applyEvent(c EventConfig) EventConfig { - c.stackTrace = bool(o) - return c -} -func (o stackTraceOption) applySpan(c SpanConfig) SpanConfig { - c.stackTrace = bool(o) - return c -} -func (o stackTraceOption) applySpanEnd(c SpanConfig) SpanConfig { return o.applySpan(c) } - -// WithStackTrace sets the flag to capture the error with stack trace (e.g. true, false). -func WithStackTrace(b bool) SpanEndEventOption { - return stackTraceOption(b) -} - -// WithLinks adds links to a Span. The links are added to the existing Span -// links, i.e. this does not overwrite. Links with invalid span context are ignored. -func WithLinks(links ...Link) SpanStartOption { - return spanOptionFunc(func(cfg SpanConfig) SpanConfig { - cfg.links = append(cfg.links, links...) - return cfg - }) -} - -// WithNewRoot specifies that the Span should be treated as a root Span. Any -// existing parent span context will be ignored when defining the Span's trace -// identifiers. -func WithNewRoot() SpanStartOption { - return spanOptionFunc(func(cfg SpanConfig) SpanConfig { - cfg.newRoot = true - return cfg - }) -} - -// WithSpanKind sets the SpanKind of a Span. -func WithSpanKind(kind SpanKind) SpanStartOption { - return spanOptionFunc(func(cfg SpanConfig) SpanConfig { - cfg.spanKind = kind - return cfg - }) -} - -// WithInstrumentationVersion sets the instrumentation version. -func WithInstrumentationVersion(version string) TracerOption { - return tracerOptionFunc(func(cfg TracerConfig) TracerConfig { - cfg.instrumentationVersion = version - return cfg - }) -} - -// WithInstrumentationAttributes sets the instrumentation attributes. -// -// The passed attributes will be de-duplicated. 
-func WithInstrumentationAttributes(attr ...attribute.KeyValue) TracerOption { - return tracerOptionFunc(func(config TracerConfig) TracerConfig { - config.attrs = attribute.NewSet(attr...) - return config - }) -} - -// WithSchemaURL sets the schema URL for the Tracer. -func WithSchemaURL(schemaURL string) TracerOption { - return tracerOptionFunc(func(cfg TracerConfig) TracerConfig { - cfg.schemaURL = schemaURL - return cfg - }) -} diff --git a/vendor/go.opentelemetry.io/otel/trace/context.go b/vendor/go.opentelemetry.io/otel/trace/context.go deleted file mode 100644 index 76f9a083..00000000 --- a/vendor/go.opentelemetry.io/otel/trace/context.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace // import "go.opentelemetry.io/otel/trace" - -import "context" - -type traceContextKeyType int - -const currentSpanKey traceContextKeyType = iota - -// ContextWithSpan returns a copy of parent with span set as the current Span. -func ContextWithSpan(parent context.Context, span Span) context.Context { - return context.WithValue(parent, currentSpanKey, span) -} - -// ContextWithSpanContext returns a copy of parent with sc as the current -// Span. The Span implementation that wraps sc is non-recording and performs -// no operations other than to return sc as the SpanContext from the -// SpanContext method. 
-func ContextWithSpanContext(parent context.Context, sc SpanContext) context.Context { - return ContextWithSpan(parent, nonRecordingSpan{sc: sc}) -} - -// ContextWithRemoteSpanContext returns a copy of parent with rsc set explicly -// as a remote SpanContext and as the current Span. The Span implementation -// that wraps rsc is non-recording and performs no operations other than to -// return rsc as the SpanContext from the SpanContext method. -func ContextWithRemoteSpanContext(parent context.Context, rsc SpanContext) context.Context { - return ContextWithSpanContext(parent, rsc.WithRemote(true)) -} - -// SpanFromContext returns the current Span from ctx. -// -// If no Span is currently set in ctx an implementation of a Span that -// performs no operations is returned. -func SpanFromContext(ctx context.Context) Span { - if ctx == nil { - return noopSpan{} - } - if span, ok := ctx.Value(currentSpanKey).(Span); ok { - return span - } - return noopSpan{} -} - -// SpanContextFromContext returns the current Span's SpanContext. -func SpanContextFromContext(ctx context.Context) SpanContext { - return SpanFromContext(ctx).SpanContext() -} diff --git a/vendor/go.opentelemetry.io/otel/trace/doc.go b/vendor/go.opentelemetry.io/otel/trace/doc.go deleted file mode 100644 index ab0346f9..00000000 --- a/vendor/go.opentelemetry.io/otel/trace/doc.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -/* -Package trace provides an implementation of the tracing part of the -OpenTelemetry API. - -To participate in distributed traces a Span needs to be created for the -operation being performed as part of a traced workflow. In its simplest form: - - var tracer trace.Tracer - - func init() { - tracer = otel.Tracer("instrumentation/package/name") - } - - func operation(ctx context.Context) { - var span trace.Span - ctx, span = tracer.Start(ctx, "operation") - defer span.End() - // ... - } - -A Tracer is unique to the instrumentation and is used to create Spans. -Instrumentation should be designed to accept a TracerProvider from which it -can create its own unique Tracer. Alternatively, the registered global -TracerProvider from the go.opentelemetry.io/otel package can be used as -a default. - - const ( - name = "instrumentation/package/name" - version = "0.1.0" - ) - - type Instrumentation struct { - tracer trace.Tracer - } - - func NewInstrumentation(tp trace.TracerProvider) *Instrumentation { - if tp == nil { - tp = otel.TracerProvider() - } - return &Instrumentation{ - tracer: tp.Tracer(name, trace.WithInstrumentationVersion(version)), - } - } - - func operation(ctx context.Context, inst *Instrumentation) { - var span trace.Span - ctx, span = inst.tracer.Start(ctx, "operation") - defer span.End() - // ... - } -*/ -package trace // import "go.opentelemetry.io/otel/trace" diff --git a/vendor/go.opentelemetry.io/otel/trace/nonrecording.go b/vendor/go.opentelemetry.io/otel/trace/nonrecording.go deleted file mode 100644 index 88fcb816..00000000 --- a/vendor/go.opentelemetry.io/otel/trace/nonrecording.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace // import "go.opentelemetry.io/otel/trace" - -// nonRecordingSpan is a minimal implementation of a Span that wraps a -// SpanContext. It performs no operations other than to return the wrapped -// SpanContext. -type nonRecordingSpan struct { - noopSpan - - sc SpanContext -} - -// SpanContext returns the wrapped SpanContext. -func (s nonRecordingSpan) SpanContext() SpanContext { return s.sc } diff --git a/vendor/go.opentelemetry.io/otel/trace/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop.go deleted file mode 100644 index 7cf6c7f3..00000000 --- a/vendor/go.opentelemetry.io/otel/trace/noop.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace // import "go.opentelemetry.io/otel/trace" - -import ( - "context" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" -) - -// NewNoopTracerProvider returns an implementation of TracerProvider that -// performs no operations. 
The Tracer and Spans created from the returned -// TracerProvider also perform no operations. -func NewNoopTracerProvider() TracerProvider { - return noopTracerProvider{} -} - -type noopTracerProvider struct{} - -var _ TracerProvider = noopTracerProvider{} - -// Tracer returns noop implementation of Tracer. -func (p noopTracerProvider) Tracer(string, ...TracerOption) Tracer { - return noopTracer{} -} - -// noopTracer is an implementation of Tracer that performs no operations. -type noopTracer struct{} - -var _ Tracer = noopTracer{} - -// Start carries forward a non-recording Span, if one is present in the context, otherwise it -// creates a no-op Span. -func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption) (context.Context, Span) { - span := SpanFromContext(ctx) - if _, ok := span.(nonRecordingSpan); !ok { - // span is likely already a noopSpan, but let's be sure - span = noopSpan{} - } - return ContextWithSpan(ctx, span), span -} - -// noopSpan is an implementation of Span that performs no operations. -type noopSpan struct{} - -var _ Span = noopSpan{} - -// SpanContext returns an empty span context. -func (noopSpan) SpanContext() SpanContext { return SpanContext{} } - -// IsRecording always returns false. -func (noopSpan) IsRecording() bool { return false } - -// SetStatus does nothing. -func (noopSpan) SetStatus(codes.Code, string) {} - -// SetError does nothing. -func (noopSpan) SetError(bool) {} - -// SetAttributes does nothing. -func (noopSpan) SetAttributes(...attribute.KeyValue) {} - -// End does nothing. -func (noopSpan) End(...SpanEndOption) {} - -// RecordError does nothing. -func (noopSpan) RecordError(error, ...EventOption) {} - -// AddEvent does nothing. -func (noopSpan) AddEvent(string, ...EventOption) {} - -// SetName does nothing. -func (noopSpan) SetName(string) {} - -// TracerProvider returns a no-op TracerProvider. 
-func (noopSpan) TracerProvider() TracerProvider { return noopTracerProvider{} } diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go deleted file mode 100644 index 4aa94f79..00000000 --- a/vendor/go.opentelemetry.io/otel/trace/trace.go +++ /dev/null @@ -1,551 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace // import "go.opentelemetry.io/otel/trace" - -import ( - "bytes" - "context" - "encoding/hex" - "encoding/json" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" -) - -const ( - // FlagsSampled is a bitmask with the sampled bit set. A SpanContext - // with the sampling bit set means the span is sampled. - FlagsSampled = TraceFlags(0x01) - - errInvalidHexID errorConst = "trace-id and span-id can only contain [0-9a-f] characters, all lowercase" - - errInvalidTraceIDLength errorConst = "hex encoded trace-id must have length equals to 32" - errNilTraceID errorConst = "trace-id can't be all zero" - - errInvalidSpanIDLength errorConst = "hex encoded span-id must have length equals to 16" - errNilSpanID errorConst = "span-id can't be all zero" -) - -type errorConst string - -func (e errorConst) Error() string { - return string(e) -} - -// TraceID is a unique identity of a trace. -// nolint:revive // revive complains about stutter of `trace.TraceID`. 
-type TraceID [16]byte - -var nilTraceID TraceID -var _ json.Marshaler = nilTraceID - -// IsValid checks whether the trace TraceID is valid. A valid trace ID does -// not consist of zeros only. -func (t TraceID) IsValid() bool { - return !bytes.Equal(t[:], nilTraceID[:]) -} - -// MarshalJSON implements a custom marshal function to encode TraceID -// as a hex string. -func (t TraceID) MarshalJSON() ([]byte, error) { - return json.Marshal(t.String()) -} - -// String returns the hex string representation form of a TraceID. -func (t TraceID) String() string { - return hex.EncodeToString(t[:]) -} - -// SpanID is a unique identity of a span in a trace. -type SpanID [8]byte - -var nilSpanID SpanID -var _ json.Marshaler = nilSpanID - -// IsValid checks whether the SpanID is valid. A valid SpanID does not consist -// of zeros only. -func (s SpanID) IsValid() bool { - return !bytes.Equal(s[:], nilSpanID[:]) -} - -// MarshalJSON implements a custom marshal function to encode SpanID -// as a hex string. -func (s SpanID) MarshalJSON() ([]byte, error) { - return json.Marshal(s.String()) -} - -// String returns the hex string representation form of a SpanID. -func (s SpanID) String() string { - return hex.EncodeToString(s[:]) -} - -// TraceIDFromHex returns a TraceID from a hex string if it is compliant with -// the W3C trace-context specification. See more at -// https://www.w3.org/TR/trace-context/#trace-id -// nolint:revive // revive complains about stutter of `trace.TraceIDFromHex`. -func TraceIDFromHex(h string) (TraceID, error) { - t := TraceID{} - if len(h) != 32 { - return t, errInvalidTraceIDLength - } - - if err := decodeHex(h, t[:]); err != nil { - return t, err - } - - if !t.IsValid() { - return t, errNilTraceID - } - return t, nil -} - -// SpanIDFromHex returns a SpanID from a hex string if it is compliant -// with the w3c trace-context specification. 
-// See more at https://www.w3.org/TR/trace-context/#parent-id -func SpanIDFromHex(h string) (SpanID, error) { - s := SpanID{} - if len(h) != 16 { - return s, errInvalidSpanIDLength - } - - if err := decodeHex(h, s[:]); err != nil { - return s, err - } - - if !s.IsValid() { - return s, errNilSpanID - } - return s, nil -} - -func decodeHex(h string, b []byte) error { - for _, r := range h { - switch { - case 'a' <= r && r <= 'f': - continue - case '0' <= r && r <= '9': - continue - default: - return errInvalidHexID - } - } - - decoded, err := hex.DecodeString(h) - if err != nil { - return err - } - - copy(b, decoded) - return nil -} - -// TraceFlags contains flags that can be set on a SpanContext. -type TraceFlags byte //nolint:revive // revive complains about stutter of `trace.TraceFlags`. - -// IsSampled returns if the sampling bit is set in the TraceFlags. -func (tf TraceFlags) IsSampled() bool { - return tf&FlagsSampled == FlagsSampled -} - -// WithSampled sets the sampling bit in a new copy of the TraceFlags. -func (tf TraceFlags) WithSampled(sampled bool) TraceFlags { // nolint:revive // sampled is not a control flag. - if sampled { - return tf | FlagsSampled - } - - return tf &^ FlagsSampled -} - -// MarshalJSON implements a custom marshal function to encode TraceFlags -// as a hex string. -func (tf TraceFlags) MarshalJSON() ([]byte, error) { - return json.Marshal(tf.String()) -} - -// String returns the hex string representation form of TraceFlags. -func (tf TraceFlags) String() string { - return hex.EncodeToString([]byte{byte(tf)}[:]) -} - -// SpanContextConfig contains mutable fields usable for constructing -// an immutable SpanContext. -type SpanContextConfig struct { - TraceID TraceID - SpanID SpanID - TraceFlags TraceFlags - TraceState TraceState - Remote bool -} - -// NewSpanContext constructs a SpanContext using values from the provided -// SpanContextConfig. 
-func NewSpanContext(config SpanContextConfig) SpanContext { - return SpanContext{ - traceID: config.TraceID, - spanID: config.SpanID, - traceFlags: config.TraceFlags, - traceState: config.TraceState, - remote: config.Remote, - } -} - -// SpanContext contains identifying trace information about a Span. -type SpanContext struct { - traceID TraceID - spanID SpanID - traceFlags TraceFlags - traceState TraceState - remote bool -} - -var _ json.Marshaler = SpanContext{} - -// IsValid returns if the SpanContext is valid. A valid span context has a -// valid TraceID and SpanID. -func (sc SpanContext) IsValid() bool { - return sc.HasTraceID() && sc.HasSpanID() -} - -// IsRemote indicates whether the SpanContext represents a remotely-created Span. -func (sc SpanContext) IsRemote() bool { - return sc.remote -} - -// WithRemote returns a copy of sc with the Remote property set to remote. -func (sc SpanContext) WithRemote(remote bool) SpanContext { - return SpanContext{ - traceID: sc.traceID, - spanID: sc.spanID, - traceFlags: sc.traceFlags, - traceState: sc.traceState, - remote: remote, - } -} - -// TraceID returns the TraceID from the SpanContext. -func (sc SpanContext) TraceID() TraceID { - return sc.traceID -} - -// HasTraceID checks if the SpanContext has a valid TraceID. -func (sc SpanContext) HasTraceID() bool { - return sc.traceID.IsValid() -} - -// WithTraceID returns a new SpanContext with the TraceID replaced. -func (sc SpanContext) WithTraceID(traceID TraceID) SpanContext { - return SpanContext{ - traceID: traceID, - spanID: sc.spanID, - traceFlags: sc.traceFlags, - traceState: sc.traceState, - remote: sc.remote, - } -} - -// SpanID returns the SpanID from the SpanContext. -func (sc SpanContext) SpanID() SpanID { - return sc.spanID -} - -// HasSpanID checks if the SpanContext has a valid SpanID. -func (sc SpanContext) HasSpanID() bool { - return sc.spanID.IsValid() -} - -// WithSpanID returns a new SpanContext with the SpanID replaced. 
-func (sc SpanContext) WithSpanID(spanID SpanID) SpanContext { - return SpanContext{ - traceID: sc.traceID, - spanID: spanID, - traceFlags: sc.traceFlags, - traceState: sc.traceState, - remote: sc.remote, - } -} - -// TraceFlags returns the flags from the SpanContext. -func (sc SpanContext) TraceFlags() TraceFlags { - return sc.traceFlags -} - -// IsSampled returns if the sampling bit is set in the SpanContext's TraceFlags. -func (sc SpanContext) IsSampled() bool { - return sc.traceFlags.IsSampled() -} - -// WithTraceFlags returns a new SpanContext with the TraceFlags replaced. -func (sc SpanContext) WithTraceFlags(flags TraceFlags) SpanContext { - return SpanContext{ - traceID: sc.traceID, - spanID: sc.spanID, - traceFlags: flags, - traceState: sc.traceState, - remote: sc.remote, - } -} - -// TraceState returns the TraceState from the SpanContext. -func (sc SpanContext) TraceState() TraceState { - return sc.traceState -} - -// WithTraceState returns a new SpanContext with the TraceState replaced. -func (sc SpanContext) WithTraceState(state TraceState) SpanContext { - return SpanContext{ - traceID: sc.traceID, - spanID: sc.spanID, - traceFlags: sc.traceFlags, - traceState: state, - remote: sc.remote, - } -} - -// Equal is a predicate that determines whether two SpanContext values are equal. -func (sc SpanContext) Equal(other SpanContext) bool { - return sc.traceID == other.traceID && - sc.spanID == other.spanID && - sc.traceFlags == other.traceFlags && - sc.traceState.String() == other.traceState.String() && - sc.remote == other.remote -} - -// MarshalJSON implements a custom marshal function to encode a SpanContext. -func (sc SpanContext) MarshalJSON() ([]byte, error) { - return json.Marshal(SpanContextConfig{ - TraceID: sc.traceID, - SpanID: sc.spanID, - TraceFlags: sc.traceFlags, - TraceState: sc.traceState, - Remote: sc.remote, - }) -} - -// Span is the individual component of a trace. 
It represents a single named -// and timed operation of a workflow that is traced. A Tracer is used to -// create a Span and it is then up to the operation the Span represents to -// properly end the Span when the operation itself ends. -// -// Warning: methods may be added to this interface in minor releases. -type Span interface { - // End completes the Span. The Span is considered complete and ready to be - // delivered through the rest of the telemetry pipeline after this method - // is called. Therefore, updates to the Span are not allowed after this - // method has been called. - End(options ...SpanEndOption) - - // AddEvent adds an event with the provided name and options. - AddEvent(name string, options ...EventOption) - - // IsRecording returns the recording state of the Span. It will return - // true if the Span is active and events can be recorded. - IsRecording() bool - - // RecordError will record err as an exception span event for this span. An - // additional call to SetStatus is required if the Status of the Span should - // be set to Error, as this method does not change the Span status. If this - // span is not being recorded or err is nil then this method does nothing. - RecordError(err error, options ...EventOption) - - // SpanContext returns the SpanContext of the Span. The returned SpanContext - // is usable even after the End method has been called for the Span. - SpanContext() SpanContext - - // SetStatus sets the status of the Span in the form of a code and a - // description, provided the status hasn't already been set to a higher - // value before (OK > Error > Unset). The description is only included in a - // status when the code is for an error. - SetStatus(code codes.Code, description string) - - // SetName sets the Span name. - SetName(name string) - - // SetAttributes sets kv as attributes of the Span. If a key from kv - // already exists for an attribute of the Span it will be overwritten with - // the value contained in kv. 
- SetAttributes(kv ...attribute.KeyValue) - - // TracerProvider returns a TracerProvider that can be used to generate - // additional Spans on the same telemetry pipeline as the current Span. - TracerProvider() TracerProvider -} - -// Link is the relationship between two Spans. The relationship can be within -// the same Trace or across different Traces. -// -// For example, a Link is used in the following situations: -// -// 1. Batch Processing: A batch of operations may contain operations -// associated with one or more traces/spans. Since there can only be one -// parent SpanContext, a Link is used to keep reference to the -// SpanContext of all operations in the batch. -// 2. Public Endpoint: A SpanContext for an in incoming client request on a -// public endpoint should be considered untrusted. In such a case, a new -// trace with its own identity and sampling decision needs to be created, -// but this new trace needs to be related to the original trace in some -// form. A Link is used to keep reference to the original SpanContext and -// track the relationship. -type Link struct { - // SpanContext of the linked Span. - SpanContext SpanContext - - // Attributes describe the aspects of the link. - Attributes []attribute.KeyValue -} - -// LinkFromContext returns a link encapsulating the SpanContext in the provided ctx. -func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link { - return Link{ - SpanContext: SpanContextFromContext(ctx), - Attributes: attrs, - } -} - -// SpanKind is the role a Span plays in a Trace. -type SpanKind int - -// As a convenience, these match the proto definition, see -// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129 -// -// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()` -// to coerce a span kind to a valid value. 
-const ( - // SpanKindUnspecified is an unspecified SpanKind and is not a valid - // SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal - // if it is received. - SpanKindUnspecified SpanKind = 0 - // SpanKindInternal is a SpanKind for a Span that represents an internal - // operation within an application. - SpanKindInternal SpanKind = 1 - // SpanKindServer is a SpanKind for a Span that represents the operation - // of handling a request from a client. - SpanKindServer SpanKind = 2 - // SpanKindClient is a SpanKind for a Span that represents the operation - // of client making a request to a server. - SpanKindClient SpanKind = 3 - // SpanKindProducer is a SpanKind for a Span that represents the operation - // of a producer sending a message to a message broker. Unlike - // SpanKindClient and SpanKindServer, there is often no direct - // relationship between this kind of Span and a SpanKindConsumer kind. A - // SpanKindProducer Span will end once the message is accepted by the - // message broker which might not overlap with the processing of that - // message. - SpanKindProducer SpanKind = 4 - // SpanKindConsumer is a SpanKind for a Span that represents the operation - // of a consumer receiving a message from a message broker. Like - // SpanKindProducer Spans, there is often no direct relationship between - // this Span and the Span that produced the message. - SpanKindConsumer SpanKind = 5 -) - -// ValidateSpanKind returns a valid span kind value. This will coerce -// invalid values into the default value, SpanKindInternal. -func ValidateSpanKind(spanKind SpanKind) SpanKind { - switch spanKind { - case SpanKindInternal, - SpanKindServer, - SpanKindClient, - SpanKindProducer, - SpanKindConsumer: - // valid - return spanKind - default: - return SpanKindInternal - } -} - -// String returns the specified name of the SpanKind in lower-case. 
-func (sk SpanKind) String() string { - switch sk { - case SpanKindInternal: - return "internal" - case SpanKindServer: - return "server" - case SpanKindClient: - return "client" - case SpanKindProducer: - return "producer" - case SpanKindConsumer: - return "consumer" - default: - return "unspecified" - } -} - -// Tracer is the creator of Spans. -// -// Warning: methods may be added to this interface in minor releases. -type Tracer interface { - // Start creates a span and a context.Context containing the newly-created span. - // - // If the context.Context provided in `ctx` contains a Span then the newly-created - // Span will be a child of that span, otherwise it will be a root span. This behavior - // can be overridden by providing `WithNewRoot()` as a SpanOption, causing the - // newly-created Span to be a root span even if `ctx` contains a Span. - // - // When creating a Span it is recommended to provide all known span attributes using - // the `WithAttributes()` SpanOption as samplers will only have access to the - // attributes provided when a Span is created. - // - // Any Span that is created MUST also be ended. This is the responsibility of the user. - // Implementations of this API may leak memory or other resources if Spans are not ended. - Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span) -} - -// TracerProvider provides Tracers that are used by instrumentation code to -// trace computational workflows. -// -// A TracerProvider is the collection destination of all Spans from Tracers it -// provides, it represents a unique telemetry collection pipeline. How that -// pipeline is defined, meaning how those Spans are collected, processed, and -// where they are exported, depends on its implementation. Instrumentation -// authors do not need to define this implementation, rather just use the -// provided Tracers to instrument code. 
-// -// Commonly, instrumentation code will accept a TracerProvider implementation -// at runtime from its users or it can simply use the globally registered one -// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider). -// -// Warning: methods may be added to this interface in minor releases. -type TracerProvider interface { - // Tracer returns a unique Tracer scoped to be used by instrumentation code - // to trace computational workflows. The scope and identity of that - // instrumentation code is uniquely defined by the name and options passed. - // - // The passed name needs to uniquely identify instrumentation code. - // Therefore, it is recommended that name is the Go package name of the - // library providing instrumentation (note: not the code being - // instrumented). Instrumentation libraries can have multiple versions, - // therefore, the WithInstrumentationVersion option should be used to - // distinguish these different codebases. Additionally, instrumentation - // libraries may sometimes use traces to communicate different domains of - // workflow data (i.e. using spans to communicate workflow events only). If - // this is the case, the WithScopeAttributes option should be used to - // uniquely identify Tracers that handle the different domains of workflow - // data. - // - // If the same name and options are passed multiple times, the same Tracer - // will be returned (it is up to the implementation if this will be the - // same underlying instance of that Tracer or not). It is not necessary to - // call this multiple times with the same name and options to get an - // up-to-date Tracer. All implementations will ensure any TracerProvider - // configuration changes are propagated to all provided Tracers. - // - // If name is empty, then an implementation defined default name will be - // used instead. - // - // This method is safe to call concurrently. 
- Tracer(name string, options ...TracerOption) Tracer -} diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go deleted file mode 100644 index ca68a82e..00000000 --- a/vendor/go.opentelemetry.io/otel/trace/tracestate.go +++ /dev/null @@ -1,212 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace // import "go.opentelemetry.io/otel/trace" - -import ( - "encoding/json" - "fmt" - "regexp" - "strings" -) - -const ( - maxListMembers = 32 - - listDelimiter = "," - - // based on the W3C Trace Context specification, see - // https://www.w3.org/TR/trace-context-1/#tracestate-header - noTenantKeyFormat = `[a-z][_0-9a-z\-\*\/]{0,255}` - withTenantKeyFormat = `[a-z0-9][_0-9a-z\-\*\/]{0,240}@[a-z][_0-9a-z\-\*\/]{0,13}` - valueFormat = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]{0,255}[\x21-\x2b\x2d-\x3c\x3e-\x7e]` - - errInvalidKey errorConst = "invalid tracestate key" - errInvalidValue errorConst = "invalid tracestate value" - errInvalidMember errorConst = "invalid tracestate list-member" - errMemberNumber errorConst = "too many list-members in tracestate" - errDuplicate errorConst = "duplicate list-member in tracestate" -) - -var ( - keyRe = regexp.MustCompile(`^((` + noTenantKeyFormat + `)|(` + withTenantKeyFormat + `))$`) - valueRe = regexp.MustCompile(`^(` + valueFormat + `)$`) - memberRe = regexp.MustCompile(`^\s*((` + noTenantKeyFormat + `)|(` + 
withTenantKeyFormat + `))=(` + valueFormat + `)\s*$`) -) - -type member struct { - Key string - Value string -} - -func newMember(key, value string) (member, error) { - if !keyRe.MatchString(key) { - return member{}, fmt.Errorf("%w: %s", errInvalidKey, key) - } - if !valueRe.MatchString(value) { - return member{}, fmt.Errorf("%w: %s", errInvalidValue, value) - } - return member{Key: key, Value: value}, nil -} - -func parseMember(m string) (member, error) { - matches := memberRe.FindStringSubmatch(m) - if len(matches) != 5 { - return member{}, fmt.Errorf("%w: %s", errInvalidMember, m) - } - - return member{ - Key: matches[1], - Value: matches[4], - }, nil -} - -// String encodes member into a string compliant with the W3C Trace Context -// specification. -func (m member) String() string { - return fmt.Sprintf("%s=%s", m.Key, m.Value) -} - -// TraceState provides additional vendor-specific trace identification -// information across different distributed tracing systems. It represents an -// immutable list consisting of key/value pairs, each pair is referred to as a -// list-member. -// -// TraceState conforms to the W3C Trace Context specification -// (https://www.w3.org/TR/trace-context-1). All operations that create or copy -// a TraceState do so by validating all input and will only produce TraceState -// that conform to the specification. Specifically, this means that all -// list-member's key/value pairs are valid, no duplicate list-members exist, -// and the maximum number of list-members (32) is not exceeded. -type TraceState struct { //nolint:revive // revive complains about stutter of `trace.TraceState` - // list is the members in order. - list []member -} - -var _ json.Marshaler = TraceState{} - -// ParseTraceState attempts to decode a TraceState from the passed -// string. It returns an error if the input is invalid according to the W3C -// Trace Context specification. 
-func ParseTraceState(tracestate string) (TraceState, error) { - if tracestate == "" { - return TraceState{}, nil - } - - wrapErr := func(err error) error { - return fmt.Errorf("failed to parse tracestate: %w", err) - } - - var members []member - found := make(map[string]struct{}) - for _, memberStr := range strings.Split(tracestate, listDelimiter) { - if len(memberStr) == 0 { - continue - } - - m, err := parseMember(memberStr) - if err != nil { - return TraceState{}, wrapErr(err) - } - - if _, ok := found[m.Key]; ok { - return TraceState{}, wrapErr(errDuplicate) - } - found[m.Key] = struct{}{} - - members = append(members, m) - if n := len(members); n > maxListMembers { - return TraceState{}, wrapErr(errMemberNumber) - } - } - - return TraceState{list: members}, nil -} - -// MarshalJSON marshals the TraceState into JSON. -func (ts TraceState) MarshalJSON() ([]byte, error) { - return json.Marshal(ts.String()) -} - -// String encodes the TraceState into a string compliant with the W3C -// Trace Context specification. The returned string will be invalid if the -// TraceState contains any invalid members. -func (ts TraceState) String() string { - members := make([]string, len(ts.list)) - for i, m := range ts.list { - members[i] = m.String() - } - return strings.Join(members, listDelimiter) -} - -// Get returns the value paired with key from the corresponding TraceState -// list-member if it exists, otherwise an empty string is returned. -func (ts TraceState) Get(key string) string { - for _, member := range ts.list { - if member.Key == key { - return member.Value - } - } - - return "" -} - -// Insert adds a new list-member defined by the key/value pair to the -// TraceState. If a list-member already exists for the given key, that -// list-member's value is updated. The new or updated list-member is always -// moved to the beginning of the TraceState as specified by the W3C Trace -// Context specification. 
-// -// If key or value are invalid according to the W3C Trace Context -// specification an error is returned with the original TraceState. -// -// If adding a new list-member means the TraceState would have more members -// then is allowed, the new list-member will be inserted and the right-most -// list-member will be dropped in the returned TraceState. -func (ts TraceState) Insert(key, value string) (TraceState, error) { - m, err := newMember(key, value) - if err != nil { - return ts, err - } - - cTS := ts.Delete(key) - if cTS.Len()+1 <= maxListMembers { - cTS.list = append(cTS.list, member{}) - } - // When the number of members exceeds capacity, drop the "right-most". - copy(cTS.list[1:], cTS.list) - cTS.list[0] = m - - return cTS, nil -} - -// Delete returns a copy of the TraceState with the list-member identified by -// key removed. -func (ts TraceState) Delete(key string) TraceState { - members := make([]member, ts.Len()) - copy(members, ts.list) - for i, member := range ts.list { - if member.Key == key { - members = append(members[:i], members[i+1:]...) - // TraceState should contain no duplicate members. - break - } - } - return TraceState{list: members} -} - -// Len returns the number of list-members in the TraceState. -func (ts TraceState) Len() int { - return len(ts.list) -} diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go deleted file mode 100644 index c2217a28..00000000 --- a/vendor/go.opentelemetry.io/otel/version.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package otel // import "go.opentelemetry.io/otel" - -// Version is the current release version of OpenTelemetry in use. -func Version() string { - return "1.16.0" -} diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml deleted file mode 100644 index 9dc47532..00000000 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright The OpenTelemetry Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -module-sets: - stable-v1: - version: v1.16.0 - modules: - - go.opentelemetry.io/otel - - go.opentelemetry.io/otel/bridge/opentracing - - go.opentelemetry.io/otel/bridge/opentracing/test - - go.opentelemetry.io/otel/example/fib - - go.opentelemetry.io/otel/example/jaeger - - go.opentelemetry.io/otel/example/namedtracer - - go.opentelemetry.io/otel/example/otel-collector - - go.opentelemetry.io/otel/example/passthrough - - go.opentelemetry.io/otel/example/zipkin - - go.opentelemetry.io/otel/exporters/jaeger - - go.opentelemetry.io/otel/exporters/otlp/internal/retry - - go.opentelemetry.io/otel/exporters/otlp/otlptrace - - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc - - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp - - go.opentelemetry.io/otel/exporters/stdout/stdouttrace - - go.opentelemetry.io/otel/exporters/zipkin - - go.opentelemetry.io/otel/metric - - go.opentelemetry.io/otel/sdk - - go.opentelemetry.io/otel/trace - experimental-metrics: - version: v0.39.0 - modules: - - go.opentelemetry.io/otel/example/opencensus - - go.opentelemetry.io/otel/example/prometheus - - go.opentelemetry.io/otel/exporters/otlp/otlpmetric - - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc - - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp - - go.opentelemetry.io/otel/exporters/prometheus - - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric - - go.opentelemetry.io/otel/sdk/metric - - go.opentelemetry.io/otel/bridge/opencensus - - go.opentelemetry.io/otel/bridge/opencensus/test - - go.opentelemetry.io/otel/example/view - experimental-schema: - version: v0.0.4 - modules: - - go.opentelemetry.io/otel/schema -excluded-modules: - - go.opentelemetry.io/otel/internal/tools diff --git a/vendor/golang.org/x/arch/x86/x86asm/gnu.go b/vendor/golang.org/x/arch/x86/x86asm/gnu.go index 75cff72b..8eba1fd0 100644 --- a/vendor/golang.org/x/arch/x86/x86asm/gnu.go +++ b/vendor/golang.org/x/arch/x86/x86asm/gnu.go @@ -10,7 
+10,7 @@ import ( ) // GNUSyntax returns the GNU assembler syntax for the instruction, as defined by GNU binutils. -// This general form is often called ``AT&T syntax'' as a reference to AT&T System V Unix. +// This general form is often called “AT&T syntax” as a reference to AT&T System V Unix. func GNUSyntax(inst Inst, pc uint64, symname SymLookup) string { // Rewrite instruction to mimic GNU peculiarities. // Note that inst has been passed by value and contains diff --git a/vendor/golang.org/x/arch/x86/x86asm/inst.go b/vendor/golang.org/x/arch/x86/x86asm/inst.go index 4632b506..e98f1a84 100644 --- a/vendor/golang.org/x/arch/x86/x86asm/inst.go +++ b/vendor/golang.org/x/arch/x86/x86asm/inst.go @@ -144,7 +144,7 @@ type Arg interface { // the interface value instead of requiring an allocation. // A Reg is a single register. -// The zero Reg value has no name but indicates ``no register.'' +// The zero Reg value has no name but indicates “no register.” type Reg uint8 const ( diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go index 83f112c4..4756ad5f 100644 --- a/vendor/golang.org/x/sys/cpu/cpu.go +++ b/vendor/golang.org/x/sys/cpu/cpu.go @@ -38,7 +38,7 @@ var X86 struct { HasAVX512F bool // Advanced vector extension 512 Foundation Instructions HasAVX512CD bool // Advanced vector extension 512 Conflict Detection Instructions HasAVX512ER bool // Advanced vector extension 512 Exponential and Reciprocal Instructions - HasAVX512PF bool // Advanced vector extension 512 Prefetch Instructions Instructions + HasAVX512PF bool // Advanced vector extension 512 Prefetch Instructions HasAVX512VL bool // Advanced vector extension 512 Vector Length Extensions HasAVX512BW bool // Advanced vector extension 512 Byte and Word Instructions HasAVX512DQ bool // Advanced vector extension 512 Doubleword and Quadword Instructions @@ -54,6 +54,9 @@ var X86 struct { HasAVX512VBMI2 bool // Advanced vector extension 512 Vector Byte Manipulation Instructions 2 
HasAVX512BITALG bool // Advanced vector extension 512 Bit Algorithms HasAVX512BF16 bool // Advanced vector extension 512 BFloat16 Instructions + HasAMXTile bool // Advanced Matrix Extension Tile instructions + HasAMXInt8 bool // Advanced Matrix Extension Int8 instructions + HasAMXBF16 bool // Advanced Matrix Extension BFloat16 instructions HasBMI1 bool // Bit manipulation instruction set 1 HasBMI2 bool // Bit manipulation instruction set 2 HasCX16 bool // Compare and exchange 16 Bytes diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go index f5aacfc8..2dcde828 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_x86.go +++ b/vendor/golang.org/x/sys/cpu/cpu_x86.go @@ -37,6 +37,9 @@ func initOptions() { {Name: "avx512vbmi2", Feature: &X86.HasAVX512VBMI2}, {Name: "avx512bitalg", Feature: &X86.HasAVX512BITALG}, {Name: "avx512bf16", Feature: &X86.HasAVX512BF16}, + {Name: "amxtile", Feature: &X86.HasAMXTile}, + {Name: "amxint8", Feature: &X86.HasAMXInt8}, + {Name: "amxbf16", Feature: &X86.HasAMXBF16}, {Name: "bmi1", Feature: &X86.HasBMI1}, {Name: "bmi2", Feature: &X86.HasBMI2}, {Name: "cx16", Feature: &X86.HasCX16}, @@ -138,6 +141,10 @@ func archInit() { eax71, _, _, _ := cpuid(7, 1) X86.HasAVX512BF16 = isSet(5, eax71) } + + X86.HasAMXTile = isSet(24, edx7) + X86.HasAMXInt8 = isSet(25, edx7) + X86.HasAMXBF16 = isSet(22, edx7) } func isSet(bitpos uint, value uint32) bool { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index a730878e..0ba03019 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -2471,6 +2471,29 @@ func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask * return pselect6(nfd, r, w, e, mutableTimeout, kernelMask) } +//sys schedSetattr(pid int, attr *SchedAttr, flags uint) (err error) +//sys schedGetattr(pid int, attr *SchedAttr, size uint, flags uint) (err error) + +// SchedSetAttr 
is a wrapper for sched_setattr(2) syscall. +// https://man7.org/linux/man-pages/man2/sched_setattr.2.html +func SchedSetAttr(pid int, attr *SchedAttr, flags uint) error { + if attr == nil { + return EINVAL + } + attr.Size = SizeofSchedAttr + return schedSetattr(pid, attr, flags) +} + +// SchedGetAttr is a wrapper for sched_getattr(2) syscall. +// https://man7.org/linux/man-pages/man2/sched_getattr.2.html +func SchedGetAttr(pid int, flags uint) (*SchedAttr, error) { + attr := &SchedAttr{} + if err := schedGetattr(pid, attr, SizeofSchedAttr, flags); err != nil { + return nil, err + } + return attr, nil +} + /* * Unimplemented */ diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index 8bb30e7c..f6eda270 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -549,6 +549,9 @@ func SetNonblock(fd int, nonblocking bool) (err error) { if err != nil { return err } + if (flag&O_NONBLOCK != 0) == nonblocking { + return nil + } if nonblocking { flag |= O_NONBLOCK } else { diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 3784f402..0787a043 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -2821,6 +2821,23 @@ const ( RWF_SUPPORTED = 0x1f RWF_SYNC = 0x4 RWF_WRITE_LIFE_NOT_SET = 0x0 + SCHED_BATCH = 0x3 + SCHED_DEADLINE = 0x6 + SCHED_FIFO = 0x1 + SCHED_FLAG_ALL = 0x7f + SCHED_FLAG_DL_OVERRUN = 0x4 + SCHED_FLAG_KEEP_ALL = 0x18 + SCHED_FLAG_KEEP_PARAMS = 0x10 + SCHED_FLAG_KEEP_POLICY = 0x8 + SCHED_FLAG_RECLAIM = 0x2 + SCHED_FLAG_RESET_ON_FORK = 0x1 + SCHED_FLAG_UTIL_CLAMP = 0x60 + SCHED_FLAG_UTIL_CLAMP_MAX = 0x40 + SCHED_FLAG_UTIL_CLAMP_MIN = 0x20 + SCHED_IDLE = 0x5 + SCHED_NORMAL = 0x0 + SCHED_RESET_ON_FORK = 0x40000000 + SCHED_RR = 0x2 SCM_CREDENTIALS = 0x2 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x1d diff --git 
a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index a07321be..14ab34a5 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -2197,3 +2197,23 @@ func getresgid(rgid *_C_int, egid *_C_int, sgid *_C_int) { RawSyscallNoError(SYS_GETRESGID, uintptr(unsafe.Pointer(rgid)), uintptr(unsafe.Pointer(egid)), uintptr(unsafe.Pointer(sgid))) return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func schedSetattr(pid int, attr *SchedAttr, flags uint) (err error) { + _, _, e1 := Syscall(SYS_SCHED_SETATTR, uintptr(pid), uintptr(unsafe.Pointer(attr)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func schedGetattr(pid int, attr *SchedAttr, size uint, flags uint) (err error) { + _, _, e1 := Syscall6(SYS_SCHED_GETATTR, uintptr(pid), uintptr(unsafe.Pointer(attr)), uintptr(size), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 26ef52aa..494493c7 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -5868,3 +5868,18 @@ const ( VIRTIO_NET_HDR_GSO_UDP_L4 = 0x5 VIRTIO_NET_HDR_GSO_ECN = 0x80 ) + +type SchedAttr struct { + Size uint32 + Policy uint32 + Flags uint64 + Nice int32 + Priority uint32 + Runtime uint64 + Deadline uint64 + Period uint64 + Util_min uint32 + Util_max uint32 +} + +const SizeofSchedAttr = 0x38 diff --git a/vendor/golang.org/x/sys/windows/registry/key.go b/vendor/golang.org/x/sys/windows/registry/key.go deleted file mode 100644 index 6c8d97b6..00000000 --- a/vendor/golang.org/x/sys/windows/registry/key.go +++ /dev/null @@ -1,206 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build windows -// +build windows - -// Package registry provides access to the Windows registry. -// -// Here is a simple example, opening a registry key and reading a string value from it. -// -// k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) -// if err != nil { -// log.Fatal(err) -// } -// defer k.Close() -// -// s, _, err := k.GetStringValue("SystemRoot") -// if err != nil { -// log.Fatal(err) -// } -// fmt.Printf("Windows system root is %q\n", s) -package registry - -import ( - "io" - "runtime" - "syscall" - "time" -) - -const ( - // Registry key security and access rights. - // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms724878.aspx - // for details. - ALL_ACCESS = 0xf003f - CREATE_LINK = 0x00020 - CREATE_SUB_KEY = 0x00004 - ENUMERATE_SUB_KEYS = 0x00008 - EXECUTE = 0x20019 - NOTIFY = 0x00010 - QUERY_VALUE = 0x00001 - READ = 0x20019 - SET_VALUE = 0x00002 - WOW64_32KEY = 0x00200 - WOW64_64KEY = 0x00100 - WRITE = 0x20006 -) - -// Key is a handle to an open Windows registry key. -// Keys can be obtained by calling OpenKey; there are -// also some predefined root keys such as CURRENT_USER. -// Keys can be used directly in the Windows API. -type Key syscall.Handle - -const ( - // Windows defines some predefined root keys that are always open. - // An application can use these keys as entry points to the registry. - // Normally these keys are used in OpenKey to open new keys, - // but they can also be used anywhere a Key is required. - CLASSES_ROOT = Key(syscall.HKEY_CLASSES_ROOT) - CURRENT_USER = Key(syscall.HKEY_CURRENT_USER) - LOCAL_MACHINE = Key(syscall.HKEY_LOCAL_MACHINE) - USERS = Key(syscall.HKEY_USERS) - CURRENT_CONFIG = Key(syscall.HKEY_CURRENT_CONFIG) - PERFORMANCE_DATA = Key(syscall.HKEY_PERFORMANCE_DATA) -) - -// Close closes open key k. 
-func (k Key) Close() error { - return syscall.RegCloseKey(syscall.Handle(k)) -} - -// OpenKey opens a new key with path name relative to key k. -// It accepts any open key, including CURRENT_USER and others, -// and returns the new key and an error. -// The access parameter specifies desired access rights to the -// key to be opened. -func OpenKey(k Key, path string, access uint32) (Key, error) { - p, err := syscall.UTF16PtrFromString(path) - if err != nil { - return 0, err - } - var subkey syscall.Handle - err = syscall.RegOpenKeyEx(syscall.Handle(k), p, 0, access, &subkey) - if err != nil { - return 0, err - } - return Key(subkey), nil -} - -// OpenRemoteKey opens a predefined registry key on another -// computer pcname. The key to be opened is specified by k, but -// can only be one of LOCAL_MACHINE, PERFORMANCE_DATA or USERS. -// If pcname is "", OpenRemoteKey returns local computer key. -func OpenRemoteKey(pcname string, k Key) (Key, error) { - var err error - var p *uint16 - if pcname != "" { - p, err = syscall.UTF16PtrFromString(`\\` + pcname) - if err != nil { - return 0, err - } - } - var remoteKey syscall.Handle - err = regConnectRegistry(p, syscall.Handle(k), &remoteKey) - if err != nil { - return 0, err - } - return Key(remoteKey), nil -} - -// ReadSubKeyNames returns the names of subkeys of key k. -// The parameter n controls the number of returned names, -// analogous to the way os.File.Readdirnames works. -func (k Key) ReadSubKeyNames(n int) ([]string, error) { - // RegEnumKeyEx must be called repeatedly and to completion. - // During this time, this goroutine cannot migrate away from - // its current thread. See https://golang.org/issue/49320 and - // https://golang.org/issue/49466. 
- runtime.LockOSThread() - defer runtime.UnlockOSThread() - - names := make([]string, 0) - // Registry key size limit is 255 bytes and described there: - // https://msdn.microsoft.com/library/windows/desktop/ms724872.aspx - buf := make([]uint16, 256) //plus extra room for terminating zero byte -loopItems: - for i := uint32(0); ; i++ { - if n > 0 { - if len(names) == n { - return names, nil - } - } - l := uint32(len(buf)) - for { - err := syscall.RegEnumKeyEx(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil) - if err == nil { - break - } - if err == syscall.ERROR_MORE_DATA { - // Double buffer size and try again. - l = uint32(2 * len(buf)) - buf = make([]uint16, l) - continue - } - if err == _ERROR_NO_MORE_ITEMS { - break loopItems - } - return names, err - } - names = append(names, syscall.UTF16ToString(buf[:l])) - } - if n > len(names) { - return names, io.EOF - } - return names, nil -} - -// CreateKey creates a key named path under open key k. -// CreateKey returns the new key and a boolean flag that reports -// whether the key already existed. -// The access parameter specifies the access rights for the key -// to be created. -func CreateKey(k Key, path string, access uint32) (newk Key, openedExisting bool, err error) { - var h syscall.Handle - var d uint32 - err = regCreateKeyEx(syscall.Handle(k), syscall.StringToUTF16Ptr(path), - 0, nil, _REG_OPTION_NON_VOLATILE, access, nil, &h, &d) - if err != nil { - return 0, false, err - } - return Key(h), d == _REG_OPENED_EXISTING_KEY, nil -} - -// DeleteKey deletes the subkey path of key k and its values. -func DeleteKey(k Key, path string) error { - return regDeleteKey(syscall.Handle(k), syscall.StringToUTF16Ptr(path)) -} - -// A KeyInfo describes the statistics of a key. It is returned by Stat. 
-type KeyInfo struct { - SubKeyCount uint32 - MaxSubKeyLen uint32 // size of the key's subkey with the longest name, in Unicode characters, not including the terminating zero byte - ValueCount uint32 - MaxValueNameLen uint32 // size of the key's longest value name, in Unicode characters, not including the terminating zero byte - MaxValueLen uint32 // longest data component among the key's values, in bytes - lastWriteTime syscall.Filetime -} - -// ModTime returns the key's last write time. -func (ki *KeyInfo) ModTime() time.Time { - return time.Unix(0, ki.lastWriteTime.Nanoseconds()) -} - -// Stat retrieves information about the open key k. -func (k Key) Stat() (*KeyInfo, error) { - var ki KeyInfo - err := syscall.RegQueryInfoKey(syscall.Handle(k), nil, nil, nil, - &ki.SubKeyCount, &ki.MaxSubKeyLen, nil, &ki.ValueCount, - &ki.MaxValueNameLen, &ki.MaxValueLen, nil, &ki.lastWriteTime) - if err != nil { - return nil, err - } - return &ki, nil -} diff --git a/vendor/golang.org/x/sys/windows/registry/mksyscall.go b/vendor/golang.org/x/sys/windows/registry/mksyscall.go deleted file mode 100644 index ee74927d..00000000 --- a/vendor/golang.org/x/sys/windows/registry/mksyscall.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build generate -// +build generate - -package registry - -//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go syscall.go diff --git a/vendor/golang.org/x/sys/windows/registry/syscall.go b/vendor/golang.org/x/sys/windows/registry/syscall.go deleted file mode 100644 index 41733512..00000000 --- a/vendor/golang.org/x/sys/windows/registry/syscall.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build windows -// +build windows - -package registry - -import "syscall" - -const ( - _REG_OPTION_NON_VOLATILE = 0 - - _REG_CREATED_NEW_KEY = 1 - _REG_OPENED_EXISTING_KEY = 2 - - _ERROR_NO_MORE_ITEMS syscall.Errno = 259 -) - -func LoadRegLoadMUIString() error { - return procRegLoadMUIStringW.Find() -} - -//sys regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) = advapi32.RegCreateKeyExW -//sys regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) = advapi32.RegDeleteKeyW -//sys regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) = advapi32.RegSetValueExW -//sys regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) = advapi32.RegEnumValueW -//sys regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) = advapi32.RegDeleteValueW -//sys regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) = advapi32.RegLoadMUIStringW -//sys regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) = advapi32.RegConnectRegistryW - -//sys expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) = kernel32.ExpandEnvironmentStringsW diff --git a/vendor/golang.org/x/sys/windows/registry/value.go b/vendor/golang.org/x/sys/windows/registry/value.go deleted file mode 100644 index 2789f6f1..00000000 --- a/vendor/golang.org/x/sys/windows/registry/value.go +++ /dev/null @@ -1,387 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build windows -// +build windows - -package registry - -import ( - "errors" - "io" - "syscall" - "unicode/utf16" - "unsafe" -) - -const ( - // Registry value types. - NONE = 0 - SZ = 1 - EXPAND_SZ = 2 - BINARY = 3 - DWORD = 4 - DWORD_BIG_ENDIAN = 5 - LINK = 6 - MULTI_SZ = 7 - RESOURCE_LIST = 8 - FULL_RESOURCE_DESCRIPTOR = 9 - RESOURCE_REQUIREMENTS_LIST = 10 - QWORD = 11 -) - -var ( - // ErrShortBuffer is returned when the buffer was too short for the operation. - ErrShortBuffer = syscall.ERROR_MORE_DATA - - // ErrNotExist is returned when a registry key or value does not exist. - ErrNotExist = syscall.ERROR_FILE_NOT_FOUND - - // ErrUnexpectedType is returned by Get*Value when the value's type was unexpected. - ErrUnexpectedType = errors.New("unexpected key value type") -) - -// GetValue retrieves the type and data for the specified value associated -// with an open key k. It fills up buffer buf and returns the retrieved -// byte count n. If buf is too small to fit the stored value it returns -// ErrShortBuffer error along with the required buffer size n. -// If no buffer is provided, it returns true and actual buffer size n. -// If no buffer is provided, GetValue returns the value's type only. -// If the value does not exist, the error returned is ErrNotExist. -// -// GetValue is a low level function. If value's type is known, use the appropriate -// Get*Value function instead. 
-func (k Key) GetValue(name string, buf []byte) (n int, valtype uint32, err error) { - pname, err := syscall.UTF16PtrFromString(name) - if err != nil { - return 0, 0, err - } - var pbuf *byte - if len(buf) > 0 { - pbuf = (*byte)(unsafe.Pointer(&buf[0])) - } - l := uint32(len(buf)) - err = syscall.RegQueryValueEx(syscall.Handle(k), pname, nil, &valtype, pbuf, &l) - if err != nil { - return int(l), valtype, err - } - return int(l), valtype, nil -} - -func (k Key) getValue(name string, buf []byte) (data []byte, valtype uint32, err error) { - p, err := syscall.UTF16PtrFromString(name) - if err != nil { - return nil, 0, err - } - var t uint32 - n := uint32(len(buf)) - for { - err = syscall.RegQueryValueEx(syscall.Handle(k), p, nil, &t, (*byte)(unsafe.Pointer(&buf[0])), &n) - if err == nil { - return buf[:n], t, nil - } - if err != syscall.ERROR_MORE_DATA { - return nil, 0, err - } - if n <= uint32(len(buf)) { - return nil, 0, err - } - buf = make([]byte, n) - } -} - -// GetStringValue retrieves the string value for the specified -// value name associated with an open key k. It also returns the value's type. -// If value does not exist, GetStringValue returns ErrNotExist. -// If value is not SZ or EXPAND_SZ, it will return the correct value -// type and ErrUnexpectedType. -func (k Key) GetStringValue(name string) (val string, valtype uint32, err error) { - data, typ, err2 := k.getValue(name, make([]byte, 64)) - if err2 != nil { - return "", typ, err2 - } - switch typ { - case SZ, EXPAND_SZ: - default: - return "", typ, ErrUnexpectedType - } - if len(data) == 0 { - return "", typ, nil - } - u := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[: len(data)/2 : len(data)/2] - return syscall.UTF16ToString(u), typ, nil -} - -// GetMUIStringValue retrieves the localized string value for -// the specified value name associated with an open key k. -// If the value name doesn't exist or the localized string value -// can't be resolved, GetMUIStringValue returns ErrNotExist. 
-// GetMUIStringValue panics if the system doesn't support -// regLoadMUIString; use LoadRegLoadMUIString to check if -// regLoadMUIString is supported before calling this function. -func (k Key) GetMUIStringValue(name string) (string, error) { - pname, err := syscall.UTF16PtrFromString(name) - if err != nil { - return "", err - } - - buf := make([]uint16, 1024) - var buflen uint32 - var pdir *uint16 - - err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) - if err == syscall.ERROR_FILE_NOT_FOUND { // Try fallback path - - // Try to resolve the string value using the system directory as - // a DLL search path; this assumes the string value is of the form - // @[path]\dllname,-strID but with no path given, e.g. @tzres.dll,-320. - - // This approach works with tzres.dll but may have to be revised - // in the future to allow callers to provide custom search paths. - - var s string - s, err = ExpandString("%SystemRoot%\\system32\\") - if err != nil { - return "", err - } - pdir, err = syscall.UTF16PtrFromString(s) - if err != nil { - return "", err - } - - err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) - } - - for err == syscall.ERROR_MORE_DATA { // Grow buffer if needed - if buflen <= uint32(len(buf)) { - break // Buffer not growing, assume race; break - } - buf = make([]uint16, buflen) - err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) - } - - if err != nil { - return "", err - } - - return syscall.UTF16ToString(buf), nil -} - -// ExpandString expands environment-variable strings and replaces -// them with the values defined for the current user. -// Use ExpandString to expand EXPAND_SZ strings. 
-func ExpandString(value string) (string, error) { - if value == "" { - return "", nil - } - p, err := syscall.UTF16PtrFromString(value) - if err != nil { - return "", err - } - r := make([]uint16, 100) - for { - n, err := expandEnvironmentStrings(p, &r[0], uint32(len(r))) - if err != nil { - return "", err - } - if n <= uint32(len(r)) { - return syscall.UTF16ToString(r[:n]), nil - } - r = make([]uint16, n) - } -} - -// GetStringsValue retrieves the []string value for the specified -// value name associated with an open key k. It also returns the value's type. -// If value does not exist, GetStringsValue returns ErrNotExist. -// If value is not MULTI_SZ, it will return the correct value -// type and ErrUnexpectedType. -func (k Key) GetStringsValue(name string) (val []string, valtype uint32, err error) { - data, typ, err2 := k.getValue(name, make([]byte, 64)) - if err2 != nil { - return nil, typ, err2 - } - if typ != MULTI_SZ { - return nil, typ, ErrUnexpectedType - } - if len(data) == 0 { - return nil, typ, nil - } - p := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[: len(data)/2 : len(data)/2] - if len(p) == 0 { - return nil, typ, nil - } - if p[len(p)-1] == 0 { - p = p[:len(p)-1] // remove terminating null - } - val = make([]string, 0, 5) - from := 0 - for i, c := range p { - if c == 0 { - val = append(val, string(utf16.Decode(p[from:i]))) - from = i + 1 - } - } - return val, typ, nil -} - -// GetIntegerValue retrieves the integer value for the specified -// value name associated with an open key k. It also returns the value's type. -// If value does not exist, GetIntegerValue returns ErrNotExist. -// If value is not DWORD or QWORD, it will return the correct value -// type and ErrUnexpectedType. 
-func (k Key) GetIntegerValue(name string) (val uint64, valtype uint32, err error) { - data, typ, err2 := k.getValue(name, make([]byte, 8)) - if err2 != nil { - return 0, typ, err2 - } - switch typ { - case DWORD: - if len(data) != 4 { - return 0, typ, errors.New("DWORD value is not 4 bytes long") - } - var val32 uint32 - copy((*[4]byte)(unsafe.Pointer(&val32))[:], data) - return uint64(val32), DWORD, nil - case QWORD: - if len(data) != 8 { - return 0, typ, errors.New("QWORD value is not 8 bytes long") - } - copy((*[8]byte)(unsafe.Pointer(&val))[:], data) - return val, QWORD, nil - default: - return 0, typ, ErrUnexpectedType - } -} - -// GetBinaryValue retrieves the binary value for the specified -// value name associated with an open key k. It also returns the value's type. -// If value does not exist, GetBinaryValue returns ErrNotExist. -// If value is not BINARY, it will return the correct value -// type and ErrUnexpectedType. -func (k Key) GetBinaryValue(name string) (val []byte, valtype uint32, err error) { - data, typ, err2 := k.getValue(name, make([]byte, 64)) - if err2 != nil { - return nil, typ, err2 - } - if typ != BINARY { - return nil, typ, ErrUnexpectedType - } - return data, typ, nil -} - -func (k Key) setValue(name string, valtype uint32, data []byte) error { - p, err := syscall.UTF16PtrFromString(name) - if err != nil { - return err - } - if len(data) == 0 { - return regSetValueEx(syscall.Handle(k), p, 0, valtype, nil, 0) - } - return regSetValueEx(syscall.Handle(k), p, 0, valtype, &data[0], uint32(len(data))) -} - -// SetDWordValue sets the data and type of a name value -// under key k to value and DWORD. -func (k Key) SetDWordValue(name string, value uint32) error { - return k.setValue(name, DWORD, (*[4]byte)(unsafe.Pointer(&value))[:]) -} - -// SetQWordValue sets the data and type of a name value -// under key k to value and QWORD. 
-func (k Key) SetQWordValue(name string, value uint64) error { - return k.setValue(name, QWORD, (*[8]byte)(unsafe.Pointer(&value))[:]) -} - -func (k Key) setStringValue(name string, valtype uint32, value string) error { - v, err := syscall.UTF16FromString(value) - if err != nil { - return err - } - buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[: len(v)*2 : len(v)*2] - return k.setValue(name, valtype, buf) -} - -// SetStringValue sets the data and type of a name value -// under key k to value and SZ. The value must not contain a zero byte. -func (k Key) SetStringValue(name, value string) error { - return k.setStringValue(name, SZ, value) -} - -// SetExpandStringValue sets the data and type of a name value -// under key k to value and EXPAND_SZ. The value must not contain a zero byte. -func (k Key) SetExpandStringValue(name, value string) error { - return k.setStringValue(name, EXPAND_SZ, value) -} - -// SetStringsValue sets the data and type of a name value -// under key k to value and MULTI_SZ. The value strings -// must not contain a zero byte. -func (k Key) SetStringsValue(name string, value []string) error { - ss := "" - for _, s := range value { - for i := 0; i < len(s); i++ { - if s[i] == 0 { - return errors.New("string cannot have 0 inside") - } - } - ss += s + "\x00" - } - v := utf16.Encode([]rune(ss + "\x00")) - buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[: len(v)*2 : len(v)*2] - return k.setValue(name, MULTI_SZ, buf) -} - -// SetBinaryValue sets the data and type of a name value -// under key k to value and BINARY. -func (k Key) SetBinaryValue(name string, value []byte) error { - return k.setValue(name, BINARY, value) -} - -// DeleteValue removes a named value from the key k. -func (k Key) DeleteValue(name string) error { - return regDeleteValue(syscall.Handle(k), syscall.StringToUTF16Ptr(name)) -} - -// ReadValueNames returns the value names of key k. 
-// The parameter n controls the number of returned names, -// analogous to the way os.File.Readdirnames works. -func (k Key) ReadValueNames(n int) ([]string, error) { - ki, err := k.Stat() - if err != nil { - return nil, err - } - names := make([]string, 0, ki.ValueCount) - buf := make([]uint16, ki.MaxValueNameLen+1) // extra room for terminating null character -loopItems: - for i := uint32(0); ; i++ { - if n > 0 { - if len(names) == n { - return names, nil - } - } - l := uint32(len(buf)) - for { - err := regEnumValue(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil) - if err == nil { - break - } - if err == syscall.ERROR_MORE_DATA { - // Double buffer size and try again. - l = uint32(2 * len(buf)) - buf = make([]uint16, l) - continue - } - if err == _ERROR_NO_MORE_ITEMS { - break loopItems - } - return names, err - } - names = append(names, syscall.UTF16ToString(buf[:l])) - } - if n > len(names) { - return names, io.EOF - } - return names, nil -} diff --git a/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go deleted file mode 100644 index fc1835d8..00000000 --- a/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go +++ /dev/null @@ -1,117 +0,0 @@ -// Code generated by 'go generate'; DO NOT EDIT. - -package registry - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -var _ unsafe.Pointer - -// Do the interface allocations only once for common -// Errno values. -const ( - errnoERROR_IO_PENDING = 997 -) - -var ( - errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) - errERROR_EINVAL error = syscall.EINVAL -) - -// errnoErr returns common boxed Errno values, to prevent -// allocations at runtime. -func errnoErr(e syscall.Errno) error { - switch e { - case 0: - return errERROR_EINVAL - case errnoERROR_IO_PENDING: - return errERROR_IO_PENDING - } - // TODO: add more here, after collecting data on the common - // error values see on Windows. 
(perhaps when running - // all.bat?) - return e -} - -var ( - modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") - modkernel32 = windows.NewLazySystemDLL("kernel32.dll") - - procRegConnectRegistryW = modadvapi32.NewProc("RegConnectRegistryW") - procRegCreateKeyExW = modadvapi32.NewProc("RegCreateKeyExW") - procRegDeleteKeyW = modadvapi32.NewProc("RegDeleteKeyW") - procRegDeleteValueW = modadvapi32.NewProc("RegDeleteValueW") - procRegEnumValueW = modadvapi32.NewProc("RegEnumValueW") - procRegLoadMUIStringW = modadvapi32.NewProc("RegLoadMUIStringW") - procRegSetValueExW = modadvapi32.NewProc("RegSetValueExW") - procExpandEnvironmentStringsW = modkernel32.NewProc("ExpandEnvironmentStringsW") -) - -func regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) { - r0, _, _ := syscall.Syscall(procRegConnectRegistryW.Addr(), 3, uintptr(unsafe.Pointer(machinename)), uintptr(key), uintptr(unsafe.Pointer(result))) - if r0 != 0 { - regerrno = syscall.Errno(r0) - } - return -} - -func regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) { - r0, _, _ := syscall.Syscall9(procRegCreateKeyExW.Addr(), 9, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(reserved), uintptr(unsafe.Pointer(class)), uintptr(options), uintptr(desired), uintptr(unsafe.Pointer(sa)), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition))) - if r0 != 0 { - regerrno = syscall.Errno(r0) - } - return -} - -func regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) { - r0, _, _ := syscall.Syscall(procRegDeleteKeyW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(subkey)), 0) - if r0 != 0 { - regerrno = syscall.Errno(r0) - } - return -} - -func regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) { - r0, _, _ := syscall.Syscall(procRegDeleteValueW.Addr(), 2, 
uintptr(key), uintptr(unsafe.Pointer(name)), 0) - if r0 != 0 { - regerrno = syscall.Errno(r0) - } - return -} - -func regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) { - r0, _, _ := syscall.Syscall9(procRegEnumValueW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen)), 0) - if r0 != 0 { - regerrno = syscall.Errno(r0) - } - return -} - -func regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) { - r0, _, _ := syscall.Syscall9(procRegLoadMUIStringW.Addr(), 7, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(unsafe.Pointer(buflenCopied)), uintptr(flags), uintptr(unsafe.Pointer(dir)), 0, 0) - if r0 != 0 { - regerrno = syscall.Errno(r0) - } - return -} - -func regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) { - r0, _, _ := syscall.Syscall6(procRegSetValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(valueName)), uintptr(reserved), uintptr(vtype), uintptr(unsafe.Pointer(buf)), uintptr(bufsize)) - if r0 != 0 { - regerrno = syscall.Errno(r0) - } - return -} - -func expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procExpandEnvironmentStringsW.Addr(), 3, uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size)) - n = uint32(r0) - if n == 0 { - err = errnoErr(e1) - } - return -} diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 373d1638..67bad092 100644 --- 
a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -216,7 +216,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **uint16) (ret error) = shell32.SHGetKnownFolderPath //sys TerminateProcess(handle Handle, exitcode uint32) (err error) //sys GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) -//sys GetStartupInfo(startupInfo *StartupInfo) (err error) = GetStartupInfoW +//sys getStartupInfo(startupInfo *StartupInfo) = GetStartupInfoW //sys GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error) //sys DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error) //sys WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, err error) [failretval==0xffffffff] @@ -437,6 +437,10 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys DwmGetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) = dwmapi.DwmGetWindowAttribute //sys DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) = dwmapi.DwmSetWindowAttribute +// Windows Multimedia API +//sys TimeBeginPeriod (period uint32) (err error) [failretval != 0] = winmm.timeBeginPeriod +//sys TimeEndPeriod (period uint32) (err error) [failretval != 0] = winmm.timeEndPeriod + // syscall interface implementation for other packages // GetCurrentProcess returns the handle for the current process. 
@@ -1624,6 +1628,11 @@ func SetConsoleCursorPosition(console Handle, position Coord) error { return setConsoleCursorPosition(console, *((*uint32)(unsafe.Pointer(&position)))) } +func GetStartupInfo(startupInfo *StartupInfo) error { + getStartupInfo(startupInfo) + return nil +} + func (s NTStatus) Errno() syscall.Errno { return rtlNtStatusToDosErrorNoTeb(s) } diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 566dd3e3..5c385580 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -55,6 +55,7 @@ var ( moduser32 = NewLazySystemDLL("user32.dll") moduserenv = NewLazySystemDLL("userenv.dll") modversion = NewLazySystemDLL("version.dll") + modwinmm = NewLazySystemDLL("winmm.dll") modwintrust = NewLazySystemDLL("wintrust.dll") modws2_32 = NewLazySystemDLL("ws2_32.dll") modwtsapi32 = NewLazySystemDLL("wtsapi32.dll") @@ -468,6 +469,8 @@ var ( procGetFileVersionInfoSizeW = modversion.NewProc("GetFileVersionInfoSizeW") procGetFileVersionInfoW = modversion.NewProc("GetFileVersionInfoW") procVerQueryValueW = modversion.NewProc("VerQueryValueW") + proctimeBeginPeriod = modwinmm.NewProc("timeBeginPeriod") + proctimeEndPeriod = modwinmm.NewProc("timeEndPeriod") procWinVerifyTrustEx = modwintrust.NewProc("WinVerifyTrustEx") procFreeAddrInfoW = modws2_32.NewProc("FreeAddrInfoW") procGetAddrInfoW = modws2_32.NewProc("GetAddrInfoW") @@ -2367,11 +2370,8 @@ func GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uin return } -func GetStartupInfo(startupInfo *StartupInfo) (err error) { - r1, _, e1 := syscall.Syscall(procGetStartupInfoW.Addr(), 1, uintptr(unsafe.Pointer(startupInfo)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } +func getStartupInfo(startupInfo *StartupInfo) { + syscall.Syscall(procGetStartupInfoW.Addr(), 1, uintptr(unsafe.Pointer(startupInfo)), 0, 0) return } @@ -4017,6 +4017,22 @@ func _VerQueryValue(block 
unsafe.Pointer, subBlock *uint16, pointerToBufferPoint return } +func TimeBeginPeriod(period uint32) (err error) { + r1, _, e1 := syscall.Syscall(proctimeBeginPeriod.Addr(), 1, uintptr(period), 0, 0) + if r1 != 0 { + err = errnoErr(e1) + } + return +} + +func TimeEndPeriod(period uint32) (err error) { + r1, _, e1 := syscall.Syscall(proctimeEndPeriod.Addr(), 1, uintptr(period), 0, 0) + if r1 != 0 { + err = errnoErr(e1) + } + return +} + func WinVerifyTrustEx(hwnd HWND, actionId *GUID, data *WinTrustData) (ret error) { r0, _, _ := syscall.Syscall(procWinVerifyTrustEx.Addr(), 3, uintptr(hwnd), uintptr(unsafe.Pointer(actionId)), uintptr(unsafe.Pointer(data))) if r0 != 0 { diff --git a/vendor/golang.org/x/text/unicode/norm/trie.go b/vendor/golang.org/x/text/unicode/norm/trie.go index 423386bf..e4250ae2 100644 --- a/vendor/golang.org/x/text/unicode/norm/trie.go +++ b/vendor/golang.org/x/text/unicode/norm/trie.go @@ -29,7 +29,7 @@ var ( nfkcData = newNfkcTrie(0) ) -// lookupValue determines the type of block n and looks up the value for b. +// lookup determines the type of block n and looks up the value for b. // For n < t.cutoff, the block is a simple lookup table. Otherwise, the block // is a list of ranges with an accompanying value. Given a matching range r, // the value for b is by r.value + (b - r.lo) * stride. 
diff --git a/vendor/modules.txt b/vendor/modules.txt index 118e221d..bf5d325a 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,3 +1,5 @@ +# github.com/BurntSushi/toml v1.2.0 +## explicit; go 1.16 # github.com/MercuryEngineering/CookieMonster v0.0.0-20180304172713-1584578b3403 ## explicit github.com/MercuryEngineering/CookieMonster @@ -63,12 +65,6 @@ github.com/clbanning/mxj # github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f ## explicit github.com/dgryski/go-rendezvous -# github.com/fatih/color v1.15.0 -## explicit; go 1.17 -github.com/fatih/color -# github.com/fsnotify/fsnotify v1.6.0 -## explicit; go 1.16 -github.com/fsnotify/fsnotify # github.com/gabriel-vasile/mimetype v1.4.2 ## explicit; go 1.20 github.com/gabriel-vasile/mimetype @@ -85,13 +81,6 @@ github.com/gin-gonic/gin/binding github.com/gin-gonic/gin/internal/bytesconv github.com/gin-gonic/gin/internal/json github.com/gin-gonic/gin/render -# github.com/go-logr/logr v1.2.4 -## explicit; go 1.16 -github.com/go-logr/logr -github.com/go-logr/logr/funcr -# github.com/go-logr/stdr v1.2.2 -## explicit; go 1.16 -github.com/go-logr/stdr # github.com/go-ole/go-ole v1.3.0 ## explicit; go 1.12 github.com/go-ole/go-ole @@ -105,7 +94,7 @@ github.com/go-playground/locales/zh # github.com/go-playground/universal-translator v0.18.1 ## explicit; go 1.18 github.com/go-playground/universal-translator -# github.com/go-playground/validator/v10 v10.15.1 +# github.com/go-playground/validator/v10 v10.15.3 ## explicit; go 1.18 github.com/go-playground/validator/v10 github.com/go-playground/validator/v10/translations/en @@ -124,58 +113,11 @@ github.com/goccy/go-json/internal/encoder/vm_color_indent github.com/goccy/go-json/internal/encoder/vm_indent github.com/goccy/go-json/internal/errors github.com/goccy/go-json/internal/runtime -# github.com/gogf/gf/v2 v2.5.2 -## explicit; go 1.18 -github.com/gogf/gf/v2/container/garray -github.com/gogf/gf/v2/container/glist -github.com/gogf/gf/v2/container/gmap 
-github.com/gogf/gf/v2/container/gpool -github.com/gogf/gf/v2/container/gqueue -github.com/gogf/gf/v2/container/gset -github.com/gogf/gf/v2/container/gtree -github.com/gogf/gf/v2/container/gtype -github.com/gogf/gf/v2/container/gvar -github.com/gogf/gf/v2/database/gredis -github.com/gogf/gf/v2/debug/gdebug -github.com/gogf/gf/v2/encoding/gbinary -github.com/gogf/gf/v2/encoding/gcompress -github.com/gogf/gf/v2/encoding/ghash -github.com/gogf/gf/v2/errors/gcode -github.com/gogf/gf/v2/errors/gerror -github.com/gogf/gf/v2/internal/command -github.com/gogf/gf/v2/internal/consts -github.com/gogf/gf/v2/internal/deepcopy -github.com/gogf/gf/v2/internal/empty -github.com/gogf/gf/v2/internal/intlog -github.com/gogf/gf/v2/internal/json -github.com/gogf/gf/v2/internal/reflection -github.com/gogf/gf/v2/internal/rwmutex -github.com/gogf/gf/v2/internal/tracing -github.com/gogf/gf/v2/internal/utils -github.com/gogf/gf/v2/net/gipv4 -github.com/gogf/gf/v2/net/gtrace -github.com/gogf/gf/v2/net/gtrace/internal/provider -github.com/gogf/gf/v2/os/gcache -github.com/gogf/gf/v2/os/gcron -github.com/gogf/gf/v2/os/gctx -github.com/gogf/gf/v2/os/gfile -github.com/gogf/gf/v2/os/gfpool -github.com/gogf/gf/v2/os/gfsnotify -github.com/gogf/gf/v2/os/glog -github.com/gogf/gf/v2/os/gmlock -github.com/gogf/gf/v2/os/grpool -github.com/gogf/gf/v2/os/gstructs -github.com/gogf/gf/v2/os/gtime -github.com/gogf/gf/v2/os/gtimer -github.com/gogf/gf/v2/text/gregex -github.com/gogf/gf/v2/text/gstr -github.com/gogf/gf/v2/util/gconv -github.com/gogf/gf/v2/util/grand -github.com/gogf/gf/v2/util/gtag -github.com/gogf/gf/v2/util/gutil # github.com/golang/snappy v0.0.4 ## explicit github.com/golang/snappy +# github.com/google/go-cmp v0.5.9 +## explicit; go 1.13 # github.com/google/go-querystring v1.1.0 ## explicit; go 1.10 github.com/google/go-querystring/query @@ -223,14 +165,6 @@ github.com/klauspost/cpuid/v2 # github.com/leodido/go-urn v1.2.4 ## explicit; go 1.16 github.com/leodido/go-urn -# github.com/lib/pq 
v1.10.9 -## explicit; go 1.13 -github.com/lib/pq -github.com/lib/pq/oid -github.com/lib/pq/scram -# github.com/mattn/go-colorable v0.1.13 -## explicit; go 1.15 -github.com/mattn/go-colorable # github.com/mattn/go-isatty v0.0.19 ## explicit; go 1.15 github.com/mattn/go-isatty @@ -263,14 +197,14 @@ github.com/oschwald/geoip2-golang # github.com/oschwald/maxminddb-golang v1.12.0 ## explicit; go 1.19 github.com/oschwald/maxminddb-golang -# github.com/pelletier/go-toml/v2 v2.0.9 +# github.com/pelletier/go-toml/v2 v2.1.0 ## explicit; go 1.16 github.com/pelletier/go-toml/v2 github.com/pelletier/go-toml/v2/internal/characters github.com/pelletier/go-toml/v2/internal/danger github.com/pelletier/go-toml/v2/internal/tracker github.com/pelletier/go-toml/v2/unstable -# github.com/qiniu/go-sdk/v7 v7.17.0 +# github.com/qiniu/go-sdk/v7 v7.17.1 ## explicit; go 1.14 github.com/qiniu/go-sdk/v7 github.com/qiniu/go-sdk/v7/auth @@ -319,20 +253,6 @@ github.com/shirou/gopsutil/process # github.com/sirupsen/logrus v1.9.3 ## explicit; go 1.13 github.com/sirupsen/logrus -# github.com/syndtr/goleveldb v1.0.0 -## explicit -github.com/syndtr/goleveldb/leveldb -github.com/syndtr/goleveldb/leveldb/cache -github.com/syndtr/goleveldb/leveldb/comparer -github.com/syndtr/goleveldb/leveldb/errors -github.com/syndtr/goleveldb/leveldb/filter -github.com/syndtr/goleveldb/leveldb/iterator -github.com/syndtr/goleveldb/leveldb/journal -github.com/syndtr/goleveldb/leveldb/memdb -github.com/syndtr/goleveldb/leveldb/opt -github.com/syndtr/goleveldb/leveldb/storage -github.com/syndtr/goleveldb/leveldb/table -github.com/syndtr/goleveldb/leveldb/util # github.com/tencentyun/cos-go-sdk-v5 v0.7.42 ## explicit; go 1.12 github.com/tencentyun/cos-go-sdk-v5 @@ -426,35 +346,6 @@ go.mongodb.org/mongo-driver/x/mongo/driver/operation go.mongodb.org/mongo-driver/x/mongo/driver/session go.mongodb.org/mongo-driver/x/mongo/driver/topology go.mongodb.org/mongo-driver/x/mongo/driver/wiremessage -# go.opentelemetry.io/otel 
v1.16.0 -## explicit; go 1.19 -go.opentelemetry.io/otel -go.opentelemetry.io/otel/attribute -go.opentelemetry.io/otel/baggage -go.opentelemetry.io/otel/codes -go.opentelemetry.io/otel/internal -go.opentelemetry.io/otel/internal/attribute -go.opentelemetry.io/otel/internal/baggage -go.opentelemetry.io/otel/internal/global -go.opentelemetry.io/otel/propagation -go.opentelemetry.io/otel/semconv/internal -go.opentelemetry.io/otel/semconv/v1.17.0 -go.opentelemetry.io/otel/semconv/v1.4.0 -# go.opentelemetry.io/otel/metric v1.16.0 -## explicit; go 1.19 -go.opentelemetry.io/otel/metric -go.opentelemetry.io/otel/metric/embedded -# go.opentelemetry.io/otel/sdk v1.16.0 -## explicit; go 1.19 -go.opentelemetry.io/otel/sdk -go.opentelemetry.io/otel/sdk/instrumentation -go.opentelemetry.io/otel/sdk/internal -go.opentelemetry.io/otel/sdk/internal/env -go.opentelemetry.io/otel/sdk/resource -go.opentelemetry.io/otel/sdk/trace -# go.opentelemetry.io/otel/trace v1.16.0 -## explicit; go 1.19 -go.opentelemetry.io/otel/trace # go.uber.org/multierr v1.11.0 ## explicit; go 1.19 go.uber.org/multierr @@ -468,7 +359,7 @@ go.uber.org/zap/internal/color go.uber.org/zap/internal/exit go.uber.org/zap/internal/pool go.uber.org/zap/zapcore -# golang.org/x/arch v0.4.0 +# golang.org/x/arch v0.5.0 ## explicit; go 1.17 golang.org/x/arch/x86/x86asm # golang.org/x/crypto v0.12.0 @@ -512,15 +403,14 @@ golang.org/x/net/idna ## explicit; go 1.17 golang.org/x/sync/errgroup golang.org/x/sync/singleflight -# golang.org/x/sys v0.11.0 +# golang.org/x/sys v0.12.0 ## explicit; go 1.17 golang.org/x/sys/cpu golang.org/x/sys/execabs golang.org/x/sys/internal/unsafeheader golang.org/x/sys/unix golang.org/x/sys/windows -golang.org/x/sys/windows/registry -# golang.org/x/text v0.12.0 +# golang.org/x/text v0.13.0 ## explicit; go 1.17 golang.org/x/text/cases golang.org/x/text/encoding @@ -635,21 +525,3 @@ gorm.io/hints # gorm.io/plugin/dbresolver v1.4.7 ## explicit; go 1.14 gorm.io/plugin/dbresolver -# xorm.io/builder 
v0.3.13 -## explicit; go 1.11 -xorm.io/builder -# xorm.io/xorm v1.3.2 -## explicit; go 1.13 -xorm.io/xorm -xorm.io/xorm/caches -xorm.io/xorm/contexts -xorm.io/xorm/convert -xorm.io/xorm/core -xorm.io/xorm/dialects -xorm.io/xorm/internal/json -xorm.io/xorm/internal/statements -xorm.io/xorm/internal/utils -xorm.io/xorm/log -xorm.io/xorm/names -xorm.io/xorm/schemas -xorm.io/xorm/tags diff --git a/vendor/xorm.io/builder/.gitignore b/vendor/xorm.io/builder/.gitignore deleted file mode 100644 index 723ef36f..00000000 --- a/vendor/xorm.io/builder/.gitignore +++ /dev/null @@ -1 +0,0 @@ -.idea \ No newline at end of file diff --git a/vendor/xorm.io/builder/LICENSE b/vendor/xorm.io/builder/LICENSE deleted file mode 100644 index 614d5e28..00000000 --- a/vendor/xorm.io/builder/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2016 The Xorm Authors -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -* Neither the name of the {organization} nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/xorm.io/builder/README.md b/vendor/xorm.io/builder/README.md deleted file mode 100644 index 43c205e4..00000000 --- a/vendor/xorm.io/builder/README.md +++ /dev/null @@ -1,217 +0,0 @@ -# SQL builder - -[![Build Status](https://drone.gitea.com/api/badges/xorm/builder/status.svg)](https://drone.gitea.com/xorm/builder) [![](http://gocover.io/_badge/xorm.io/builder)](http://gocover.io/xorm.io/builder) -[![](https://goreportcard.com/badge/xorm.io/builder)](https://goreportcard.com/report/xorm.io/builder) - -Package builder is a lightweight and fast SQL builder for Go and XORM. - -Make sure you have installed Go 1.8+ and then: - - go get xorm.io/builder - -# Insert - -```Go -sql, args, err := builder.Insert(Eq{"c": 1, "d": 2}).Into("table1").ToSQL() - -// INSERT INTO table1 SELECT * FROM table2 -sql, err := builder.Insert().Into("table1").Select().From("table2").ToBoundSQL() - -// INSERT INTO table1 (a, b) SELECT b, c FROM table2 -sql, err = builder.Insert("a, b").Into("table1").Select("b, c").From("table2").ToBoundSQL() -``` - -# Select - -```Go -// Simple Query -sql, args, err := Select("c, d").From("table1").Where(Eq{"a": 1}).ToSQL() -// With join -sql, args, err = Select("c, d").From("table1").LeftJoin("table2", Eq{"table1.id": 1}.And(Lt{"table2.id": 3})). 
- RightJoin("table3", "table2.id = table3.tid").Where(Eq{"a": 1}).ToSQL() -// From sub query -sql, args, err := Select("sub.id").From(Select("c").From("table1").Where(Eq{"a": 1}), "sub").Where(Eq{"b": 1}).ToSQL() -// From union query -sql, args, err = Select("sub.id").From( - Select("id").From("table1").Where(Eq{"a": 1}).Union("all", Select("id").From("table1").Where(Eq{"a": 2})),"sub"). - Where(Eq{"b": 1}).ToSQL() -// With order by -sql, args, err = Select("a", "b", "c").From("table1").Where(Eq{"f1": "v1", "f2": "v2"}). - OrderBy("a ASC").ToSQL() -// With limit. -// Be careful! You should set up specific dialect for builder before performing a query with LIMIT -sql, args, err = Dialect(MYSQL).Select("a", "b", "c").From("table1").OrderBy("a ASC"). - Limit(5, 10).ToSQL() -``` - -# Update - -```Go -sql, args, err := Update(Eq{"a": 2}).From("table1").Where(Eq{"a": 1}).ToSQL() -``` - -# Delete - -```Go -sql, args, err := Delete(Eq{"a": 1}).From("table1").ToSQL() -``` - -# Union - -```Go -sql, args, err := Select("*").From("a").Where(Eq{"status": "1"}). - Union("all", Select("*").From("a").Where(Eq{"status": "2"})). - Union("distinct", Select("*").From("a").Where(Eq{"status": "3"})). - Union("", Select("*").From("a").Where(Eq{"status": "4"})). - ToSQL() -``` - -# Conditions - -* `Eq` is a redefine of a map, you can give one or more conditions to `Eq` - -```Go -import . "xorm.io/builder" - -sql, args, _ := ToSQL(Eq{"a":1}) -// a=? [1] -sql, args, _ := ToSQL(Eq{"b":"c"}.And(Eq{"c": 0})) -// b=? AND c=? ["c", 0] -sql, args, _ := ToSQL(Eq{"b":"c", "c":0}) -// b=? AND c=? ["c", 0] -sql, args, _ := ToSQL(Eq{"b":"c"}.Or(Eq{"b":"d"})) -// b=? OR b=? ["c", "d"] -sql, args, _ := ToSQL(Eq{"b": []string{"c", "d"}}) -// b IN (?,?) ["c", "d"] -sql, args, _ := ToSQL(Eq{"b": 1, "c":[]int{2, 3}}) -// b=? AND c IN (?,?) [1, 2, 3] -``` - -* `Neq` is the same to `Eq` - -```Go -import . "xorm.io/builder" - -sql, args, _ := ToSQL(Neq{"a":1}) -// a<>? 
[1] -sql, args, _ := ToSQL(Neq{"b":"c"}.And(Neq{"c": 0})) -// b<>? AND c<>? ["c", 0] -sql, args, _ := ToSQL(Neq{"b":"c", "c":0}) -// b<>? AND c<>? ["c", 0] -sql, args, _ := ToSQL(Neq{"b":"c"}.Or(Neq{"b":"d"})) -// b<>? OR b<>? ["c", "d"] -sql, args, _ := ToSQL(Neq{"b": []string{"c", "d"}}) -// b NOT IN (?,?) ["c", "d"] -sql, args, _ := ToSQL(Neq{"b": 1, "c":[]int{2, 3}}) -// b<>? AND c NOT IN (?,?) [1, 2, 3] -``` - -* `Gt`, `Gte`, `Lt`, `Lte` - -```Go -import . "xorm.io/builder" - -sql, args, _ := ToSQL(Gt{"a", 1}.And(Gte{"b", 2})) -// a>? AND b>=? [1, 2] -sql, args, _ := ToSQL(Lt{"a", 1}.Or(Lte{"b", 2})) -// a? [1, %c%, 2] -``` - -* `Or(conds ...Cond)`, Or can connect one or more conditions via Or - -```Go -import . "xorm.io/builder" - -sql, args, _ := ToSQL(Or(Eq{"a":1}, Like{"b", "c"}, Neq{"d", 2})) -// a=? OR b LIKE ? OR d<>? [1, %c%, 2] -sql, args, _ := ToSQL(Or(Eq{"a":1}, And(Like{"b", "c"}, Neq{"d", 2}))) -// a=? OR (b LIKE ? AND d<>?) [1, %c%, 2] -``` - -* `Between` - -```Go -import . "xorm.io/builder" - -sql, args, _ := ToSQL(Between{"a", 1, 2}) -// a BETWEEN 1 AND 2 -``` - -* Define yourself conditions - -Since `Cond` is an interface. - -```Go -type Cond interface { - WriteTo(Writer) error - And(...Cond) Cond - Or(...Cond) Cond - IsValid() bool -} -``` - -You can define yourself conditions and compose with other `Cond`. \ No newline at end of file diff --git a/vendor/xorm.io/builder/as.go b/vendor/xorm.io/builder/as.go deleted file mode 100644 index 23345f57..00000000 --- a/vendor/xorm.io/builder/as.go +++ /dev/null @@ -1,10 +0,0 @@ -package builder - -type Aliased struct { - table interface{} - alias string -} - -func As(table interface{}, alias string) *Aliased { - return &Aliased{table, alias} -} diff --git a/vendor/xorm.io/builder/builder.go b/vendor/xorm.io/builder/builder.go deleted file mode 100644 index c6b77423..00000000 --- a/vendor/xorm.io/builder/builder.go +++ /dev/null @@ -1,330 +0,0 @@ -// Copyright 2016 The Xorm Authors. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package builder - -import ( - sql2 "database/sql" - "fmt" -) - -type optype byte - -const ( - condType optype = iota // only conditions - selectType // select - insertType // insert - updateType // update - deleteType // delete - setOpType // set operation -) - -// all databasees -const ( - POSTGRES = "postgres" - SQLITE = "sqlite3" - MYSQL = "mysql" - MSSQL = "mssql" - ORACLE = "oracle" - - UNION = "union" - INTERSECT = "intersect" - EXCEPT = "except" -) - -type join struct { - joinType string - joinTable interface{} - joinCond Cond -} - -type setOp struct { - opType string - distinctType string - builder *Builder -} - -type limit struct { - limitN int - offset int -} - -// Builder describes a SQL statement -type Builder struct { - optype - dialect string - isNested bool - into string - from string - subQuery *Builder - cond Cond - selects []string - joins joins - setOps []setOp - limitation *limit - insertCols []string - insertVals []interface{} - updates []UpdateCond - orderBy interface{} - groupBy string - having interface{} -} - -// Dialect sets the db dialect of Builder. 
-func Dialect(dialect string) *Builder { - builder := &Builder{cond: NewCond(), dialect: dialect} - return builder -} - -// MySQL is shortcut of Dialect(MySQL) -func MySQL() *Builder { - return Dialect(MYSQL) -} - -// MsSQL is shortcut of Dialect(MsSQL) -func MsSQL() *Builder { - return Dialect(MSSQL) -} - -// Oracle is shortcut of Dialect(Oracle) -func Oracle() *Builder { - return Dialect(ORACLE) -} - -// Postgres is shortcut of Dialect(Postgres) -func Postgres() *Builder { - return Dialect(POSTGRES) -} - -// SQLite is shortcut of Dialect(SQLITE) -func SQLite() *Builder { - return Dialect(SQLITE) -} - -// Where sets where SQL -func (b *Builder) Where(cond Cond) *Builder { - if b.cond.IsValid() { - b.cond = b.cond.And(cond) - } else { - b.cond = cond - } - return b -} - -// From sets from subject(can be a table name in string or a builder pointer) and its alias -func (b *Builder) From(subject interface{}, aliasMaybe ...string) *Builder { - alias := "" - if len(aliasMaybe) > 0 { - alias = aliasMaybe[0] - } - - if aliased, ok := subject.(*Aliased); ok { - subject = aliased.table - alias = aliased.alias - } - - switch t := subject.(type) { - case *Builder: - b.subQuery = t - - if len(alias) > 0 { - b.from = alias - } else { - b.isNested = true - } - case string: - b.from = t - - if len(alias) > 0 { - b.from = b.from + " " + alias - } - } - - return b -} - -// TableName returns the table name -func (b *Builder) TableName() string { - if b.optype == insertType { - return b.into - } - return b.from -} - -// Into sets insert table name -func (b *Builder) Into(tableName string) *Builder { - b.into = tableName - return b -} - -// Union sets union conditions -func (b *Builder) Union(distinctType string, cond *Builder) *Builder { - return b.setOperation(UNION, distinctType, cond) -} - -// Intersect sets intersect conditions -func (b *Builder) Intersect(distinctType string, cond *Builder) *Builder { - return b.setOperation(INTERSECT, distinctType, cond) -} - -// Except sets 
except conditions -func (b *Builder) Except(distinctType string, cond *Builder) *Builder { - return b.setOperation(EXCEPT, distinctType, cond) -} - -func (b *Builder) setOperation(opType, distinctType string, cond *Builder) *Builder { - var builder *Builder - if b.optype != setOpType { - builder = &Builder{cond: NewCond()} - builder.optype = setOpType - builder.dialect = b.dialect - builder.selects = b.selects - - currentSetOps := b.setOps - // erase sub setOps (actually append to new Builder.unions) - b.setOps = nil - - for e := range currentSetOps { - currentSetOps[e].builder.dialect = b.dialect - } - - builder.setOps = append(append(builder.setOps, setOp{opType, "", b}), currentSetOps...) - } else { - builder = b - } - - if cond != nil { - if cond.dialect == "" && builder.dialect != "" { - cond.dialect = builder.dialect - } - - builder.setOps = append(builder.setOps, setOp{opType, distinctType, cond}) - } - - return builder -} - -// Limit sets limitN condition -func (b *Builder) Limit(limitN int, offset ...int) *Builder { - b.limitation = &limit{limitN: limitN} - - if len(offset) > 0 { - b.limitation.offset = offset[0] - } - - return b -} - -// Select sets select SQL -func (b *Builder) Select(cols ...string) *Builder { - b.selects = cols - if b.optype == condType { - b.optype = selectType - } - return b -} - -// And sets AND condition -func (b *Builder) And(cond Cond) *Builder { - b.cond = And(b.cond, cond) - return b -} - -// Or sets OR condition -func (b *Builder) Or(cond Cond) *Builder { - b.cond = Or(b.cond, cond) - return b -} - -// Update sets update SQL -func (b *Builder) Update(updates ...Cond) *Builder { - b.updates = make([]UpdateCond, 0, len(updates)) - for _, update := range updates { - if u, ok := update.(UpdateCond); ok && u.IsValid() { - b.updates = append(b.updates, u) - } - } - b.optype = updateType - return b -} - -// Delete sets delete SQL -func (b *Builder) Delete(conds ...Cond) *Builder { - b.cond = b.cond.And(conds...) 
- b.optype = deleteType - return b -} - -// WriteTo implements Writer interface -func (b *Builder) WriteTo(w Writer) error { - switch b.optype { - /*case condType: - return b.cond.WriteTo(w)*/ - case selectType: - return b.selectWriteTo(w) - case insertType: - return b.insertWriteTo(w) - case updateType: - return b.updateWriteTo(w) - case deleteType: - return b.deleteWriteTo(w) - case setOpType: - return b.setOpWriteTo(w) - } - - return ErrNotSupportType -} - -// ToSQL convert a builder to SQL and args -func (b *Builder) ToSQL() (string, []interface{}, error) { - w := NewWriter() - if err := b.WriteTo(w); err != nil { - return "", nil, err - } - - // in case of sql.NamedArg in args - for e := range w.args { - if namedArg, ok := w.args[e].(sql2.NamedArg); ok { - w.args[e] = namedArg.Value - } - } - - sql := w.String() - var err error - - switch b.dialect { - case ORACLE, MSSQL: - // This is for compatibility with different sql drivers - for e := range w.args { - w.args[e] = sql2.Named(fmt.Sprintf("p%d", e+1), w.args[e]) - } - - var prefix string - if b.dialect == ORACLE { - prefix = ":p" - } else { - prefix = "@p" - } - - if sql, err = ConvertPlaceholder(sql, prefix); err != nil { - return "", nil, err - } - case POSTGRES: - if sql, err = ConvertPlaceholder(sql, "$"); err != nil { - return "", nil, err - } - } - - return sql, w.args, nil -} - -// ToBoundSQL generated a bound SQL string -func (b *Builder) ToBoundSQL() (string, error) { - w := NewWriter() - if err := b.WriteTo(w); err != nil { - return "", err - } - - return ConvertToBoundSQL(w.String(), w.args) -} diff --git a/vendor/xorm.io/builder/builder_delete.go b/vendor/xorm.io/builder/builder_delete.go deleted file mode 100644 index 317cc3ff..00000000 --- a/vendor/xorm.io/builder/builder_delete.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2016 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package builder - -import ( - "fmt" -) - -// Delete creates a delete Builder -func Delete(conds ...Cond) *Builder { - builder := &Builder{cond: NewCond()} - return builder.Delete(conds...) -} - -func (b *Builder) deleteWriteTo(w Writer) error { - if len(b.from) <= 0 { - return ErrNoTableName - } - - if _, err := fmt.Fprintf(w, "DELETE FROM %s WHERE ", b.from); err != nil { - return err - } - - return b.cond.WriteTo(w) -} diff --git a/vendor/xorm.io/builder/builder_insert.go b/vendor/xorm.io/builder/builder_insert.go deleted file mode 100644 index 9b0e6eef..00000000 --- a/vendor/xorm.io/builder/builder_insert.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright 2016 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package builder - -import ( - "bytes" - "fmt" - "sort" -) - -// Insert creates an insert Builder -func Insert(eq ...interface{}) *Builder { - builder := &Builder{cond: NewCond()} - return builder.Insert(eq...) -} - -func (b *Builder) insertSelectWriteTo(w Writer) error { - if _, err := fmt.Fprintf(w, "INSERT INTO %s ", b.into); err != nil { - return err - } - - if len(b.insertCols) > 0 { - fmt.Fprintf(w, "(") - for _, col := range b.insertCols { - fmt.Fprintf(w, col) - } - fmt.Fprintf(w, ") ") - } - - return b.selectWriteTo(w) -} - -func (b *Builder) insertWriteTo(w Writer) error { - if len(b.into) <= 0 { - return ErrNoTableName - } - if len(b.insertCols) <= 0 && b.from == "" { - return ErrNoColumnToInsert - } - - if b.into != "" && b.from != "" { - return b.insertSelectWriteTo(w) - } - - if _, err := fmt.Fprintf(w, "INSERT INTO %s (", b.into); err != nil { - return err - } - - args := make([]interface{}, 0) - var bs []byte - valBuffer := bytes.NewBuffer(bs) - - for i, col := range b.insertCols { - value := b.insertVals[i] - fmt.Fprint(w, col) - if e, ok := value.(*Expression); ok { - fmt.Fprintf(valBuffer, "(%s)", e.sql) - args = append(args, e.args...) 
- } else if value == nil { - fmt.Fprintf(valBuffer, `null`) - } else { - fmt.Fprint(valBuffer, "?") - args = append(args, value) - } - - if i != len(b.insertCols)-1 { - if _, err := fmt.Fprint(w, ","); err != nil { - return err - } - if _, err := fmt.Fprint(valBuffer, ","); err != nil { - return err - } - } - } - - if _, err := fmt.Fprint(w, ") Values ("); err != nil { - return err - } - - if _, err := w.Write(valBuffer.Bytes()); err != nil { - return err - } - if _, err := fmt.Fprint(w, ")"); err != nil { - return err - } - - w.Append(args...) - - return nil -} - -type insertColsSorter struct { - cols []string - vals []interface{} -} - -func (s insertColsSorter) Len() int { - return len(s.cols) -} - -func (s insertColsSorter) Swap(i, j int) { - s.cols[i], s.cols[j] = s.cols[j], s.cols[i] - s.vals[i], s.vals[j] = s.vals[j], s.vals[i] -} - -func (s insertColsSorter) Less(i, j int) bool { - return s.cols[i] < s.cols[j] -} - -// Insert sets insert SQL -func (b *Builder) Insert(eq ...interface{}) *Builder { - if len(eq) > 0 { - paramType := -1 - for _, e := range eq { - switch t := e.(type) { - case Eq: - if paramType == -1 { - paramType = 0 - } - if paramType != 0 { - break - } - for k, v := range t { - b.insertCols = append(b.insertCols, k) - b.insertVals = append(b.insertVals, v) - } - case string: - if paramType == -1 { - paramType = 1 - } - if paramType != 1 { - break - } - b.insertCols = append(b.insertCols, t) - } - } - } - - if len(b.insertCols) == len(b.insertVals) { - sort.Sort(insertColsSorter{ - cols: b.insertCols, - vals: b.insertVals, - }) - } - b.optype = insertType - return b -} diff --git a/vendor/xorm.io/builder/builder_join.go b/vendor/xorm.io/builder/builder_join.go deleted file mode 100644 index a8aef0a7..00000000 --- a/vendor/xorm.io/builder/builder_join.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2019 The Xorm Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package builder - -import ( - "fmt" -) - -// InnerJoin sets inner join -func (b *Builder) InnerJoin(joinTable, joinCond interface{}) *Builder { - return b.Join("INNER", joinTable, joinCond) -} - -// LeftJoin sets left join SQL -func (b *Builder) LeftJoin(joinTable, joinCond interface{}) *Builder { - return b.Join("LEFT", joinTable, joinCond) -} - -// RightJoin sets right join SQL -func (b *Builder) RightJoin(joinTable, joinCond interface{}) *Builder { - return b.Join("RIGHT", joinTable, joinCond) -} - -// CrossJoin sets cross join SQL -func (b *Builder) CrossJoin(joinTable, joinCond interface{}) *Builder { - return b.Join("CROSS", joinTable, joinCond) -} - -// FullJoin sets full join SQL -func (b *Builder) FullJoin(joinTable, joinCond interface{}) *Builder { - return b.Join("FULL", joinTable, joinCond) -} - -// Join sets join table and conditions -func (b *Builder) Join(joinType string, joinTable, joinCond interface{}) *Builder { - switch joinCond.(type) { - case Cond: - b.joins = append(b.joins, join{joinType, joinTable, joinCond.(Cond)}) - case string: - b.joins = append(b.joins, join{joinType, joinTable, Expr(joinCond.(string))}) - } - - return b -} - -type joins []join - -func (joins joins) WriteTo(w Writer) error { - for _, v := range joins { - var joinTable = v.joinTable - var alias string - if aliased, ok := v.joinTable.(*Aliased); ok { - joinTable = aliased.table - alias = aliased.alias + " " - } - - switch tbl := joinTable.(type) { - case *Builder: - if _, err := fmt.Fprintf(w, " %s JOIN (", v.joinType); err != nil { - return err - } - if err := tbl.WriteTo(w); err != nil { - return err - } - if _, err := fmt.Fprintf(w, ") %s", alias); err != nil { - return err - } - case string: - if _, err := fmt.Fprintf(w, " %s JOIN %s %s", v.joinType, tbl, alias); err != nil { - return err - } - } - if _, err := fmt.Fprintf(w, "ON "); err != nil { - return err - 
} - - if err := v.joinCond.WriteTo(w); err != nil { - return err - } - } - - return nil -} diff --git a/vendor/xorm.io/builder/builder_limit.go b/vendor/xorm.io/builder/builder_limit.go deleted file mode 100644 index 793df377..00000000 --- a/vendor/xorm.io/builder/builder_limit.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2018 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package builder - -import ( - "fmt" - "strings" -) - -func (b *Builder) limitWriteTo(w Writer) error { - if strings.TrimSpace(b.dialect) == "" { - return ErrDialectNotSetUp - } - - if b.limitation != nil { - limit := b.limitation - if limit.offset < 0 || limit.limitN <= 0 { - return ErrInvalidLimitation - } - // unset limit condition to prevent final.WriteTo from recursing forever - b.limitation = nil - defer func() { - b.limitation = limit - }() - - switch strings.ToLower(strings.TrimSpace(b.dialect)) { - case ORACLE: - if len(b.selects) == 0 { - b.selects = append(b.selects, "*") - } - - var final *Builder - selects := b.selects - b.selects = append(selects, "ROWNUM RN") - - var wb *Builder - if b.optype == setOpType { - wb = Dialect(b.dialect).Select("at.*", "ROWNUM RN"). - From(b, "at") - } else { - wb = b - } - - if limit.offset == 0 { - final = Dialect(b.dialect).Select(selects...).From(wb, "at"). - Where(Lte{"at.RN": limit.limitN}) - } else { - sub := Dialect(b.dialect).Select("*"). - From(b, "at").Where(Lte{"at.RN": limit.offset + limit.limitN}) - - final = Dialect(b.dialect).Select(selects...).From(sub, "att"). 
- Where(Gt{"att.RN": limit.offset}) - } - - return final.WriteTo(w) - case SQLITE, MYSQL, POSTGRES: - // if type UNION, we need to write previous content back to current writer - if b.optype == setOpType { - if err := b.WriteTo(w); err != nil { - return err - } - } - - if limit.offset == 0 { - fmt.Fprint(w, " LIMIT ", limit.limitN) - } else { - fmt.Fprintf(w, " LIMIT %v OFFSET %v", limit.limitN, limit.offset) - } - case MSSQL: - if len(b.selects) == 0 { - b.selects = append(b.selects, "*") - } - - var final *Builder - selects := b.selects - b.selects = append(append([]string{fmt.Sprintf("TOP %d %v", limit.limitN+limit.offset, b.selects[0])}, - b.selects[1:]...), "ROW_NUMBER() OVER (ORDER BY (SELECT 1)) AS RN") - - var wb *Builder - if b.optype == setOpType { - wb = Dialect(b.dialect).Select("*", "ROW_NUMBER() OVER (ORDER BY (SELECT 1)) AS RN"). - From(b, "at") - } else { - wb = b - } - - if limit.offset == 0 { - final = Dialect(b.dialect).Select(selects...).From(wb, "at") - } else { - final = Dialect(b.dialect).Select(selects...).From(wb, "at").Where(Gt{"at.RN": limit.offset}) - } - - return final.WriteTo(w) - default: - return ErrNotSupportType - } - } - - return nil -} diff --git a/vendor/xorm.io/builder/builder_select.go b/vendor/xorm.io/builder/builder_select.go deleted file mode 100644 index a36d2a64..00000000 --- a/vendor/xorm.io/builder/builder_select.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2016 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package builder - -import ( - "fmt" -) - -// Select creates a select Builder -func Select(cols ...string) *Builder { - builder := &Builder{cond: NewCond()} - return builder.Select(cols...) 
-} - -func (b *Builder) selectWriteTo(w Writer) error { - if len(b.from) <= 0 && !b.isNested { - return ErrNoTableName - } - - // perform limit before writing to writer when b.dialect between ORACLE and MSSQL - // this avoid a duplicate writing problem in simple limit query - if b.limitation != nil && (b.dialect == ORACLE || b.dialect == MSSQL) { - return b.limitWriteTo(w) - } - - if _, err := fmt.Fprint(w, "SELECT "); err != nil { - return err - } - if len(b.selects) > 0 { - for i, s := range b.selects { - if _, err := fmt.Fprint(w, s); err != nil { - return err - } - if i != len(b.selects)-1 { - if _, err := fmt.Fprint(w, ","); err != nil { - return err - } - } - } - } else { - if _, err := fmt.Fprint(w, "*"); err != nil { - return err - } - } - - if b.subQuery == nil { - if _, err := fmt.Fprint(w, " FROM ", b.from); err != nil { - return err - } - } else { - if b.cond.IsValid() && len(b.from) <= 0 { - return ErrUnnamedDerivedTable - } - if b.subQuery.dialect != "" && b.dialect != b.subQuery.dialect { - return ErrInconsistentDialect - } - - // dialect of sub-query will inherit from the main one (if not set up) - if b.dialect != "" && b.subQuery.dialect == "" { - b.subQuery.dialect = b.dialect - } - - switch b.subQuery.optype { - case selectType, setOpType: - fmt.Fprint(w, " FROM (") - if err := b.subQuery.WriteTo(w); err != nil { - return err - } - - if len(b.from) == 0 { - fmt.Fprintf(w, ")") - } else { - fmt.Fprintf(w, ") %v", b.from) - } - default: - return ErrUnexpectedSubQuery - } - } - - if err := b.joins.WriteTo(w); err != nil { - return err - } - - if b.cond.IsValid() { - if _, err := fmt.Fprint(w, " WHERE "); err != nil { - return err - } - - if err := b.cond.WriteTo(w); err != nil { - return err - } - } - - if len(b.groupBy) > 0 { - if _, err := fmt.Fprint(w, " GROUP BY ", b.groupBy); err != nil { - return err - } - } - - if b.having != nil { - switch c := b.having.(type) { - case string: - if len(c) > 0 { - if _, err := fmt.Fprint(w, " HAVING ", c); 
err != nil { - return err - } - } - case Cond: - if c.IsValid() { - if _, err := fmt.Fprint(w, " HAVING "); err != nil { - return err - } - if err := c.WriteTo(w); err != nil { - return err - } - } - default: - return fmt.Errorf("unknown having parameter: %#v", b.having) - } - } - - if b.orderBy != nil { - switch c := b.orderBy.(type) { - case string: - if len(c) > 0 { - if _, err := fmt.Fprint(w, " ORDER BY ", c); err != nil { - return err - } - } - case *Expression: - if _, err := fmt.Fprint(w, " ORDER BY "); err != nil { - return err - } - if err := c.WriteTo(w); err != nil { - return err - } - default: - return fmt.Errorf("unknow orderby parameter: %#v", b.orderBy) - } - } - - if b.limitation != nil { - if err := b.limitWriteTo(w); err != nil { - return err - } - } - - return nil -} - -// OrderBy orderBy SQL -func (b *Builder) OrderBy(orderBy interface{}) *Builder { - b.orderBy = orderBy - return b -} - -// GroupBy groupby SQL -func (b *Builder) GroupBy(groupby string) *Builder { - b.groupBy = groupby - return b -} - -// Having having SQL -func (b *Builder) Having(having interface{}) *Builder { - b.having = having - return b -} diff --git a/vendor/xorm.io/builder/builder_set_operations.go b/vendor/xorm.io/builder/builder_set_operations.go deleted file mode 100644 index 6ad16778..00000000 --- a/vendor/xorm.io/builder/builder_set_operations.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2018 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package builder - -import ( - "fmt" - "strings" -) - -func (b *Builder) setOpWriteTo(w Writer) error { - if b.limitation != nil || b.cond.IsValid() || - b.orderBy != nil || b.having != nil || b.groupBy != "" { - return ErrNotUnexpectedUnionConditions - } - - for idx, o := range b.setOps { - current := o.builder - if current.optype != selectType { - return ErrUnsupportedUnionMembers - } - - if len(b.setOps) == 1 { - if err := current.selectWriteTo(w); err != nil { - return err - } - } else { - if b.dialect != "" && b.dialect != current.dialect { - return ErrInconsistentDialect - } - - if idx != 0 { - if o.distinctType == "" { - fmt.Fprint(w, fmt.Sprintf(" %s ", strings.ToUpper(o.opType))) - } else { - fmt.Fprint(w, fmt.Sprintf(" %s %s ", strings.ToUpper(o.opType), strings.ToUpper(o.distinctType))) - } - } - fmt.Fprint(w, "(") - - if err := current.selectWriteTo(w); err != nil { - return err - } - - fmt.Fprint(w, ")") - } - } - - return nil -} diff --git a/vendor/xorm.io/builder/builder_update.go b/vendor/xorm.io/builder/builder_update.go deleted file mode 100644 index 5fffbe34..00000000 --- a/vendor/xorm.io/builder/builder_update.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2016 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package builder - -import ( - "fmt" -) - -// UpdateCond defines an interface that cond could be used with update -type UpdateCond interface { - IsValid() bool - OpWriteTo(op string, w Writer) error -} - -// Update creates an update Builder -func Update(updates ...Cond) *Builder { - builder := &Builder{cond: NewCond()} - return builder.Update(updates...) 
-} - -func (b *Builder) updateWriteTo(w Writer) error { - if len(b.from) <= 0 { - return ErrNoTableName - } - if len(b.updates) <= 0 { - return ErrNoColumnToUpdate - } - - if _, err := fmt.Fprintf(w, "UPDATE %s SET ", b.from); err != nil { - return err - } - - for i, s := range b.updates { - - if err := s.OpWriteTo(",", w); err != nil { - return err - } - - if i != len(b.updates)-1 { - if _, err := fmt.Fprint(w, ","); err != nil { - return err - } - } - } - - if !b.cond.IsValid() { - return nil - } - - if _, err := fmt.Fprint(w, " WHERE "); err != nil { - return err - } - - return b.cond.WriteTo(w) -} diff --git a/vendor/xorm.io/builder/cond.go b/vendor/xorm.io/builder/cond.go deleted file mode 100644 index 149f5d8c..00000000 --- a/vendor/xorm.io/builder/cond.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2016 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package builder - -// Cond defines an interface -type Cond interface { - WriteTo(Writer) error - And(...Cond) Cond - Or(...Cond) Cond - IsValid() bool -} - -type condEmpty struct{} - -var _ Cond = condEmpty{} - -// NewCond creates an empty condition -func NewCond() Cond { - return condEmpty{} -} - -func (condEmpty) WriteTo(w Writer) error { - return nil -} - -func (condEmpty) And(conds ...Cond) Cond { - return And(conds...) -} - -func (condEmpty) Or(conds ...Cond) Cond { - return Or(conds...) -} - -func (condEmpty) IsValid() bool { - return false -} diff --git a/vendor/xorm.io/builder/cond_and.go b/vendor/xorm.io/builder/cond_and.go deleted file mode 100644 index 82ee30ea..00000000 --- a/vendor/xorm.io/builder/cond_and.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2016 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package builder - -import "fmt" - -type condAnd []Cond - -var _ Cond = condAnd{} - -// And generates AND conditions -func And(conds ...Cond) Cond { - result := make(condAnd, 0, len(conds)) - for _, cond := range conds { - if cond == nil || !cond.IsValid() { - continue - } - result = append(result, cond) - } - return result -} - -func (and condAnd) WriteTo(w Writer) error { - for i, cond := range and { - _, isOr := cond.(condOr) - _, isExpr := cond.(*Expression) - wrap := isOr || isExpr - if wrap { - fmt.Fprint(w, "(") - } - - err := cond.WriteTo(w) - if err != nil { - return err - } - - if wrap { - fmt.Fprint(w, ")") - } - - if i != len(and)-1 { - fmt.Fprint(w, " AND ") - } - } - - return nil -} - -func (and condAnd) And(conds ...Cond) Cond { - return And(and, And(conds...)) -} - -func (and condAnd) Or(conds ...Cond) Cond { - return Or(and, Or(conds...)) -} - -func (and condAnd) IsValid() bool { - return len(and) > 0 -} diff --git a/vendor/xorm.io/builder/cond_between.go b/vendor/xorm.io/builder/cond_between.go deleted file mode 100644 index b1541f2b..00000000 --- a/vendor/xorm.io/builder/cond_between.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2016 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package builder - -import "fmt" - -// Between implmentes between condition -type Between struct { - Col string - LessVal interface{} - MoreVal interface{} -} - -var _ Cond = Between{} - -// WriteTo write data to Writer -func (between Between) WriteTo(w Writer) error { - if _, err := fmt.Fprintf(w, "%s BETWEEN ", between.Col); err != nil { - return err - } - if lv, ok := between.LessVal.(*Expression); ok { - if err := lv.WriteTo(w); err != nil { - return err - } - } else { - if _, err := fmt.Fprint(w, "?"); err != nil { - return err - } - w.Append(between.LessVal) - } - - if _, err := fmt.Fprint(w, " AND "); err != nil { - return err - } - - if mv, ok := between.MoreVal.(*Expression); ok { - if err := mv.WriteTo(w); err != nil { - return err - } - } else { - if _, err := fmt.Fprint(w, "?"); err != nil { - return err - } - w.Append(between.MoreVal) - } - - return nil -} - -// And implments And with other conditions -func (between Between) And(conds ...Cond) Cond { - return And(between, And(conds...)) -} - -// Or implments Or with other conditions -func (between Between) Or(conds ...Cond) Cond { - return Or(between, Or(conds...)) -} - -// IsValid tests if the condition is valid -func (between Between) IsValid() bool { - return len(between.Col) > 0 -} diff --git a/vendor/xorm.io/builder/cond_compare.go b/vendor/xorm.io/builder/cond_compare.go deleted file mode 100644 index bda3d92c..00000000 --- a/vendor/xorm.io/builder/cond_compare.go +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright 2016 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package builder - -import "fmt" - -// WriteMap writes conditions' SQL to Writer, op could be =, <>, >, <, <=, >= and etc. 
-func WriteMap(w Writer, data map[string]interface{}, op string) error { - args := make([]interface{}, 0, len(data)) - i := 0 - keys := make([]string, 0, len(data)) - for k := range data { - keys = append(keys, k) - } - - for _, k := range keys { - v := data[k] - switch v.(type) { - case *Expression: - if _, err := fmt.Fprintf(w, "%s%s(", k, op); err != nil { - return err - } - - if err := v.(*Expression).WriteTo(w); err != nil { - return err - } - - if _, err := fmt.Fprintf(w, ")"); err != nil { - return err - } - case *Builder: - if _, err := fmt.Fprintf(w, "%s%s(", k, op); err != nil { - return err - } - - if err := v.(*Builder).WriteTo(w); err != nil { - return err - } - - if _, err := fmt.Fprintf(w, ")"); err != nil { - return err - } - default: - if _, err := fmt.Fprintf(w, "%s%s?", k, op); err != nil { - return err - } - args = append(args, v) - } - if i != len(data)-1 { - if _, err := fmt.Fprint(w, " AND "); err != nil { - return err - } - } - i = i + 1 - } - w.Append(args...) - return nil -} - -// Lt defines < condition -type Lt map[string]interface{} - -var _ Cond = Lt{} - -// WriteTo write SQL to Writer -func (lt Lt) WriteTo(w Writer) error { - return WriteMap(w, lt, "<") -} - -// And implements And with other conditions -func (lt Lt) And(conds ...Cond) Cond { - return condAnd{lt, And(conds...)} -} - -// Or implements Or with other conditions -func (lt Lt) Or(conds ...Cond) Cond { - return condOr{lt, Or(conds...)} -} - -// IsValid tests if this Eq is valid -func (lt Lt) IsValid() bool { - return len(lt) > 0 -} - -// Lte defines <= condition -type Lte map[string]interface{} - -var _ Cond = Lte{} - -// WriteTo write SQL to Writer -func (lte Lte) WriteTo(w Writer) error { - return WriteMap(w, lte, "<=") -} - -// And implements And with other conditions -func (lte Lte) And(conds ...Cond) Cond { - return And(lte, And(conds...)) -} - -// Or implements Or with other conditions -func (lte Lte) Or(conds ...Cond) Cond { - return Or(lte, Or(conds...)) -} - -// 
IsValid tests if this Eq is valid -func (lte Lte) IsValid() bool { - return len(lte) > 0 -} - -// Gt defines > condition -type Gt map[string]interface{} - -var _ Cond = Gt{} - -// WriteTo write SQL to Writer -func (gt Gt) WriteTo(w Writer) error { - return WriteMap(w, gt, ">") -} - -// And implements And with other conditions -func (gt Gt) And(conds ...Cond) Cond { - return And(gt, And(conds...)) -} - -// Or implements Or with other conditions -func (gt Gt) Or(conds ...Cond) Cond { - return Or(gt, Or(conds...)) -} - -// IsValid tests if this Eq is valid -func (gt Gt) IsValid() bool { - return len(gt) > 0 -} - -// Gte defines >= condition -type Gte map[string]interface{} - -var _ Cond = Gte{} - -// WriteTo write SQL to Writer -func (gte Gte) WriteTo(w Writer) error { - return WriteMap(w, gte, ">=") -} - -// And implements And with other conditions -func (gte Gte) And(conds ...Cond) Cond { - return And(gte, And(conds...)) -} - -// Or implements Or with other conditions -func (gte Gte) Or(conds ...Cond) Cond { - return Or(gte, Or(conds...)) -} - -// IsValid tests if this Eq is valid -func (gte Gte) IsValid() bool { - return len(gte) > 0 -} diff --git a/vendor/xorm.io/builder/cond_eq.go b/vendor/xorm.io/builder/cond_eq.go deleted file mode 100644 index 35fdf425..00000000 --- a/vendor/xorm.io/builder/cond_eq.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2016 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package builder - -import ( - "fmt" - "sort" -) - -// Incr implements a type used by Eq -type Incr int - -// Decr implements a type used by Eq -type Decr int - -// Eq defines equals conditions -type Eq map[string]interface{} - -var _ Cond = Eq{} - -// OpWriteTo writes conditions with special operator -func (eq Eq) OpWriteTo(op string, w Writer) error { - i := 0 - for _, k := range eq.sortedKeys() { - v := eq[k] - switch v.(type) { - case []int, []int64, []string, []int32, []int16, []int8, []uint, []uint64, []uint32, []uint16, []interface{}: - if err := In(k, v).WriteTo(w); err != nil { - return err - } - case *Expression: - if _, err := fmt.Fprintf(w, "%s=(", k); err != nil { - return err - } - - if err := v.(*Expression).WriteTo(w); err != nil { - return err - } - - if _, err := fmt.Fprintf(w, ")"); err != nil { - return err - } - case *Builder: - if _, err := fmt.Fprintf(w, "%s=(", k); err != nil { - return err - } - - if err := v.(*Builder).WriteTo(w); err != nil { - return err - } - - if _, err := fmt.Fprintf(w, ")"); err != nil { - return err - } - case Incr: - if _, err := fmt.Fprintf(w, "%s=%s+?", k, k); err != nil { - return err - } - w.Append(int(v.(Incr))) - case Decr: - if _, err := fmt.Fprintf(w, "%s=%s-?", k, k); err != nil { - return err - } - w.Append(int(v.(Decr))) - case nil: - if _, err := fmt.Fprintf(w, "%s=null", k); err != nil { - return err - } - default: - if _, err := fmt.Fprintf(w, "%s=?", k); err != nil { - return err - } - w.Append(v) - } - if i != len(eq)-1 { - if _, err := fmt.Fprint(w, op); err != nil { - return err - } - } - i = i + 1 - } - return nil -} - -// WriteTo writes SQL to Writer -func (eq Eq) WriteTo(w Writer) error { - return eq.OpWriteTo(" AND ", w) -} - -// And implements And with other conditions -func (eq Eq) And(conds ...Cond) Cond { - return And(eq, And(conds...)) -} - -// Or implements Or with other conditions -func (eq Eq) Or(conds ...Cond) Cond { - return Or(eq, Or(conds...)) -} - -// IsValid tests if this Eq is 
valid -func (eq Eq) IsValid() bool { - return len(eq) > 0 -} - -// sortedKeys returns all keys of this Eq sorted with sort.Strings. -// It is used internally for consistent ordering when generating -// SQL, see https://gitea.com/xorm/builder/issues/10 -func (eq Eq) sortedKeys() []string { - keys := make([]string, 0, len(eq)) - for key := range eq { - keys = append(keys, key) - } - sort.Strings(keys) - return keys -} diff --git a/vendor/xorm.io/builder/cond_exists.go b/vendor/xorm.io/builder/cond_exists.go deleted file mode 100644 index 2e089a31..00000000 --- a/vendor/xorm.io/builder/cond_exists.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2022 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package builder - -import ( - "errors" - "io" -) - -type condExists struct { - subQuery *Builder -} - -var _ Cond = condExists{} - -// Exists returns Cond via condition -func Exists(subQuery *Builder) Cond { - return &condExists{ - subQuery: subQuery, - } -} - -func (condExists condExists) WriteTo(w Writer) error { - if !condExists.IsValid() { - return errors.New("exists condition is nov valid") - } - if _, err := io.WriteString(w, "EXISTS ("); err != nil { - return err - } - if err := condExists.subQuery.WriteTo(w); err != nil { - return err - } - _, err := io.WriteString(w, ")") - return err -} - -func (condExists condExists) And(conds ...Cond) Cond { - return And(condExists, And(conds...)) -} - -func (condExists condExists) Or(conds ...Cond) Cond { - return Or(condExists, Or(conds...)) -} - -func (condExists condExists) IsValid() bool { - return condExists.subQuery != nil -} diff --git a/vendor/xorm.io/builder/cond_if.go b/vendor/xorm.io/builder/cond_if.go deleted file mode 100644 index af9eb321..00000000 --- a/vendor/xorm.io/builder/cond_if.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2019 The Xorm Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package builder - -type condIf struct { - condition bool - condTrue Cond - condFalse Cond -} - -var _ Cond = condIf{} - -// If returns Cond via condition -func If(condition bool, condTrue Cond, condFalse ...Cond) Cond { - var c = condIf{ - condition: condition, - condTrue: condTrue, - } - if len(condFalse) > 0 { - c.condFalse = condFalse[0] - } - return c -} - -func (condIf condIf) WriteTo(w Writer) error { - if condIf.condition { - return condIf.condTrue.WriteTo(w) - } else if condIf.condFalse != nil { - return condIf.condFalse.WriteTo(w) - } - return nil -} - -func (condIf condIf) And(conds ...Cond) Cond { - return And(condIf, And(conds...)) -} - -func (condIf condIf) Or(conds ...Cond) Cond { - return Or(condIf, Or(conds...)) -} - -func (condIf condIf) IsValid() bool { - if condIf.condition { - return condIf.condTrue != nil - } - return condIf.condFalse != nil -} diff --git a/vendor/xorm.io/builder/cond_in.go b/vendor/xorm.io/builder/cond_in.go deleted file mode 100644 index fa98b563..00000000 --- a/vendor/xorm.io/builder/cond_in.go +++ /dev/null @@ -1,326 +0,0 @@ -// Copyright 2016 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package builder - -import ( - "fmt" - "reflect" - "strings" -) - -type condIn struct { - col string - vals []interface{} -} - -var _ Cond = condIn{} - -// In generates IN condition -func In(col string, values ...interface{}) Cond { - return condIn{col, values} -} - -func (condIn condIn) handleBlank(w Writer) error { - _, err := fmt.Fprint(w, "0=1") - return err -} - -func (condIn condIn) WriteTo(w Writer) error { - if len(condIn.vals) <= 0 { - return condIn.handleBlank(w) - } - - switch condIn.vals[0].(type) { - case []int8: - vals := condIn.vals[0].([]int8) - if len(vals) <= 0 { - return condIn.handleBlank(w) - } - // We're using this map to track if a parameter was already added to the condition to not add the same multiple times. - trackMap := make(map[int8]bool, len(vals)) - for _, val := range vals { - if _, exists := trackMap[val]; exists { - continue - } - w.Append(val) - trackMap[val] = true - } - questionMark := strings.Repeat("?,", len(trackMap)) - if _, err := fmt.Fprintf(w, "%s IN (%s)", condIn.col, questionMark[:len(questionMark)-1]); err != nil { - return err - } - case []int16: - vals := condIn.vals[0].([]int16) - if len(vals) <= 0 { - return condIn.handleBlank(w) - } - // We're using this map to track if a parameter was already added to the condition to not add the same multiple times. - trackMap := make(map[int16]bool, len(vals)) - for _, val := range vals { - if _, exists := trackMap[val]; exists { - continue - } - w.Append(val) - trackMap[val] = true - } - questionMark := strings.Repeat("?,", len(trackMap)) - if _, err := fmt.Fprintf(w, "%s IN (%s)", condIn.col, questionMark[:len(questionMark)-1]); err != nil { - return err - } - case []int: - vals := condIn.vals[0].([]int) - if len(vals) <= 0 { - return condIn.handleBlank(w) - } - // We're using this map to track if a parameter was already added to the condition to not add the same multiple times. 
- trackMap := make(map[int]bool, len(vals)) - for _, val := range vals { - if _, exists := trackMap[val]; exists { - continue - } - w.Append(val) - trackMap[val] = true - } - questionMark := strings.Repeat("?,", len(trackMap)) - if _, err := fmt.Fprintf(w, "%s IN (%s)", condIn.col, questionMark[:len(questionMark)-1]); err != nil { - return err - } - case []int32: - vals := condIn.vals[0].([]int32) - if len(vals) <= 0 { - return condIn.handleBlank(w) - } - // We're using this map to track if a parameter was already added to the condition to not add the same multiple times. - trackMap := make(map[int32]bool, len(vals)) - for _, val := range vals { - if _, exists := trackMap[val]; exists { - continue - } - w.Append(val) - trackMap[val] = true - } - questionMark := strings.Repeat("?,", len(trackMap)) - if _, err := fmt.Fprintf(w, "%s IN (%s)", condIn.col, questionMark[:len(questionMark)-1]); err != nil { - return err - } - case []int64: - vals := condIn.vals[0].([]int64) - if len(vals) <= 0 { - return condIn.handleBlank(w) - } - // We're using this map to track if a parameter was already added to the condition to not add the same multiple times. - trackMap := make(map[int64]bool, len(vals)) - for _, val := range vals { - if _, exists := trackMap[val]; exists { - continue - } - w.Append(val) - trackMap[val] = true - } - questionMark := strings.Repeat("?,", len(trackMap)) - if _, err := fmt.Fprintf(w, "%s IN (%s)", condIn.col, questionMark[:len(questionMark)-1]); err != nil { - return err - } - case []uint8: - vals := condIn.vals[0].([]uint8) - if len(vals) <= 0 { - return condIn.handleBlank(w) - } - // We're using this map to track if a parameter was already added to the condition to not add the same multiple times. 
- trackMap := make(map[uint8]bool, len(vals)) - for _, val := range vals { - if _, exists := trackMap[val]; exists { - continue - } - w.Append(val) - trackMap[val] = true - } - questionMark := strings.Repeat("?,", len(trackMap)) - if _, err := fmt.Fprintf(w, "%s IN (%s)", condIn.col, questionMark[:len(questionMark)-1]); err != nil { - return err - } - case []uint16: - vals := condIn.vals[0].([]uint16) - if len(vals) <= 0 { - return condIn.handleBlank(w) - } - // We're using this map to track if a parameter was already added to the condition to not add the same multiple times. - trackMap := make(map[uint16]bool, len(vals)) - for _, val := range vals { - if _, exists := trackMap[val]; exists { - continue - } - w.Append(val) - trackMap[val] = true - } - questionMark := strings.Repeat("?,", len(trackMap)) - if _, err := fmt.Fprintf(w, "%s IN (%s)", condIn.col, questionMark[:len(questionMark)-1]); err != nil { - return err - } - case []uint: - vals := condIn.vals[0].([]uint) - if len(vals) <= 0 { - return condIn.handleBlank(w) - } - // We're using this map to track if a parameter was already added to the condition to not add the same multiple times. - trackMap := make(map[uint]bool, len(vals)) - for _, val := range vals { - if _, exists := trackMap[val]; exists { - continue - } - w.Append(val) - trackMap[val] = true - } - questionMark := strings.Repeat("?,", len(trackMap)) - if _, err := fmt.Fprintf(w, "%s IN (%s)", condIn.col, questionMark[:len(questionMark)-1]); err != nil { - return err - } - case []uint32: - vals := condIn.vals[0].([]uint32) - if len(vals) <= 0 { - return condIn.handleBlank(w) - } - // We're using this map to track if a parameter was already added to the condition to not add the same multiple times. 
- trackMap := make(map[uint32]bool, len(vals)) - for _, val := range vals { - if _, exists := trackMap[val]; exists { - continue - } - w.Append(val) - trackMap[val] = true - } - questionMark := strings.Repeat("?,", len(trackMap)) - if _, err := fmt.Fprintf(w, "%s IN (%s)", condIn.col, questionMark[:len(questionMark)-1]); err != nil { - return err - } - case []uint64: - vals := condIn.vals[0].([]uint64) - if len(vals) <= 0 { - return condIn.handleBlank(w) - } - // We're using this map to track if a parameter was already added to the condition to not add the same multiple times. - trackMap := make(map[uint64]bool, len(vals)) - for _, val := range vals { - if _, exists := trackMap[val]; exists { - continue - } - w.Append(val) - trackMap[val] = true - } - questionMark := strings.Repeat("?,", len(trackMap)) - if _, err := fmt.Fprintf(w, "%s IN (%s)", condIn.col, questionMark[:len(questionMark)-1]); err != nil { - return err - } - case []string: - vals := condIn.vals[0].([]string) - if len(vals) <= 0 { - return condIn.handleBlank(w) - } - // We're using this map to track if a parameter was already added to the condition to not add the same multiple times. - trackMap := make(map[string]bool, len(vals)) - for _, val := range vals { - if _, exists := trackMap[val]; exists { - continue - } - w.Append(val) - trackMap[val] = true - } - questionMark := strings.Repeat("?,", len(trackMap)) - if _, err := fmt.Fprintf(w, "%s IN (%s)", condIn.col, questionMark[:len(questionMark)-1]); err != nil { - return err - } - case []interface{}: - vals := condIn.vals[0].([]interface{}) - if len(vals) <= 0 { - return condIn.handleBlank(w) - } - questionMark := strings.Repeat("?,", len(vals)) - if _, err := fmt.Fprintf(w, "%s IN (%s)", condIn.col, questionMark[:len(questionMark)-1]); err != nil { - return err - } - w.Append(vals...) 
- case *Expression: - val := condIn.vals[0].(*Expression) - if _, err := fmt.Fprintf(w, "%s IN (", condIn.col); err != nil { - return err - } - if err := val.WriteTo(w); err != nil { - return err - } - if _, err := fmt.Fprintf(w, ")"); err != nil { - return err - } - case *Builder: - bd := condIn.vals[0].(*Builder) - if _, err := fmt.Fprintf(w, "%s IN (", condIn.col); err != nil { - return err - } - if err := bd.WriteTo(w); err != nil { - return err - } - if _, err := fmt.Fprintf(w, ")"); err != nil { - return err - } - default: - v := reflect.ValueOf(condIn.vals[0]) - if v.Kind() == reflect.Slice { - l := v.Len() - if l == 0 { - return condIn.handleBlank(w) - } - - trackMap := make(map[interface{}]bool, l) - for i := 0; i < l; i++ { - val := v.Index(i).Interface() - if _, exists := trackMap[val]; exists { - continue - } - w.Append(val) - trackMap[val] = true - } - - questionMark := strings.Repeat("?,", len(trackMap)) - if _, err := fmt.Fprintf(w, "%s IN (%s)", condIn.col, questionMark[:len(questionMark)-1]); err != nil { - return err - } - } else { - // Using a map for better efficiency - trackMap := make(map[interface{}]bool, len(condIn.vals)) - - i := 0 - for in, val := range condIn.vals { - if _, exists := trackMap[val]; exists { - // This sets empty values to nil, they get sliced off later. - condIn.vals[in] = nil - continue - } - trackMap[val] = true - condIn.vals[i] = val - i++ - } - // Here we slice the slice to only contain those values we defined as correct. - condIn.vals = condIn.vals[:i] - - questionMark := strings.Repeat("?,", len(condIn.vals)) - if _, err := fmt.Fprintf(w, "%s IN (%s)", condIn.col, questionMark[:len(questionMark)-1]); err != nil { - return err - } - w.Append(condIn.vals...) 
- } - } - return nil -} - -func (condIn condIn) And(conds ...Cond) Cond { - return And(condIn, And(conds...)) -} - -func (condIn condIn) Or(conds ...Cond) Cond { - return Or(condIn, Or(conds...)) -} - -func (condIn condIn) IsValid() bool { - return len(condIn.col) > 0 && len(condIn.vals) > 0 -} diff --git a/vendor/xorm.io/builder/cond_like.go b/vendor/xorm.io/builder/cond_like.go deleted file mode 100644 index e34202f8..00000000 --- a/vendor/xorm.io/builder/cond_like.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2016 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package builder - -import "fmt" - -// Like defines like condition -type Like [2]string - -var _ Cond = Like{"", ""} - -// WriteTo write SQL to Writer -func (like Like) WriteTo(w Writer) error { - if _, err := fmt.Fprintf(w, "%s LIKE ?", like[0]); err != nil { - return err - } - // FIXME: if use other regular express, this will be failed. but for compatible, keep this - if like[1][0] == '%' || like[1][len(like[1])-1] == '%' { - w.Append(like[1]) - } else { - w.Append("%" + like[1] + "%") - } - return nil -} - -// And implements And with other conditions -func (like Like) And(conds ...Cond) Cond { - return And(like, And(conds...)) -} - -// Or implements Or with other conditions -func (like Like) Or(conds ...Cond) Cond { - return Or(like, Or(conds...)) -} - -// IsValid tests if this condition is valid -func (like Like) IsValid() bool { - return len(like[0]) > 0 && len(like[1]) > 0 -} diff --git a/vendor/xorm.io/builder/cond_neq.go b/vendor/xorm.io/builder/cond_neq.go deleted file mode 100644 index 060d8451..00000000 --- a/vendor/xorm.io/builder/cond_neq.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2016 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package builder - -import ( - "fmt" - "sort" -) - -// Neq defines not equal conditions -type Neq map[string]interface{} - -var _ Cond = Neq{} - -// WriteTo writes SQL to Writer -func (neq Neq) WriteTo(w Writer) error { - args := make([]interface{}, 0, len(neq)) - i := 0 - for _, k := range neq.sortedKeys() { - v := neq[k] - switch v.(type) { - case []int, []int64, []string, []int32, []int16, []int8: - if err := NotIn(k, v).WriteTo(w); err != nil { - return err - } - case *Expression: - if _, err := fmt.Fprintf(w, "%s<>(", k); err != nil { - return err - } - - if err := v.(*Expression).WriteTo(w); err != nil { - return err - } - - if _, err := fmt.Fprintf(w, ")"); err != nil { - return err - } - case *Builder: - if _, err := fmt.Fprintf(w, "%s<>(", k); err != nil { - return err - } - - if err := v.(*Builder).WriteTo(w); err != nil { - return err - } - - if _, err := fmt.Fprintf(w, ")"); err != nil { - return err - } - default: - if _, err := fmt.Fprintf(w, "%s<>?", k); err != nil { - return err - } - args = append(args, v) - } - if i != len(neq)-1 { - if _, err := fmt.Fprint(w, " AND "); err != nil { - return err - } - } - i = i + 1 - } - w.Append(args...) - return nil -} - -// And implements And with other conditions -func (neq Neq) And(conds ...Cond) Cond { - return And(neq, And(conds...)) -} - -// Or implements Or with other conditions -func (neq Neq) Or(conds ...Cond) Cond { - return Or(neq, Or(conds...)) -} - -// IsValid tests if this condition is valid -func (neq Neq) IsValid() bool { - return len(neq) > 0 -} - -// sortedKeys returns all keys of this Neq sorted with sort.Strings. 
-// It is used internally for consistent ordering when generating -// SQL, see https://gitea.com/xorm/builder/issues/10 -func (neq Neq) sortedKeys() []string { - keys := make([]string, 0, len(neq)) - for key := range neq { - keys = append(keys, key) - } - sort.Strings(keys) - return keys -} diff --git a/vendor/xorm.io/builder/cond_not.go b/vendor/xorm.io/builder/cond_not.go deleted file mode 100644 index 667dfe72..00000000 --- a/vendor/xorm.io/builder/cond_not.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2016 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package builder - -import "fmt" - -// Not defines NOT condition -type Not [1]Cond - -var _ Cond = Not{} - -// WriteTo writes SQL to Writer -func (not Not) WriteTo(w Writer) error { - if _, err := fmt.Fprint(w, "NOT "); err != nil { - return err - } - switch not[0].(type) { - case condAnd, condOr: - if _, err := fmt.Fprint(w, "("); err != nil { - return err - } - case Eq: - if len(not[0].(Eq)) > 1 { - if _, err := fmt.Fprint(w, "("); err != nil { - return err - } - } - case Neq: - if len(not[0].(Neq)) > 1 { - if _, err := fmt.Fprint(w, "("); err != nil { - return err - } - } - } - - if err := not[0].WriteTo(w); err != nil { - return err - } - - switch not[0].(type) { - case condAnd, condOr: - if _, err := fmt.Fprint(w, ")"); err != nil { - return err - } - case Eq: - if len(not[0].(Eq)) > 1 { - if _, err := fmt.Fprint(w, ")"); err != nil { - return err - } - } - case Neq: - if len(not[0].(Neq)) > 1 { - if _, err := fmt.Fprint(w, ")"); err != nil { - return err - } - } - } - - return nil -} - -// And implements And with other conditions -func (not Not) And(conds ...Cond) Cond { - return And(not, And(conds...)) -} - -// Or implements Or with other conditions -func (not Not) Or(conds ...Cond) Cond { - return Or(not, Or(conds...)) -} - -// IsValid tests if this condition is valid -func (not Not) IsValid() bool { - 
return not[0] != nil && not[0].IsValid() -} diff --git a/vendor/xorm.io/builder/cond_not_exists.go b/vendor/xorm.io/builder/cond_not_exists.go deleted file mode 100644 index fc9ea4c8..00000000 --- a/vendor/xorm.io/builder/cond_not_exists.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2022 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package builder - -import ( - "errors" - "io" -) - -type condNotExists struct { - subQuery *Builder -} - -var _ Cond = condNotExists{} - -// NotExists returns Cond via condition -func NotExists(subQuery *Builder) Cond { - return &condNotExists{ - subQuery: subQuery, - } -} - -func (condNotExists condNotExists) WriteTo(w Writer) error { - if !condNotExists.IsValid() { - return errors.New("exists condition is nov valid") - } - if _, err := io.WriteString(w, "NOT EXISTS ("); err != nil { - return err - } - if err := condNotExists.subQuery.WriteTo(w); err != nil { - return err - } - _, err := io.WriteString(w, ")") - return err -} - -func (condNotExists condNotExists) And(conds ...Cond) Cond { - return And(condNotExists, And(conds...)) -} - -func (condNotExists condNotExists) Or(conds ...Cond) Cond { - return Or(condNotExists, Or(conds...)) -} - -func (condNotExists condNotExists) IsValid() bool { - return condNotExists.subQuery != nil -} diff --git a/vendor/xorm.io/builder/cond_notin.go b/vendor/xorm.io/builder/cond_notin.go deleted file mode 100644 index 43a2be30..00000000 --- a/vendor/xorm.io/builder/cond_notin.go +++ /dev/null @@ -1,323 +0,0 @@ -// Copyright 2016 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package builder - -import ( - "fmt" - "reflect" - "strings" -) - -type condNotIn condIn - -var _ Cond = condNotIn{} - -// NotIn generate NOT IN condition -func NotIn(col string, values ...interface{}) Cond { - return condNotIn{col, values} -} - -func (condNotIn condNotIn) handleBlank(w Writer) error { - _, err := fmt.Fprint(w, "0=0") - return err -} - -func (condNotIn condNotIn) WriteTo(w Writer) error { - if len(condNotIn.vals) <= 0 { - return condNotIn.handleBlank(w) - } - - switch condNotIn.vals[0].(type) { - case []int8: - vals := condNotIn.vals[0].([]int8) - if len(vals) <= 0 { - return condNotIn.handleBlank(w) - } - // We're using this map to track if a parameter was already added to the condition to not add the same multiple times. - trackMap := make(map[int8]bool, len(vals)) - for _, val := range vals { - if _, exists := trackMap[val]; exists { - continue - } - w.Append(val) - trackMap[val] = true - } - questionMark := strings.Repeat("?,", len(trackMap)) - if _, err := fmt.Fprintf(w, "%s NOT IN (%s)", condNotIn.col, questionMark[:len(questionMark)-1]); err != nil { - return err - } - case []int16: - vals := condNotIn.vals[0].([]int16) - if len(vals) <= 0 { - return condNotIn.handleBlank(w) - } - // We're using this map to track if a parameter was already added to the condition to not add the same multiple times. - trackMap := make(map[int16]bool, len(vals)) - for _, val := range vals { - if _, exists := trackMap[val]; exists { - continue - } - w.Append(val) - trackMap[val] = true - } - questionMark := strings.Repeat("?,", len(trackMap)) - if _, err := fmt.Fprintf(w, "%s NOT IN (%s)", condNotIn.col, questionMark[:len(questionMark)-1]); err != nil { - return err - } - case []int: - vals := condNotIn.vals[0].([]int) - if len(vals) <= 0 { - return condNotIn.handleBlank(w) - } - // We're using this map to track if a parameter was already added to the condition to not add the same multiple times. 
- trackMap := make(map[int]bool, len(vals)) - for _, val := range vals { - if _, exists := trackMap[val]; exists { - continue - } - w.Append(val) - trackMap[val] = true - } - questionMark := strings.Repeat("?,", len(trackMap)) - if _, err := fmt.Fprintf(w, "%s NOT IN (%s)", condNotIn.col, questionMark[:len(questionMark)-1]); err != nil { - return err - } - case []int32: - vals := condNotIn.vals[0].([]int32) - if len(vals) <= 0 { - return condNotIn.handleBlank(w) - } - // We're using this map to track if a parameter was already added to the condition to not add the same multiple times. - trackMap := make(map[int32]bool, len(vals)) - for _, val := range vals { - if _, exists := trackMap[val]; exists { - continue - } - w.Append(val) - trackMap[val] = true - } - questionMark := strings.Repeat("?,", len(trackMap)) - if _, err := fmt.Fprintf(w, "%s NOT IN (%s)", condNotIn.col, questionMark[:len(questionMark)-1]); err != nil { - return err - } - case []int64: - vals := condNotIn.vals[0].([]int64) - if len(vals) <= 0 { - return condNotIn.handleBlank(w) - } - // We're using this map to track if a parameter was already added to the condition to not add the same multiple times. - trackMap := make(map[int64]bool, len(vals)) - for _, val := range vals { - if _, exists := trackMap[val]; exists { - continue - } - w.Append(val) - trackMap[val] = true - } - questionMark := strings.Repeat("?,", len(trackMap)) - if _, err := fmt.Fprintf(w, "%s NOT IN (%s)", condNotIn.col, questionMark[:len(questionMark)-1]); err != nil { - return err - } - case []uint8: - vals := condNotIn.vals[0].([]uint8) - if len(vals) <= 0 { - return condNotIn.handleBlank(w) - } - // We're using this map to track if a parameter was already added to the condition to not add the same multiple times. 
- trackMap := make(map[uint8]bool, len(vals)) - for _, val := range vals { - if _, exists := trackMap[val]; exists { - continue - } - w.Append(val) - trackMap[val] = true - } - questionMark := strings.Repeat("?,", len(trackMap)) - if _, err := fmt.Fprintf(w, "%s NOT IN (%s)", condNotIn.col, questionMark[:len(questionMark)-1]); err != nil { - return err - } - case []uint16: - vals := condNotIn.vals[0].([]uint16) - if len(vals) <= 0 { - return condNotIn.handleBlank(w) - } - // We're using this map to track if a parameter was already added to the condition to not add the same multiple times. - trackMap := make(map[uint16]bool, len(vals)) - for _, val := range vals { - if _, exists := trackMap[val]; exists { - continue - } - w.Append(val) - trackMap[val] = true - } - questionMark := strings.Repeat("?,", len(trackMap)) - if _, err := fmt.Fprintf(w, "%s NOT IN (%s)", condNotIn.col, questionMark[:len(questionMark)-1]); err != nil { - return err - } - case []uint: - vals := condNotIn.vals[0].([]uint) - if len(vals) <= 0 { - return condNotIn.handleBlank(w) - } - // We're using this map to track if a parameter was already added to the condition to not add the same multiple times. - trackMap := make(map[uint]bool, len(vals)) - for _, val := range vals { - if _, exists := trackMap[val]; exists { - continue - } - w.Append(val) - trackMap[val] = true - } - questionMark := strings.Repeat("?,", len(trackMap)) - if _, err := fmt.Fprintf(w, "%s NOT IN (%s)", condNotIn.col, questionMark[:len(questionMark)-1]); err != nil { - return err - } - case []uint32: - vals := condNotIn.vals[0].([]uint32) - if len(vals) <= 0 { - return condNotIn.handleBlank(w) - } - // We're using this map to track if a parameter was already added to the condition to not add the same multiple times. 
- trackMap := make(map[uint32]bool, len(vals)) - for _, val := range vals { - if _, exists := trackMap[val]; exists { - continue - } - w.Append(val) - trackMap[val] = true - } - questionMark := strings.Repeat("?,", len(trackMap)) - if _, err := fmt.Fprintf(w, "%s NOT IN (%s)", condNotIn.col, questionMark[:len(questionMark)-1]); err != nil { - return err - } - case []uint64: - vals := condNotIn.vals[0].([]uint64) - if len(vals) <= 0 { - return condNotIn.handleBlank(w) - } - // We're using this map to track if a parameter was already added to the condition to not add the same multiple times. - trackMap := make(map[uint64]bool, len(vals)) - for _, val := range vals { - if _, exists := trackMap[val]; exists { - continue - } - w.Append(val) - trackMap[val] = true - } - questionMark := strings.Repeat("?,", len(trackMap)) - if _, err := fmt.Fprintf(w, "%s NOT IN (%s)", condNotIn.col, questionMark[:len(questionMark)-1]); err != nil { - return err - } - case []string: - vals := condNotIn.vals[0].([]string) - if len(vals) <= 0 { - return condNotIn.handleBlank(w) - } - // We're using this map to track if a parameter was already added to the condition to not add the same multiple times. - trackMap := make(map[string]bool, len(vals)) - for _, val := range vals { - if _, exists := trackMap[val]; exists { - continue - } - w.Append(val) - trackMap[val] = true - } - questionMark := strings.Repeat("?,", len(trackMap)) - if _, err := fmt.Fprintf(w, "%s NOT IN (%s)", condNotIn.col, questionMark[:len(questionMark)-1]); err != nil { - return err - } - case []interface{}: - vals := condNotIn.vals[0].([]interface{}) - if len(vals) <= 0 { - return condNotIn.handleBlank(w) - } - questionMark := strings.Repeat("?,", len(vals)) - if _, err := fmt.Fprintf(w, "%s NOT IN (%s)", condNotIn.col, questionMark[:len(questionMark)-1]); err != nil { - return err - } - w.Append(vals...) 
- case *Expression: - val := condNotIn.vals[0].(*Expression) - if _, err := fmt.Fprintf(w, "%s NOT IN (", condNotIn.col); err != nil { - return err - } - if err := val.WriteTo(w); err != nil { - return err - } - if _, err := fmt.Fprintf(w, ")"); err != nil { - return err - } - case *Builder: - val := condNotIn.vals[0].(*Builder) - if _, err := fmt.Fprintf(w, "%s NOT IN (", condNotIn.col); err != nil { - return err - } - if err := val.WriteTo(w); err != nil { - return err - } - if _, err := fmt.Fprintf(w, ")"); err != nil { - return err - } - default: - v := reflect.ValueOf(condNotIn.vals[0]) - if v.Kind() == reflect.Slice { - l := v.Len() - if l == 0 { - return condNotIn.handleBlank(w) - } - - trackMap := make(map[interface{}]bool, l) - for i := 0; i < l; i++ { - val := v.Index(i).Interface() - if _, exists := trackMap[val]; exists { - continue - } - w.Append(val) - trackMap[val] = true - } - - questionMark := strings.Repeat("?,", len(trackMap)) - if _, err := fmt.Fprintf(w, "%s NOT IN (%s)", condNotIn.col, questionMark[:len(questionMark)-1]); err != nil { - return err - } - } else { - // Using a map for better efficiency - trackMap := make(map[interface{}]bool, len(condNotIn.vals)) - - i := 0 - for in, val := range condNotIn.vals { - if _, exists := trackMap[val]; exists { - // This sets empty values to nil, they get sliced off later. - condNotIn.vals[in] = nil - continue - } - trackMap[val] = true - condNotIn.vals[i] = val - i++ - } - // Here we slice the slice to only contain those values we defined as correct. - condNotIn.vals = condNotIn.vals[:i] - - questionMark := strings.Repeat("?,", len(condNotIn.vals)) - if _, err := fmt.Fprintf(w, "%s NOT IN (%s)", condNotIn.col, questionMark[:len(questionMark)-1]); err != nil { - return err - } - w.Append(condNotIn.vals...) 
- } - } - return nil -} - -func (condNotIn condNotIn) And(conds ...Cond) Cond { - return And(condNotIn, And(conds...)) -} - -func (condNotIn condNotIn) Or(conds ...Cond) Cond { - return Or(condNotIn, Or(conds...)) -} - -func (condNotIn condNotIn) IsValid() bool { - return len(condNotIn.col) > 0 && len(condNotIn.vals) > 0 -} diff --git a/vendor/xorm.io/builder/cond_null.go b/vendor/xorm.io/builder/cond_null.go deleted file mode 100644 index bf2aaf85..00000000 --- a/vendor/xorm.io/builder/cond_null.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2016 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package builder - -import "fmt" - -// IsNull defines IS NULL condition -type IsNull [1]string - -var _ Cond = IsNull{""} - -// WriteTo write SQL to Writer -func (isNull IsNull) WriteTo(w Writer) error { - _, err := fmt.Fprintf(w, "%s IS NULL", isNull[0]) - return err -} - -// And implements And with other conditions -func (isNull IsNull) And(conds ...Cond) Cond { - return And(isNull, And(conds...)) -} - -// Or implements Or with other conditions -func (isNull IsNull) Or(conds ...Cond) Cond { - return Or(isNull, Or(conds...)) -} - -// IsValid tests if this condition is valid -func (isNull IsNull) IsValid() bool { - return len(isNull[0]) > 0 -} - -// NotNull defines NOT NULL condition -type NotNull [1]string - -var _ Cond = NotNull{""} - -// WriteTo write SQL to Writer -func (notNull NotNull) WriteTo(w Writer) error { - _, err := fmt.Fprintf(w, "%s IS NOT NULL", notNull[0]) - return err -} - -// And implements And with other conditions -func (notNull NotNull) And(conds ...Cond) Cond { - return And(notNull, And(conds...)) -} - -// Or implements Or with other conditions -func (notNull NotNull) Or(conds ...Cond) Cond { - return Or(notNull, Or(conds...)) -} - -// IsValid tests if this condition is valid -func (notNull NotNull) IsValid() bool { - return len(notNull[0]) > 0 -} diff 
--git a/vendor/xorm.io/builder/cond_or.go b/vendor/xorm.io/builder/cond_or.go deleted file mode 100644 index e0321541..00000000 --- a/vendor/xorm.io/builder/cond_or.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2016 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package builder - -import "fmt" - -type condOr []Cond - -var _ Cond = condOr{} - -// Or sets OR conditions -func Or(conds ...Cond) Cond { - result := make(condOr, 0, len(conds)) - for _, cond := range conds { - if cond == nil || !cond.IsValid() { - continue - } - result = append(result, cond) - } - return result -} - -// WriteTo implments Cond -func (o condOr) WriteTo(w Writer) error { - for i, cond := range o { - var needQuote bool - switch cond.(type) { - case condAnd, *Expression: - needQuote = true - case Eq: - needQuote = (len(cond.(Eq)) > 1) - case Neq: - needQuote = (len(cond.(Neq)) > 1) - } - - if needQuote { - fmt.Fprint(w, "(") - } - - err := cond.WriteTo(w) - if err != nil { - return err - } - - if needQuote { - fmt.Fprint(w, ")") - } - - if i != len(o)-1 { - fmt.Fprint(w, " OR ") - } - } - - return nil -} - -func (o condOr) And(conds ...Cond) Cond { - return And(o, And(conds...)) -} - -func (o condOr) Or(conds ...Cond) Cond { - return Or(o, Or(conds...)) -} - -func (o condOr) IsValid() bool { - return len(o) > 0 -} diff --git a/vendor/xorm.io/builder/doc.go b/vendor/xorm.io/builder/doc.go deleted file mode 100644 index 6e7dd452..00000000 --- a/vendor/xorm.io/builder/doc.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright 2016 The XORM Authors. All rights reserved. -// Use of this source code is governed by a BSD -// license that can be found in the LICENSE file. - -/* - -Package builder is a simple and powerful sql builder for Go. - -Make sure you have installed Go 1.1+ and then: - - go get xorm.io/builder - -WARNNING: Currently, only query conditions are supported. 
Below is the supported conditions. - -1. Eq is a redefine of a map, you can give one or more conditions to Eq - - import . "xorm.io/builder" - - sql, args, _ := ToSQL(Eq{"a":1}) - // a=? [1] - sql, args, _ := ToSQL(Eq{"b":"c"}.And(Eq{"c": 0})) - // b=? AND c=? ["c", 0] - sql, args, _ := ToSQL(Eq{"b":"c", "c":0}) - // b=? AND c=? ["c", 0] - sql, args, _ := ToSQL(Eq{"b":"c"}.Or(Eq{"b":"d"})) - // b=? OR b=? ["c", "d"] - sql, args, _ := ToSQL(Eq{"b": []string{"c", "d"}}) - // b IN (?,?) ["c", "d"] - sql, args, _ := ToSQL(Eq{"b": 1, "c":[]int{2, 3}}) - // b=? AND c IN (?,?) [1, 2, 3] - -2. Neq is the same to Eq - - import . "xorm.io/builder" - - sql, args, _ := ToSQL(Neq{"a":1}) - // a<>? [1] - sql, args, _ := ToSQL(Neq{"b":"c"}.And(Neq{"c": 0})) - // b<>? AND c<>? ["c", 0] - sql, args, _ := ToSQL(Neq{"b":"c", "c":0}) - // b<>? AND c<>? ["c", 0] - sql, args, _ := ToSQL(Neq{"b":"c"}.Or(Neq{"b":"d"})) - // b<>? OR b<>? ["c", "d"] - sql, args, _ := ToSQL(Neq{"b": []string{"c", "d"}}) - // b NOT IN (?,?) ["c", "d"] - sql, args, _ := ToSQL(Neq{"b": 1, "c":[]int{2, 3}}) - // b<>? AND c NOT IN (?,?) [1, 2, 3] - -3. Gt, Gte, Lt, Lte - - import . "xorm.io/builder" - - sql, args, _ := ToSQL(Gt{"a", 1}.And(Gte{"b", 2})) - // a>? AND b>=? [1, 2] - sql, args, _ := ToSQL(Lt{"a", 1}.Or(Lte{"b", 2})) - // a? [1, %c%, 2] - -9. Or(conds ...Cond), Or can connect one or more conditions via Or - - import . "xorm.io/builder" - - sql, args, _ := ToSQL(Or(Eq{"a":1}, Like{"b", "c"}, Neq{"d", 2})) - // a=? OR b LIKE ? OR d<>? [1, %c%, 2] - sql, args, _ := ToSQL(Or(Eq{"a":1}, And(Like{"b", "c"}, Neq{"d", 2}))) - // a=? OR (b LIKE ? AND d<>?) [1, %c%, 2] - -10. Between - - import . "xorm.io/builder" - - sql, args, _ := ToSQL(Between("a", 1, 2)) - // a BETWEEN 1 AND 2 - -11. 
define yourself conditions -Since Cond is a interface, you can define yourself conditions and compare with them -*/ -package builder diff --git a/vendor/xorm.io/builder/error.go b/vendor/xorm.io/builder/error.go deleted file mode 100644 index b0ded29f..00000000 --- a/vendor/xorm.io/builder/error.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2016 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package builder - -import "errors" - -var ( - // ErrNotSupportType not supported SQL type error - ErrNotSupportType = errors.New("Not supported SQL type") - // ErrNoNotInConditions no NOT IN params error - ErrNoNotInConditions = errors.New("No NOT IN conditions") - // ErrNoInConditions no IN params error - ErrNoInConditions = errors.New("No IN conditions") - // ErrNeedMoreArguments need more arguments - ErrNeedMoreArguments = errors.New("Need more sql arguments") - // ErrNoTableName no table name - ErrNoTableName = errors.New("No table indicated") - // ErrNoColumnToUpdate no column to update - ErrNoColumnToUpdate = errors.New("No column(s) to update") - // ErrNoColumnToInsert no column to insert - ErrNoColumnToInsert = errors.New("No column(s) to insert") - // ErrNotSupportDialectType not supported dialect type error - ErrNotSupportDialectType = errors.New("Not supported dialect type") - // ErrNotUnexpectedUnionConditions using union in a wrong way - ErrNotUnexpectedUnionConditions = errors.New("Unexpected conditional fields in UNION query") - // ErrUnsupportedUnionMembers unexpected members in UNION query - ErrUnsupportedUnionMembers = errors.New("Unexpected members in UNION query") - // ErrUnexpectedSubQuery Unexpected sub-query in SELECT query - ErrUnexpectedSubQuery = errors.New("Unexpected sub-query in SELECT query") - // ErrDialectNotSetUp dialect is not setup yet - ErrDialectNotSetUp = errors.New("Dialect is not setup yet, try to use `Dialect(dbType)` at first") - // 
ErrInvalidLimitation offset or limit is not correct - ErrInvalidLimitation = errors.New("Offset or limit is not correct") - // ErrUnnamedDerivedTable Every derived table must have its own alias - ErrUnnamedDerivedTable = errors.New("Every derived table must have its own alias") - // ErrInconsistentDialect Inconsistent dialect in same builder - ErrInconsistentDialect = errors.New("Inconsistent dialect in same builder") -) diff --git a/vendor/xorm.io/builder/expr.go b/vendor/xorm.io/builder/expr.go deleted file mode 100644 index 3efe3ffb..00000000 --- a/vendor/xorm.io/builder/expr.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2016 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package builder - -import "fmt" - -// Expression represetns a SQL express with arguments -type Expression struct { - sql string - args []interface{} -} - -var _ Cond = &Expression{} - -// Expr generate customerize SQL -func Expr(sql string, args ...interface{}) Cond { - return &Expression{sql, args} -} - -func (expr *Expression) Content() string { - return expr.sql -} - -func (expr *Expression) Args() []interface{} { - return expr.args -} - -// OpWriteTo implements UpdateCond interface -func (expr *Expression) OpWriteTo(op string, w Writer) error { - return expr.WriteTo(w) -} - -// WriteTo implements Cond interface -func (expr *Expression) WriteTo(w Writer) error { - if _, err := fmt.Fprint(w, expr.sql); err != nil { - return err - } - w.Append(expr.args...) 
- return nil -} - -// And implements Cond interface -func (expr *Expression) And(conds ...Cond) Cond { - return And(expr, And(conds...)) -} - -// Or implements Cond interface -func (expr *Expression) Or(conds ...Cond) Cond { - return Or(expr, Or(conds...)) -} - -// IsValid implements Cond interface -func (expr *Expression) IsValid() bool { - return len(expr.sql) > 0 -} diff --git a/vendor/xorm.io/builder/sql.go b/vendor/xorm.io/builder/sql.go deleted file mode 100644 index 60f22621..00000000 --- a/vendor/xorm.io/builder/sql.go +++ /dev/null @@ -1,168 +0,0 @@ -// Copyright 2018 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package builder - -import ( - sql2 "database/sql" - "fmt" - "reflect" - "strings" - "time" -) - -func condToSQL(cond Cond) (string, []interface{}, error) { - if cond == nil || !cond.IsValid() { - return "", nil, nil - } - - w := NewWriter() - if err := cond.WriteTo(w); err != nil { - return "", nil, err - } - return w.String(), w.args, nil -} - -func condToBoundSQL(cond Cond) (string, error) { - if cond == nil || !cond.IsValid() { - return "", nil - } - - w := NewWriter() - if err := cond.WriteTo(w); err != nil { - return "", err - } - return ConvertToBoundSQL(w.String(), w.args) -} - -// ToSQL convert a builder or conditions to SQL and args -func ToSQL(cond interface{}) (string, []interface{}, error) { - switch cond.(type) { - case Cond: - return condToSQL(cond.(Cond)) - case *Builder: - return cond.(*Builder).ToSQL() - } - return "", nil, ErrNotSupportType -} - -// ToBoundSQL convert a builder or conditions to parameters bound SQL -func ToBoundSQL(cond interface{}) (string, error) { - switch cond.(type) { - case Cond: - return condToBoundSQL(cond.(Cond)) - case *Builder: - return cond.(*Builder).ToBoundSQL() - } - return "", ErrNotSupportType -} - -func noSQLQuoteNeeded(a interface{}) bool { - if a == nil { - return false - } - switch a.(type) { 
- case int, int8, int16, int32, int64: - return true - case uint, uint8, uint16, uint32, uint64: - return true - case float32, float64: - return true - case bool: - return true - case string: - return false - case time.Time, *time.Time: - return false - } - - t := reflect.TypeOf(a) - - switch t.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return true - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return true - case reflect.Float32, reflect.Float64: - return true - case reflect.Bool: - return true - case reflect.String: - return false - } - - return false -} - -// ConvertToBoundSQL will convert SQL and args to a bound SQL -func ConvertToBoundSQL(sql string, args []interface{}) (string, error) { - buf := strings.Builder{} - var i, j, start int - for ; i < len(sql); i++ { - if sql[i] == '?' { - _, err := buf.WriteString(sql[start:i]) - if err != nil { - return "", err - } - start = i + 1 - - if len(args) == j { - return "", ErrNeedMoreArguments - } - - arg := args[j] - if namedArg, ok := arg.(sql2.NamedArg); ok { - arg = namedArg.Value - } - - if noSQLQuoteNeeded(arg) { - _, err = fmt.Fprint(&buf, arg) - } else { - // replace ' -> '' (standard replacement) to avoid critical SQL injection, - // NOTICE: may allow some injection like % (or _) in LIKE query - _, err = fmt.Fprintf(&buf, "'%v'", strings.Replace(fmt.Sprintf("%v", arg), "'", - "''", -1)) - } - if err != nil { - return "", err - } - j = j + 1 - } - } - _, err := buf.WriteString(sql[start:]) - if err != nil { - return "", err - } - return buf.String(), nil -} - -// ConvertPlaceholder replaces the place holder ? to $1, $2 ... or :1, :2 ... according prefix -func ConvertPlaceholder(sql, prefix string) (string, error) { - buf := strings.Builder{} - var i, j, start int - var ready = true - for ; i < len(sql); i++ { - if sql[i] == '\'' && i > 0 && sql[i-1] != '\\' { - ready = !ready - } - if ready && sql[i] == '?' 
{ - if _, err := buf.WriteString(sql[start:i]); err != nil { - return "", err - } - - start = i + 1 - j = j + 1 - - if _, err := buf.WriteString(fmt.Sprintf("%v%d", prefix, j)); err != nil { - return "", err - } - } - } - - if _, err := buf.WriteString(sql[start:]); err != nil { - return "", err - } - - return buf.String(), nil -} diff --git a/vendor/xorm.io/builder/writer.go b/vendor/xorm.io/builder/writer.go deleted file mode 100644 index fb4fae5c..00000000 --- a/vendor/xorm.io/builder/writer.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2019 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package builder - -import ( - "io" - "strings" -) - -// Writer defines the interface -type Writer interface { - io.Writer - Append(...interface{}) -} - -var _ Writer = NewWriter() - -// BytesWriter implments Writer and save SQL in bytes.Buffer -type BytesWriter struct { - *strings.Builder - args []interface{} -} - -// NewWriter creates a new string writer -func NewWriter() *BytesWriter { - w := &BytesWriter{ - Builder: &strings.Builder{}, - } - return w -} - -// Append appends args to Writer -func (w *BytesWriter) Append(args ...interface{}) { - w.args = append(w.args, args...) 
-} - -// Args returns args -func (w *BytesWriter) Args() []interface{} { - return w.args -} diff --git a/vendor/xorm.io/xorm/.changelog.yml b/vendor/xorm.io/xorm/.changelog.yml deleted file mode 100644 index 1303c9cc..00000000 --- a/vendor/xorm.io/xorm/.changelog.yml +++ /dev/null @@ -1,53 +0,0 @@ -# The full repository name -repo: xorm/xorm - -# Service type (gitea or github) -service: gitea - -# Base URL for Gitea instance if using gitea service type (optional) -# Default: https://gitea.com -base-url: - -# Changelog groups and which labeled PRs to add to each group -groups: - - - name: BREAKING - labels: - - kind/breaking - - - name: FEATURES - labels: - - kind/feature - - - name: SECURITY - labels: - - kind/security - - - name: BUGFIXES - labels: - - kind/bug - - - name: ENHANCEMENTS - labels: - - kind/enhancement - - kind/refactor - - kind/ui - - - name: TESTING - labels: - - kind/testing - - - name: BUILD - labels: - - kind/build - - kind/lint - - - name: DOCS - labels: - - kind/docs - - - name: MISC - default: true - -# regex indicating which labels to skip for the changelog -skip-labels: skip-changelog|backport\/.+ diff --git a/vendor/xorm.io/xorm/.drone.yml b/vendor/xorm.io/xorm/.drone.yml deleted file mode 100644 index 210572b0..00000000 --- a/vendor/xorm.io/xorm/.drone.yml +++ /dev/null @@ -1,437 +0,0 @@ ---- -kind: pipeline -name: test-mysql -environment: - GO111MODULE: "on" - GOPROXY: "https://goproxy.io" - CGO_ENABLED: 1 -trigger: - ref: - - refs/heads/master - - refs/pull/*/head -steps: -- name: test-vet - image: golang:1.15 - pull: always - volumes: - - name: cache - path: /go/pkg/mod - commands: - - make vet -- name: test-sqlite3 - image: golang:1.15 - volumes: - - name: cache - path: /go/pkg/mod - depends_on: - - test-vet - commands: - - make fmt-check - - make test - - make test-sqlite3 - - TEST_CACHE_ENABLE=true make test-sqlite3 -- name: test-sqlite - image: golang:1.15 - volumes: - - name: cache - path: /go/pkg/mod - depends_on: - - test-vet - 
commands: - - make test-sqlite - - TEST_QUOTE_POLICY=reserved make test-sqlite -- name: test-mysql - image: golang:1.15 - pull: never - volumes: - - name: cache - path: /go/pkg/mod - depends_on: - - test-vet - environment: - TEST_MYSQL_HOST: mysql - TEST_MYSQL_CHARSET: utf8 - TEST_MYSQL_DBNAME: xorm_test - TEST_MYSQL_USERNAME: root - TEST_MYSQL_PASSWORD: - commands: - - TEST_CACHE_ENABLE=true make test-mysql - -- name: test-mysql-utf8mb4 - image: golang:1.15 - pull: never - volumes: - - name: cache - path: /go/pkg/mod - depends_on: - - test-mysql - environment: - TEST_MYSQL_HOST: mysql - TEST_MYSQL_CHARSET: utf8mb4 - TEST_MYSQL_DBNAME: xorm_test - TEST_MYSQL_USERNAME: root - TEST_MYSQL_PASSWORD: - commands: - - make test-mysql - - TEST_QUOTE_POLICY=reserved make test-mysql-tls - -volumes: -- name: cache - host: - path: /tmp/cache - -services: -- name: mysql - image: mysql:5.7 - environment: - MYSQL_ALLOW_EMPTY_PASSWORD: yes - MYSQL_DATABASE: xorm_test - ---- -kind: pipeline -name: test-mysql8 -depends_on: - - test-mysql -trigger: - ref: - - refs/heads/master - - refs/pull/*/head -steps: -- name: test-mysql8 - image: golang:1.15 - pull: never - volumes: - - name: cache - path: /go/pkg/mod - environment: - TEST_MYSQL_HOST: mysql8 - TEST_MYSQL_CHARSET: utf8mb4 - TEST_MYSQL_DBNAME: xorm_test - TEST_MYSQL_USERNAME: root - TEST_MYSQL_PASSWORD: - commands: - - make test-mysql - - TEST_CACHE_ENABLE=true make test-mysql - -volumes: -- name: cache - host: - path: /tmp/cache - -services: -- name: mysql8 - image: mysql:8.0 - environment: - MYSQL_ALLOW_EMPTY_PASSWORD: yes - MYSQL_DATABASE: xorm_test - ---- -kind: pipeline -name: test-mariadb -depends_on: - - test-mysql8 -trigger: - ref: - - refs/heads/master - - refs/pull/*/head -steps: -- name: test-mariadb - image: golang:1.15 - pull: never - volumes: - - name: cache - path: /go/pkg/mod - environment: - TEST_MYSQL_HOST: mariadb - TEST_MYSQL_CHARSET: utf8mb4 - TEST_MYSQL_DBNAME: xorm_test - TEST_MYSQL_USERNAME: root - 
TEST_MYSQL_PASSWORD: - commands: - - make test-mysql - - TEST_QUOTE_POLICY=reserved make test-mysql - -volumes: -- name: cache - host: - path: /tmp/cache - -services: -- name: mariadb - image: mariadb:10.4 - environment: - MYSQL_ALLOW_EMPTY_PASSWORD: yes - MYSQL_DATABASE: xorm_test - ---- -kind: pipeline -name: test-postgres -depends_on: - - test-mariadb -trigger: - ref: - - refs/heads/master - - refs/pull/*/head -steps: -- name: test-postgres - pull: never - image: golang:1.15 - volumes: - - name: cache - path: /go/pkg/mod - environment: - TEST_PGSQL_HOST: pgsql - TEST_PGSQL_DBNAME: xorm_test - TEST_PGSQL_USERNAME: postgres - TEST_PGSQL_PASSWORD: postgres - commands: - - make test-postgres - - TEST_CACHE_ENABLE=true make test-postgres - -- name: test-postgres-schema - pull: never - image: golang:1.15 - volumes: - - name: cache - path: /go/pkg/mod - depends_on: - - test-postgres - environment: - TEST_PGSQL_HOST: pgsql - TEST_PGSQL_SCHEMA: xorm - TEST_PGSQL_DBNAME: xorm_test - TEST_PGSQL_USERNAME: postgres - TEST_PGSQL_PASSWORD: postgres - commands: - - TEST_QUOTE_POLICY=reserved make test-postgres - -- name: test-pgx - pull: never - image: golang:1.15 - volumes: - - name: cache - path: /go/pkg/mod - depends_on: - - test-postgres-schema - environment: - TEST_PGSQL_HOST: pgsql - TEST_PGSQL_DBNAME: xorm_test - TEST_PGSQL_USERNAME: postgres - TEST_PGSQL_PASSWORD: postgres - commands: - - make test-pgx - - TEST_CACHE_ENABLE=true make test-pgx - - TEST_QUOTE_POLICY=reserved make test-pgx - -- name: test-pgx-schema - pull: never - image: golang:1.15 - volumes: - - name: cache - path: /go/pkg/mod - depends_on: - - test-pgx - environment: - TEST_PGSQL_HOST: pgsql - TEST_PGSQL_SCHEMA: xorm - TEST_PGSQL_DBNAME: xorm_test - TEST_PGSQL_USERNAME: postgres - TEST_PGSQL_PASSWORD: postgres - commands: - - make test-pgx - - TEST_CACHE_ENABLE=true make test-pgx - - TEST_QUOTE_POLICY=reserved make test-pgx - -volumes: -- name: cache - host: - path: /tmp/cache - -services: -- name: 
pgsql - image: postgres:9.5 - environment: - POSTGRES_DB: xorm_test - POSTGRES_USER: postgres - POSTGRES_PASSWORD: postgres - ---- -kind: pipeline -name: test-mssql -depends_on: - - test-postgres -trigger: - ref: - - refs/heads/master - - refs/pull/*/head -steps: -- name: test-mssql - pull: never - image: golang:1.15 - volumes: - - name: cache - path: /go/pkg/mod - environment: - TEST_MSSQL_HOST: mssql - TEST_MSSQL_DBNAME: xorm_test - TEST_MSSQL_USERNAME: sa - TEST_MSSQL_PASSWORD: "yourStrong(!)Password" - commands: - - make test-mssql - - TEST_MSSQL_DEFAULT_VARCHAR=NVARCHAR TEST_MSSQL_DEFAULT_CHAR=NCHAR make test-mssql - -volumes: -- name: cache - host: - path: /tmp/cache - -services: -- name: mssql - pull: always - image: mcr.microsoft.com/mssql/server:latest - environment: - ACCEPT_EULA: Y - SA_PASSWORD: yourStrong(!)Password - MSSQL_PID: Standard - ---- -kind: pipeline -name: test-tidb -depends_on: - - test-mssql -trigger: - ref: - - refs/heads/master - - refs/pull/*/head -steps: -- name: test-tidb - pull: never - image: golang:1.15 - volumes: - - name: cache - path: /go/pkg/mod - environment: - TEST_TIDB_HOST: "tidb:4000" - TEST_TIDB_DBNAME: xorm_test - TEST_TIDB_USERNAME: root - TEST_TIDB_PASSWORD: - commands: - - make test-tidb - -volumes: -- name: cache - host: - path: /tmp/cache - -services: -- name: tidb - image: pingcap/tidb:v3.0.3 - ---- -kind: pipeline -name: test-cockroach -depends_on: - - test-tidb -trigger: - ref: - - refs/heads/master - - refs/pull/*/head -steps: -- name: test-cockroach - pull: never - image: golang:1.15 - volumes: - - name: cache - path: /go/pkg/mod - environment: - TEST_COCKROACH_HOST: "cockroach:26257" - TEST_COCKROACH_DBNAME: xorm_test - TEST_COCKROACH_USERNAME: root - TEST_COCKROACH_PASSWORD: - commands: - - sleep 10 - - make test-cockroach - -volumes: -- name: cache - host: - path: /tmp/cache - -services: -- name: cockroach - image: cockroachdb/cockroach:v19.2.4 - commands: - - /cockroach/cockroach start --insecure - -# --- 
-# kind: pipeline -# name: test-dameng -# depends_on: -# - test-cockroach -# trigger: -# ref: -# - refs/heads/master -# - refs/pull/*/head -# steps: -# - name: test-dameng -# pull: never -# image: golang:1.15 -# volumes: -# - name: cache -# path: /go/pkg/mod -# environment: -# TEST_DAMENG_HOST: "dameng:5236" -# TEST_DAMENG_USERNAME: SYSDBA -# TEST_DAMENG_PASSWORD: SYSDBA -# commands: -# - sleep 30 -# - make test-dameng - -# volumes: -# - name: cache -# host: -# path: /tmp/cache - -# services: -# - name: dameng -# image: lunny/dm:v1.0 -# commands: -# - /bin/bash /startDm.sh - ---- -kind: pipeline -name: merge_coverage -depends_on: - - test-mysql - - test-mysql8 - - test-mariadb - - test-postgres - - test-mssql - - test-tidb - - test-cockroach - #- test-dameng -trigger: - ref: - - refs/heads/master - - refs/pull/*/head -steps: -- name: merge_coverage - image: golang:1.15 - commands: - - make coverage - ---- -kind: pipeline -name: release-tag -trigger: - event: - - tag -steps: -- name: release-tag-gitea - pull: always - image: plugins/gitea-release:latest - settings: - base_url: https://gitea.com - title: '${DRONE_TAG} is released' - api_key: - from_secret: gitea_token \ No newline at end of file diff --git a/vendor/xorm.io/xorm/.gitignore b/vendor/xorm.io/xorm/.gitignore deleted file mode 100644 index a183a295..00000000 --- a/vendor/xorm.io/xorm/.gitignore +++ /dev/null @@ -1,40 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so -*.db - -# Folders -_obj -_test -vendor/ - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe - -*.log -.vendor -temp_test.go -.vscode -xorm.test -*.sqlite3 -test.db.sql - -.idea/ - -*coverage.out -test.db -integrations/*.sql -integrations/test_sqlite* -cover.out \ No newline at end of file diff --git a/vendor/xorm.io/xorm/.golangci.yml b/vendor/xorm.io/xorm/.golangci.yml deleted file mode 
100644 index 7b91f22d..00000000 --- a/vendor/xorm.io/xorm/.golangci.yml +++ /dev/null @@ -1,24 +0,0 @@ -linters: - enable: - - gosimple - - deadcode - - typecheck - - govet - - errcheck - - staticcheck - - unused - - structcheck - - varcheck - - dupl - #- gocyclo # The cyclomatic complexety of a lot of functions is too high, we should refactor those another time. - - gofmt - - misspell - - gocritic - - bidichk - - ineffassign - enable-all: false - disable-all: true - fast: false - -run: - timeout: 3m \ No newline at end of file diff --git a/vendor/xorm.io/xorm/CHANGELOG.md b/vendor/xorm.io/xorm/CHANGELOG.md deleted file mode 100644 index 6887cb97..00000000 --- a/vendor/xorm.io/xorm/CHANGELOG.md +++ /dev/null @@ -1,393 +0,0 @@ -# Changelog - -This changelog goes through all the changes that have been made in each release -without substantial changes to our git log. - -## [1.3.2](https://gitea.com/xorm/xorm/releases/tag/1.3.2) - 2022-09-03 - -* BUGFIXES - * Change schemas.Column to use int64 (#2160) -* MISC - * Prevent Sync failure with non-regular indexes on Postgres (#2174) - -## [1.3.1](https://gitea.com/xorm/xorm/releases/tag/1.3.1) - 2022-06-03 - -* BREAKING - * Refactor orderby and support arguments (#2150) - * return a clear error for set TEXT type as compare condition (#2062) -* BUGFIXES - * Fix oid index for postgres (#2154) - * Add ORDER BY SEQ_IN_INDEX to MySQL GetIndexes to Fix IndexTests (#2152) - * some improvement (#2136) -* ENHANCEMENTS - * Add interface to allow structs to provide specific index information (#2137) - * MySQL/MariaDB: return max length for text columns (#2133) - * PostgreSQL: enable comment on column (#2131) -* TESTING - * Add test for find date (#2121) - -## [1.3.0](https://gitea.com/xorm/xorm/releases/tag/1.3.0) - 2022-04-14 - -* BREAKING - * New Prepare useage (#2061) - * Make Get and Rows.Scan accept multiple parameters (#2029) - * Drop sync function and rename sync2 to sync (#2018) -* FEATURES - * Add dameng support (#2007) -* 
BUGFIXES - * bugfix :Oid It's a special index. You can't put it in (#2105) - * Fix new-lined query execution in master DB node. (#2066) - * Fix bug of Rows (#2048) - * Fix bug (#2046) - * fix panic when `Iterate()` fails (#2040) - * fix panic when convert sql and args with nil time.Time pointer (#2038) -* ENHANCEMENTS - * Fix to add session.statement.IsForUpdate check in Session.queryRows() (#2064) - * Expose ScanString / ScanInterface and etc (#2039) -* TESTING - * Add test for mysql tls (#2049) -* BUILD - * Upgrade dependencies modules (#2078) -* MISC - * Fix oracle keyword AS (#2109) - * Some performance optimization for get (#2043) - -## [1.2.2](https://gitea.com/xorm/xorm/releases/tag/1.2.2) - 2021-08-11 - -* MISC - * Move convert back to xorm.io/xorm/convert (#2030) - -## [1.2.1](https://gitea.com/xorm/xorm/releases/tag/1.2.1) - 2021-08-08 - -* FEATURES - * Add pgx driver support (#1795) -* BUGFIXES - * Fix wrong comment (#2027) - * Fix import file bug (#2025) -* ENHANCEMENTS - * Fix timesatmp (#2021) - -## [1.2.0](https://gitea.com/xorm/xorm/releases/tag/1.2.0) - 2021-08-04 - -* BREAKING - * Exec with time arg now will obey time zone settings on engine (#1989) - * Query interface (#1965) - * Support delete with no bean (#1926) - * Nil ptr is nullable (#1919) -* FEATURES - * Support batch insert map (#2019) - * Support big.Float (#1973) -* BUGFIXES - * fix possible null dereference in internal/statements/query.go (#1988) - * Fix bug on dumptable (#1984) -* ENHANCEMENTS - * Move assign functions to convert package (#2015) - * refactor conversion (#2001) - * refactor some code (#2000) - * refactor insert condition generation (#1998) - * refactor and add setjson function (#1997) - * Get struct and Find support big.Float (#1976) - * refactor slice2Bean (#1974, #1975) - * refactor get (#1967) - * Replace #1044 (#1935) - * Support Get time.Time (#1933) -* TESTING - * Add benchmark tests (#1978) - * Add tests for github.com/shopspring/decimal support (#1977) - * Add 
test for get map with NULL column (#1948) - * Add test for limit with query (#1787) -* MISC - * Fix DBMetas returned unsigned tinyint (#2017) - * Fix deleted column (#2014) - * Add database alias table and fix wrong warning (#1947) - -## [1.1.2](https://gitea.com/xorm/xorm/releases/tag/1.1.2) - 2021-07-04 - -* BUILD - * Add release tag (#1966) - -## [1.1.1](https://gitea.com/xorm/xorm/releases/tag/1.1.1) - 2021-07-03 - -* BUGFIXES - * Ignore comments when deciding when to replace question marks. #1954 (#1955) - * Fix bug didn't reset statement on update (#1939) - * Fix create table with struct missing columns (#1938) - * Fix #929 (#1936) - * Fix exist (#1921) -* ENHANCEMENTS - * Improve get field value of bean (#1961) - * refactor splitTag function (#1960) - * Fix #1663 (#1952) - * fix pg GetColumns missing comment (#1949) - * Support build flag jsoniter to replace default json (#1916) - * refactor exprParam (#1825) - * Add DBVersion (#1723) -* TESTING - * Add test to confirm #1247 resolved (#1951) - * Add test for dump table with default value (#1950) - * Test for #1486 (#1942) - * Add sync tests to confirm #539 is gone (#1937) - * test for unsigned int32 (#1923) - * Add tests for array store (#1922) -* BUILD - * Remove mymysql from ci (#1928) -* MISC - * fix lint (#1953) - * Compitable with cockroach (#1930) - * Replace goracle with godror (#1914) - -## [1.1.0](https://gitea.com/xorm/xorm/releases/tag/1.1.0) - 2021-05-14 - -* FEATURES - * Unsigned Support for mysql (#1889) - * Support modernc.org/sqlite (#1850) -* TESTING - * More tests (#1890) -* MISC - * Byte strings in postgres aren't 0x... 
(#1906) - * Fix another bug with #1872 (#1905) - * Fix two issues with dumptables (#1903) - * Fix comments (#1896) - * Fix comments (#1893) - * MariaDB 10.5 adds a suffix on old datatypes (#1885) - -## [1.0.7](https://gitea.com/xorm/xorm/pulls?q=&type=all&state=closed&milestone=1336) - 2021-01-21 - -* BUGFIXES - * Fix bug for mssql (#1854) -* MISC - * fix_bugs_for_mssql (#1852) - -## [1.0.6](https://gitea.com/xorm/xorm/pulls?q=&type=all&state=closed&milestone=1308) - 2021-01-05 - -* BUGFIXES - * Fix bug when modify column on mssql (#1849) - * Fix find and count bug with cols (#1826) - * Fix update bug (#1823) - * Fix json tag with other type (#1822) -* ENHANCEMENTS - * prevent panic when struct with unexport field (#1839) - * Automatically convert datetime to int64 (#1715) -* MISC - * Fix index (#1841) - * Performance improvement for columnsbyName (#1788) - -## [1.0.5](https://gitea.com/xorm/xorm/pulls?q=&type=all&state=closed&milestone=1299) - 2020-09-08 - -* BUGFIXES - * Fix bug of ToDB when update on a nil pointer (#1786) - * Fix warnings with schema Sync2 with default varchar as NVARCHAR (#1783) - * Do not ever quote asterisk symbol. 
Fixes #1780 (#1781) - * Fix bug on get columns for postgres (#1779) - -## [1.0.4](https://gitea.com/xorm/xorm/pulls?q=&type=all&state=closed&milestone=1286) - 2020-09-02 - -* FEATURES - * Add params for mssql to allow redefine varchar as nvarchar or char as nchar (#1741) -* BUGFIXES - * Fix mysql dialect error from invalid db identifier in orderby clause (#1743) (#1751) -* ENHANCEMENTS - * Support get dataSourceName on ContextHook for monitor which DB executed SQL (#1740) -* MISC - * Correct default detection in MariaDB >= 10.2.7 (#1778) - -## [1.0.3](https://gitea.com/xorm/xorm/pulls?q=&type=all&state=closed&milestone=1281) - 2020-07-10 - -* BUGFIXES - * Fix dump of sqlite (#1639) -* ENHANCEMENTS - * Fix index name parsing in SQLite dialect (#1737) - * add hooks for Commit and Rollback (#1733) - -## [1.0.2](https://gitea.com/xorm/xorm/pulls?q=&type=all&state=closed&milestone=1261) - 2020-06-16 - -* FEATURES - * Add Hook (#1644) -* BUGFIXES - * Fix bug when ID used but no reference table given (#1709) - * Fix find and count bug (#1651) -* ENHANCEMENTS - * chore: improve snakeCasedName performance (#1688) - * Fix find with another struct (#1666) - * fix GetColumns missing ordinal position (#1660) -* MISC - * chore: improve titleCasedName performance (#1691) - -## [1.0.1](https://gitea.com/xorm/xorm/pulls?q=&type=all&state=closed&milestone=1253) - 2020-03-25 - -* BUGFIXES - * Oracle : Local Naming Method (#1515) - * Fix find and count bug (#1618) - * Fix duplicated deleted condition on FindAndCount (#1619) - * Fix find and count bug with cache (#1622) - * Fix postgres schema problem (#1624) - * Fix quote with blank (#1626) - -## [1.0.0](https://gitea.com/xorm/xorm/pulls?q=&type=all&state=closed&milestone=1242) - 2020-03-22 - -* BREAKING - * Add context for dialects (#1558) - * Move zero functions to a standalone package (#1548) - * Merge core package back into the main repository and split into serval sub packages. 
(#1543) -* FEATURES - * Use a new ContextLogger interface to implement logger (#1557) -* BUGFIXES - * Fix setschema (#1606) - * Fix dump/import bug (#1603) - * Fix pk bug (#1602) - * Fix master/slave bug (#1601) - * Fix bug when dump (#1597) - * Ignore schema when dbtype is not postgres (#1593) - * Fix table name (#1590) - * Fix find alias bug (#1581) - * Fix rows bug (#1576) - * Fix map with cols (#1575) - * Fix bug on deleted with join (#1570) - * Improve quote policy (#1567) - * Fix break session sql enable feature (#1566) - * Fix mssql quote (#1535) - * Fix join table name quote bug (#1534) - * Fix mssql issue with duplicate columns. (#1225) - * Fix mysql8.0 sync failed (#808) -* ENHANCEMENTS - * Fix batch insert interface slice be panic (#1598) - * Move some codes to statement sub package (#1574) - * Remove circle file (#1569) - * Move statement as a sub package (#1564) - * Move maptype to tag parser (#1561) - * Move caches to manager (#1553) - * Improve code (#1552) - * Improve some codes (#1551) - * Improve statement (#1549) - * Move tag parser related codes as a standalone sub package (#1547) - * Move reserve words related files into dialects sub package (#1544) - * Fix `Conversion` method `ToDB() ([]byte, error)` return type is nil (#1296) - * Check driver.Valuer response, and skip the column if nil (#1167) - * Add cockroach support and tests (#896) -* TESTING - * Improve tests (#1572) -* BUILD - * Add changelog file and tool configuration (#1546) -* DOCS - * Fix outdate changelog (#1565) - -## old changelog - -* **v0.6.5** - * Postgres schema support - * vgo support - * Add FindAndCount - * Database special params support via NewEngineWithParams - * Some bugs fixed - -* **v0.6.4** - * Automatical Read/Write seperatelly - * Query/QueryString/QueryInterface and action with Where/And - * Get support non-struct variables - * BufferSize on Iterate - * fix some other bugs. 
- -* **v0.6.3** - * merge tests to main project - * add `Exist` function - * add `SumInt` function - * Mysql now support read and create column comment. - * fix time related bugs. - * fix some other bugs. - -* **v0.6.2** - * refactor tag parse methods - * add Scan features to Get - * add QueryString method - -* **v0.4.5** - * many bugs fixed - * extends support unlimited deep - * Delete Limit support - -* **v0.4.4** - * ql database expriment support - * tidb database expriment support - * sql.NullString and etc. field support - * select ForUpdate support - * many bugs fixed - -* **v0.4.3** - * Json column type support - * oracle expirement support - * bug fixed - -* **v0.4.2** - * Transaction will auto rollback if not Rollback or Commit be called. - * Gonic Mapper support - * bug fixed - -* **v0.4.1** - * deleted tag support for soft delete - * bug fixed - -* **v0.4.0 RC1** - Changes: - * moved xorm cmd to [github.com/go-xorm/cmd](github.com/go-xorm/cmd) - * refactored general DB operation a core lib at [github.com/go-xorm/core](https://github.com/go-xorm/core) - * moved tests to github.com/go-xorm/tests [github.com/go-xorm/tests](github.com/go-xorm/tests) - - Improvements: - * Prepared statement cache - * Add Incr API - * Specify Timezone Location - -* **v0.3.2** - Improvements: - * Add AllCols & MustCols function - * Add TableName for custom table name - - Bug Fixes: - * #46 - * #51 - * #53 - * #89 - * #86 - * #92 - -* **v0.3.1** - - Features: - * Support MSSQL DB via ODBC driver ([github.com/lunny/godbc](https://github.com/lunny/godbc)); - * Composite Key, using multiple pk xorm tag - * Added Row() API as alternative to Iterate() API for traversing result set, provide similar usages to sql.Rows type - * ORM struct allowed declaration of pointer builtin type as members to allow null DB fields - * Before and After Event processors - - Improvements: - * Allowed int/int32/int64/uint/uint32/uint64/string as Primary Key type - * Performance improvement for 
Get()/Find()/Iterate() - - -* **v0.2.3** : Improved documents; Optimistic Locking support; Timestamp with time zone support; Mapper change to tableMapper and columnMapper & added PrefixMapper & SuffixMapper support custom table or column name's prefix and suffix;Insert now return affected, err instead of id, err; Added UseBool & Distinct; - -* **v0.2.2** : Postgres drivers now support lib/pq; Added method Iterate for record by record to handler;Added SetMaxConns(go1.2+) support; some bugs fixed. - -* **v0.2.1** : Added database reverse tool, now support generate go & c++ codes, see [Xorm Tool README](https://github.com/go-xorm/xorm/blob/master/xorm/README.md); some bug fixed. - -* **v0.2.0** : Added Cache supported, select is speeder up 3~5x; Added SameMapper for same name between struct and table; Added Sync method for auto added tables, columns, indexes; - -* **v0.1.9** : Added postgres and mymysql supported; Added ` and ? supported on Raw SQL even if postgres; Added Cols, StoreEngine, Charset function, Added many column data type supported, please see [Mapping Rules](#mapping). - -* **v0.1.8** : Added union index and union unique supported, please see [Mapping Rules](#mapping). - -* **v0.1.7** : Added IConnectPool interface and NoneConnectPool, SysConnectPool, SimpleConnectPool the three implements. You can choose one of them and the default is SysConnectPool. You can customrize your own connection pool. struct Engine added Close method, It should be invoked before system exit. - -* **v0.1.6** : Added conversion interface support; added struct derive support; added single mapping support - -* **v0.1.5** : Added multi threads support; added Sql() function for struct query; Get function changed return inteface; MakeSession and Create are instead with NewSession and NewEngine. - -* **v0.1.4** : Added simple cascade load support; added more data type supports. 
- -* **v0.1.3** : Find function now supports both slice and map; Add Table function for multi tables and temperory tables support - -* **v0.1.2** : Insert function now supports both struct and slice pointer parameters, batch inserting and auto transaction - -* **v0.1.1** : Add Id, In functions and improved README - -* **v0.1.0** : Initial release. \ No newline at end of file diff --git a/vendor/xorm.io/xorm/CONTRIBUTING.md b/vendor/xorm.io/xorm/CONTRIBUTING.md deleted file mode 100644 index 27e6929b..00000000 --- a/vendor/xorm.io/xorm/CONTRIBUTING.md +++ /dev/null @@ -1,87 +0,0 @@ -## Contributing to xorm - -`xorm` has a backlog of [pull requests](https://gitea.com/xorm/xorm/pulls), but contributions are still very -much welcome. You can help with patch review, submitting [bug reports](https://gitea.com/xorm/xorm/issues), -or adding new functionality. There is no formal style guide, but -please conform to the style of existing code and general Go formatting -conventions when submitting patches. - -* [fork the repo](https://gitea.com/repo/fork/2038) -* [creating a pull request ](https://docs.gitea.io/en-us/pull-request/) - -### Language - -Since `xorm` is a world-wide open source project, please describe your issues or code changes in English as soon as possible. - -### Sign your codes with comments -``` -// !! your comments - -e.g., - -// !lunny! this is comments made by lunny -``` - -### Build xorm and test it locally - -Once you write some codes on your feature branch, you could build and test locally at first. Just - -``` -make build -``` -and -``` -make test -``` - -The `make test` is an alias of `make test-sqlite`, it will run the tests on a sqlite database file. No extra thing needed to do except you need to cgo compile enviroment. - -If you write a new test method, you could run - -``` -make test-sqlite#TestMyNewMethod -``` - -that will only run the special test method. 
- -If you want to run another datase, you have to prepare a running database at first, and then, you could - -``` -TEST_MYSQL_HOST= TEST_MYSQL_CHARSET= TEST_MYSQL_DBNAME= TEST_MYSQL_USERNAME= TEST_MYSQL_PASSWORD= make test-mysql -``` - -or other databases: -``` -TEST_MSSQL_HOST= TEST_MSSQL_DBNAME= TEST_MSSQL_USERNAME= TEST_MSSQL_PASSWORD= make test-mssql -``` -``` -TEST_PGSQL_HOST= TEST_PGSQL_SCHEMA= TEST_PGSQL_DBNAME= TEST_PGSQL_USERNAME= TEST_PGSQL_PASSWORD= make test-postgres -``` -``` -TEST_TIDB_HOST= TEST_TIDB_DBNAME= TEST_TIDB_USERNAME= TEST_TIDB_PASSWORD= make test-tidb -``` - -And if your branch is related with cache, you could also enable it via `TEST_CACHE_ENABLE=true`. - -### Patch review - -Help review existing open [pull requests](https://gitea.com/xorm/xorm/pulls) by commenting on the code or -proposed functionality. - -### Bug reports - -We appreciate any bug reports, but especially ones with self-contained -(doesn't depend on code outside of xorm), minimal (can't be simplified -further) test cases. It's especially helpful if you can submit a pull -request with just the failing test case(you can find some example test file like [session_get_test.go](https://gitea.com/xorm/xorm/src/branch/master/session_get_test.go)). - -If you implements a new database interface, you maybe need to add a test_.sh file. -For example, [mysql_test.go](https://gitea.com/xorm/xorm/src/branch/master/test_mysql.sh) - -### New functionality - -There are a number of pending patches for new functionality, so -additional feature patches will take a while to merge. Still, patches -are generally reviewed based on usefulness and complexity in addition -to time-in-queue, so if you have a knockout idea, take a shot. Feel -free to open an issue discussion your proposed patch beforehand. 
diff --git a/vendor/xorm.io/xorm/LICENSE b/vendor/xorm.io/xorm/LICENSE deleted file mode 100644 index 84d2ae53..00000000 --- a/vendor/xorm.io/xorm/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2013 - 2015 The Xorm Authors -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -* Neither the name of the {organization} nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/xorm.io/xorm/Makefile b/vendor/xorm.io/xorm/Makefile deleted file mode 100644 index b43c4a4c..00000000 --- a/vendor/xorm.io/xorm/Makefile +++ /dev/null @@ -1,281 +0,0 @@ -IMPORT := xorm.io/xorm -export GO111MODULE=on - -GO ?= go -GOFMT ?= gofmt -s -TAGS ?= -SED_INPLACE := sed -i - -GO_DIRS := caches contexts integrations core dialects internal log migrate names schemas tags -GOFILES := $(wildcard *.go) -GOFILES += $(shell find $(GO_DIRS) -name "*.go" -type f) -INTEGRATION_PACKAGES := xorm.io/xorm/integrations -PACKAGES ?= $(filter-out $(INTEGRATION_PACKAGES),$(shell $(GO) list ./...)) - -TEST_COCKROACH_HOST ?= cockroach:26257 -TEST_COCKROACH_SCHEMA ?= -TEST_COCKROACH_DBNAME ?= xorm_test -TEST_COCKROACH_USERNAME ?= postgres -TEST_COCKROACH_PASSWORD ?= - -TEST_MSSQL_HOST ?= mssql:1433 -TEST_MSSQL_DBNAME ?= gitea -TEST_MSSQL_USERNAME ?= sa -TEST_MSSQL_PASSWORD ?= MwantsaSecurePassword1 -TEST_MSSQL_DEFAULT_VARCHAR ?= varchar -TEST_MSSQL_DEFAULT_CHAR ?= char -TEST_MSSQL_DO_NVARCHAR_OVERRIDE_TEST ?= true - -TEST_MYSQL_HOST ?= mysql:3306 -TEST_MYSQL_CHARSET ?= utf8 -TEST_MYSQL_DBNAME ?= xorm_test -TEST_MYSQL_USERNAME ?= root -TEST_MYSQL_PASSWORD ?= - -TEST_PGSQL_HOST ?= pgsql:5432 -TEST_PGSQL_SCHEMA ?= -TEST_PGSQL_DBNAME ?= xorm_test -TEST_PGSQL_USERNAME ?= postgres -TEST_PGSQL_PASSWORD ?= mysecretpassword - -TEST_TIDB_HOST ?= tidb:4000 -TEST_TIDB_DBNAME ?= xorm_test -TEST_TIDB_USERNAME ?= root -TEST_TIDB_PASSWORD ?= - -TEST_DAMENG_HOST ?= dameng:5236 -TEST_DAMENG_USERNAME ?= SYSDBA -TEST_DAMENG_PASSWORD ?= SYSDBA - -TEST_CACHE_ENABLE ?= false -TEST_QUOTE_POLICY ?= always - -.PHONY: all -all: build - -.PHONY: build -build: go-check $(GO_SOURCES) - $(GO) build $(PACKAGES) - -.PHONY: clean -clean: - $(GO) clean -i ./... - rm -rf *.sql *.log test.db *coverage.out coverage.all integrations/*.sql - -.PHONY: coverage -coverage: - @hash gocovmerge > /dev/null 2>&1; if [ $$? 
-ne 0 ]; then \ - $(GO) get -u github.com/wadey/gocovmerge; \ - fi - gocovmerge $(shell find . -type f -name "coverage.out") > coverage.all;\ - -.PHONY: fmt -fmt: - $(GOFMT) -w $(GOFILES) - -.PHONY: fmt-check -fmt-check: - # get all go files and run go fmt on them - @diff=$$($(GOFMT) -d $(GOFILES)); \ - if [ -n "$$diff" ]; then \ - echo "Please run 'make fmt' and commit the result:"; \ - echo "$${diff}"; \ - exit 1; \ - fi; - -.PHONY: go-check -go-check: - $(eval GO_VERSION := $(shell printf "%03d%03d%03d" $(shell go version | grep -Eo '[0-9]+\.?[0-9]+?\.?[0-9]?\s' | tr '.' ' ');)) - @if [ "$(GO_VERSION)" -lt "001011000" ]; then \ - echo "Gitea requires Go 1.11.0 or greater to build. You can get it at https://golang.org/dl/"; \ - exit 1; \ - fi - -.PHONY: help -help: - @echo "Make Routines:" - @echo " - equivalent to \"build\"" - @echo " - build creates the entire project" - @echo " - clean delete integration files and build files but not css and js files" - @echo " - fmt format the code" - @echo " - lint run code linter" - @echo " - test run default unit test" - @echo " - test-cockroach run integration tests for cockroach" - @echo " - test-mysql run integration tests for mysql" - @echo " - test-mssql run integration tests for mssql" - @echo " - test-postgres run integration tests for postgres" - @echo " - test-sqlite3 run integration tests for sqlite" - @echo " - test-sqlite run integration tests for pure go sqlite" - @echo " - test-tidb run integration tests for tidb" - @echo " - vet examines Go source code and reports suspicious constructs" - -.PHONY: lint -lint: golangci-lint - -.PHONY: golangci-lint -golangci-lint: golangci-lint-check - golangci-lint run --timeout 10m - -.PHONY: golangci-lint-check -golangci-lint-check: - $(eval GOLANGCI_LINT_VERSION := $(shell printf "%03d%03d%03d" $(shell golangci-lint --version | grep -Eo '[0-9]+\.[0-9.]+' | tr '.' 
' ');)) - $(eval MIN_GOLANGCI_LINT_VER_FMT := $(shell printf "%g.%g.%g" $(shell echo $(MIN_GOLANGCI_LINT_VERSION) | grep -o ...))) - @hash golangci-lint > /dev/null 2>&1; if [ $$? -ne 0 ]; then \ - echo "Downloading golangci-lint v${MIN_GOLANGCI_LINT_VER_FMT}"; \ - export BINARY="golangci-lint"; \ - curl -sfL "https://raw.githubusercontent.com/golangci/golangci-lint/v${MIN_GOLANGCI_LINT_VER_FMT}/install.sh" | sh -s -- -b $(GOPATH)/bin v$(MIN_GOLANGCI_LINT_VER_FMT); \ - elif [ "$(GOLANGCI_LINT_VERSION)" -lt "$(MIN_GOLANGCI_LINT_VERSION)" ]; then \ - echo "Downloading newer version of golangci-lint v${MIN_GOLANGCI_LINT_VER_FMT}"; \ - export BINARY="golangci-lint"; \ - curl -sfL "https://raw.githubusercontent.com/golangci/golangci-lint/v${MIN_GOLANGCI_LINT_VER_FMT}/install.sh" | sh -s -- -b $(GOPATH)/bin v$(MIN_GOLANGCI_LINT_VER_FMT); \ - fi - -.PHONY: test -test: go-check - $(GO) test $(PACKAGES) - -.PNONY: test-cockroach -test-cockroach: go-check - $(GO) test $(INTEGRATION_PACKAGES) -v -race -db=postgres -schema='$(TEST_COCKROACH_SCHEMA)' -cache=$(TEST_CACHE_ENABLE) \ - -conn_str="postgres://$(TEST_COCKROACH_USERNAME):$(TEST_COCKROACH_PASSWORD)@$(TEST_COCKROACH_HOST)/$(TEST_COCKROACH_DBNAME)?sslmode=disable&experimental_serial_normalization=sql_sequence" \ - -ignore_update_limit=true -coverprofile=cockroach.$(TEST_COCKROACH_SCHEMA).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic -timeout=20m - -.PHONY: test-cockroach\#% -test-cockroach\#%: go-check - $(GO) test $(INTEGRATION_PACKAGES) -v -race -run $* -db=postgres -schema='$(TEST_COCKROACH_SCHEMA)' -cache=$(TEST_CACHE_ENABLE) \ - -conn_str="postgres://$(TEST_COCKROACH_USERNAME):$(TEST_COCKROACH_PASSWORD)@$(TEST_COCKROACH_HOST)/$(TEST_COCKROACH_DBNAME)?sslmode=disable&experimental_serial_normalization=sql_sequence" \ - -ignore_update_limit=true -coverprofile=cockroach.$(TEST_COCKROACH_SCHEMA).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic - -.PNONY: test-mssql -test-mssql: go-check - $(GO) test 
$(INTEGRATION_PACKAGES) -v -race -db=mssql -cache=$(TEST_CACHE_ENABLE) -quote=$(TEST_QUOTE_POLICY) \ - -conn_str="server=$(TEST_MSSQL_HOST);user id=$(TEST_MSSQL_USERNAME);password=$(TEST_MSSQL_PASSWORD);database=$(TEST_MSSQL_DBNAME)" \ - -default_varchar=$(TEST_MSSQL_DEFAULT_VARCHAR) -default_char=$(TEST_MSSQL_DEFAULT_CHAR) \ - -do_nvarchar_override_test=$(TEST_MSSQL_DO_NVARCHAR_OVERRIDE_TEST) \ - -coverprofile=mssql.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic -timeout=20m - -.PNONY: test-mssql\#% -test-mssql\#%: go-check - $(GO) test $(INTEGRATION_PACKAGES) -v -race -run $* -db=mssql -cache=$(TEST_CACHE_ENABLE) -quote=$(TEST_QUOTE_POLICY) \ - -conn_str="server=$(TEST_MSSQL_HOST);user id=$(TEST_MSSQL_USERNAME);password=$(TEST_MSSQL_PASSWORD);database=$(TEST_MSSQL_DBNAME)" \ - -default_varchar=$(TEST_MSSQL_DEFAULT_VARCHAR) -default_char=$(TEST_MSSQL_DEFAULT_CHAR) \ - -do_nvarchar_override_test=$(TEST_MSSQL_DO_NVARCHAR_OVERRIDE_TEST) \ - -coverprofile=mssql.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic - -.PNONY: test-mymysql -test-mymysql: go-check - $(GO) test $(INTEGRATION_PACKAGES) -v -race -db=mymysql -cache=$(TEST_CACHE_ENABLE) -quote=$(TEST_QUOTE_POLICY) \ - -conn_str="tcp:$(TEST_MYSQL_HOST)*$(TEST_MYSQL_DBNAME)/$(TEST_MYSQL_USERNAME)/$(TEST_MYSQL_PASSWORD)" \ - -coverprofile=mymysql.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic -timeout=20m - -.PNONY: test-mymysql\#% -test-mymysql\#%: go-check - $(GO) test $(INTEGRATION_PACKAGES) -v -race -run $* -db=mymysql -cache=$(TEST_CACHE_ENABLE) -quote=$(TEST_QUOTE_POLICY) \ - -conn_str="tcp:$(TEST_MYSQL_HOST)*$(TEST_MYSQL_DBNAME)/$(TEST_MYSQL_USERNAME)/$(TEST_MYSQL_PASSWORD)" \ - -coverprofile=mymysql.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic - -.PNONY: test-mysql -test-mysql: go-check - $(GO) test $(INTEGRATION_PACKAGES) -v -race -db=mysql -cache=$(TEST_CACHE_ENABLE) -quote=$(TEST_QUOTE_POLICY) \ - 
-conn_str="$(TEST_MYSQL_USERNAME):$(TEST_MYSQL_PASSWORD)@tcp($(TEST_MYSQL_HOST))/$(TEST_MYSQL_DBNAME)?charset=$(TEST_MYSQL_CHARSET)" \ - -coverprofile=mysql.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic -timeout=20m - -.PHONY: test-mysql\#% -test-mysql\#%: go-check - $(GO) test $(INTEGRATION_PACKAGES) -v -race -run $* -db=mysql -cache=$(TEST_CACHE_ENABLE) -quote=$(TEST_QUOTE_POLICY) \ - -conn_str="$(TEST_MYSQL_USERNAME):$(TEST_MYSQL_PASSWORD)@tcp($(TEST_MYSQL_HOST))/$(TEST_MYSQL_DBNAME)?charset=$(TEST_MYSQL_CHARSET)" \ - -coverprofile=mysql.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic - -.PNONY: test-mysql-tls -test-mysql-tls: go-check - $(GO) test $(INTEGRATION_PACKAGES) -v -race -db=mysql -cache=$(TEST_CACHE_ENABLE) -quote=$(TEST_QUOTE_POLICY) \ - -conn_str="$(TEST_MYSQL_USERNAME):$(TEST_MYSQL_PASSWORD)@tcp($(TEST_MYSQL_HOST))/$(TEST_MYSQL_DBNAME)?charset=$(TEST_MYSQL_CHARSET)&tls=skip-verify" \ - -coverprofile=mysql.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic -timeout=20m - -.PHONY: test-mysql-tls\#% -test-mysql-tls\#%: go-check - $(GO) test $(INTEGRATION_PACKAGES) -v -race -run $* -db=mysql -cache=$(TEST_CACHE_ENABLE) -quote=$(TEST_QUOTE_POLICY) \ - -conn_str="$(TEST_MYSQL_USERNAME):$(TEST_MYSQL_PASSWORD)@tcp($(TEST_MYSQL_HOST))/$(TEST_MYSQL_DBNAME)?charset=$(TEST_MYSQL_CHARSET)&tls=skip-verify" \ - -coverprofile=mysql.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic - -.PNONY: test-postgres -test-postgres: go-check - $(GO) test $(INTEGRATION_PACKAGES) -v -race -db=postgres -schema='$(TEST_PGSQL_SCHEMA)' -cache=$(TEST_CACHE_ENABLE) \ - -conn_str="postgres://$(TEST_PGSQL_USERNAME):$(TEST_PGSQL_PASSWORD)@$(TEST_PGSQL_HOST)/$(TEST_PGSQL_DBNAME)?sslmode=disable" \ - -quote=$(TEST_QUOTE_POLICY) -coverprofile=postgres.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic -timeout=20m - -.PHONY: test-postgres\#% -test-postgres\#%: go-check - 
$(GO) test $(INTEGRATION_PACKAGES) -v -race -run $* -db=postgres -schema='$(TEST_PGSQL_SCHEMA)' -cache=$(TEST_CACHE_ENABLE) \ - -conn_str="postgres://$(TEST_PGSQL_USERNAME):$(TEST_PGSQL_PASSWORD)@$(TEST_PGSQL_HOST)/$(TEST_PGSQL_DBNAME)?sslmode=disable" \ - -quote=$(TEST_QUOTE_POLICY) -coverprofile=postgres.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic - -.PHONY: test-sqlite3 -test-sqlite3: go-check - $(GO) test $(INTEGRATION_PACKAGES) -v -race -cache=$(TEST_CACHE_ENABLE) -db=sqlite3 -conn_str="./test.db?cache=shared&mode=rwc" \ - -quote=$(TEST_QUOTE_POLICY) -coverprofile=sqlite3.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic -timeout=20m - -.PHONY: test-sqlite3-schema -test-sqlite3-schema: go-check - $(GO) test $(INTEGRATION_PACKAGES) -v -race -schema=xorm -cache=$(TEST_CACHE_ENABLE) -db=sqlite3 -conn_str="./test.db?cache=shared&mode=rwc" \ - -quote=$(TEST_QUOTE_POLICY) -coverprofile=sqlite3.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic -timeout=20m - -.PHONY: test-sqlite3\#% -test-sqlite3\#%: go-check - $(GO) test $(INTEGRATION_PACKAGES) -v -race -run $* -cache=$(TEST_CACHE_ENABLE) -db=sqlite3 -conn_str="./test.db?cache=shared&mode=rwc" \ - -quote=$(TEST_QUOTE_POLICY) -coverprofile=sqlite3.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic -timeout=20m - -.PNONY: test-pgx -test-pgx: go-check - $(GO) test $(INTEGRATION_PACKAGES) -v -race -db=pgx -schema='$(TEST_PGSQL_SCHEMA)' -cache=$(TEST_CACHE_ENABLE) \ - -conn_str="postgres://$(TEST_PGSQL_USERNAME):$(TEST_PGSQL_PASSWORD)@$(TEST_PGSQL_HOST)/$(TEST_PGSQL_DBNAME)?sslmode=disable" \ - -quote=$(TEST_QUOTE_POLICY) -coverprofile=postgres.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic -timeout=20m - -.PHONY: test-pgx\#% -test-pgx\#%: go-check - $(GO) test $(INTEGRATION_PACKAGES) -v -race -run $* -db=pgx -schema='$(TEST_PGSQL_SCHEMA)' -cache=$(TEST_CACHE_ENABLE) \ - 
-conn_str="postgres://$(TEST_PGSQL_USERNAME):$(TEST_PGSQL_PASSWORD)@$(TEST_PGSQL_HOST)/$(TEST_PGSQL_DBNAME)?sslmode=disable" \ - -quote=$(TEST_QUOTE_POLICY) -coverprofile=postgres.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic -timeout=20m - -.PHONY: test-sqlite -test-sqlite: go-check - $(GO) test $(INTEGRATION_PACKAGES) -v -race -cache=$(TEST_CACHE_ENABLE) -db=sqlite -conn_str="./test.db?cache=shared&mode=rwc" \ - -quote=$(TEST_QUOTE_POLICY) -coverprofile=sqlite.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic -timeout=20m - -.PHONY: test-sqlite-schema -test-sqlite-schema: go-check - $(GO) test $(INTEGRATION_PACKAGES) -v -race -schema=xorm -cache=$(TEST_CACHE_ENABLE) -db=sqlite -conn_str="./test.db?cache=shared&mode=rwc" \ - -quote=$(TEST_QUOTE_POLICY) -coverprofile=sqlite.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic -timeout=20m - -.PHONY: test-sqlite\#% -test-sqlite\#%: go-check - $(GO) test $(INTEGRATION_PACKAGES) -v -race -run $* -cache=$(TEST_CACHE_ENABLE) -db=sqlite -conn_str="./test.db?cache=shared&mode=rwc" \ - -quote=$(TEST_QUOTE_POLICY) -coverprofile=sqlite.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic - -.PNONY: test-tidb -test-tidb: go-check - $(GO) test $(INTEGRATION_PACKAGES) -v -race -db=mysql -cache=$(TEST_CACHE_ENABLE) -ignore_select_update=true \ - -conn_str="$(TEST_TIDB_USERNAME):$(TEST_TIDB_PASSWORD)@tcp($(TEST_TIDB_HOST))/$(TEST_TIDB_DBNAME)" \ - -quote=$(TEST_QUOTE_POLICY) -coverprofile=tidb.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic -timeout=20m - -.PHONY: test-tidb\#% -test-tidb\#%: go-check - $(GO) test $(INTEGRATION_PACKAGES) -v -race -run $* -db=mysql -cache=$(TEST_CACHE_ENABLE) -ignore_select_update=true \ - -conn_str="$(TEST_TIDB_USERNAME):$(TEST_TIDB_PASSWORD)@tcp($(TEST_TIDB_HOST))/$(TEST_TIDB_DBNAME)" \ - -quote=$(TEST_QUOTE_POLICY) 
-coverprofile=tidb.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic - -.PNONY: test-dameng -test-dameng: go-check - $(GO) test $(INTEGRATION_PACKAGES) -v -race -db=dm -cache=$(TEST_CACHE_ENABLE) -quote=$(TEST_QUOTE_POLICY) \ - -conn_str="dm://$(TEST_DAMENG_USERNAME):$(TEST_DAMENG_PASSWORD)@$(TEST_DAMENG_HOST)" \ - -coverprofile=dameng.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic -timeout=20m - -.PHONY: test-dameng\#% -test-dameng\#%: go-check - $(GO) test $(INTEGRATION_PACKAGES) -v -race -run $* -db=dm -cache=$(TEST_CACHE_ENABLE) -quote=$(TEST_QUOTE_POLICY) \ - -conn_str="dm://$(TEST_DAMENG_USERNAME):$(TEST_DAMENG_PASSWORD)@$(TEST_DAMENG_HOST)" \ - -coverprofile=dameng.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic -timeout=20m - -.PHONY: vet -vet: - $(GO) vet $(shell $(GO) list ./...) diff --git a/vendor/xorm.io/xorm/README.md b/vendor/xorm.io/xorm/README.md deleted file mode 100644 index ccf49348..00000000 --- a/vendor/xorm.io/xorm/README.md +++ /dev/null @@ -1,528 +0,0 @@ -# xorm - -[中文](https://gitea.com/xorm/xorm/src/branch/master/README_CN.md) - -Xorm is a simple and powerful ORM for Go. - -[![Build Status](https://drone.gitea.com/api/badges/xorm/xorm/status.svg)](https://drone.gitea.com/xorm/xorm) [![](http://gocover.io/_badge/xorm.io/xorm)](https://gocover.io/xorm.io/xorm) [![](https://goreportcard.com/badge/xorm.io/xorm)](https://goreportcard.com/report/xorm.io/xorm) [![Join the chat at https://img.shields.io/discord/323460943201959939.svg](https://img.shields.io/discord/323460943201959939.svg)](https://discord.gg/HuR2CF3) - -## Notice - -v1.0.0 has some break changes from v0.8.2. - -- Removed some non gonic function name `Id`, `Sql`, please use `ID`, `SQL` instead. -- Removed the dependent from `xorm.io/core` and moved the codes to `xorm.io/xorm/core`, `xorm.io/xorm/names`, `xorm.io/xorm/schemas` and others. -- Renamed some interface names. i.e. 
`core.IMapper` -> `names.Mapper`, `core.ILogger` -> `log.Logger`. - -## Features - -* Struct <-> Table Mapping Support -* Chainable APIs -* Transaction Support -* Both ORM and raw SQL operation Support -* Sync database schema Support -* Query Cache speed up -* Database Reverse support via [xorm.io/reverse](https://xorm.io/reverse) -* Simple cascade loading support -* Optimistic Locking support -* SQL Builder support via [xorm.io/builder](https://xorm.io/builder) -* Automatical Read/Write seperatelly -* Postgres schema support -* Context Cache support -* Support log/SQLLog context - -## Drivers Support - -Drivers for Go's sql package which currently support database/sql includes: - -* [Mysql5.*](https://github.com/mysql/mysql-server/tree/5.7) / [Mysql8.*](https://github.com/mysql/mysql-server) / [Mariadb](https://github.com/MariaDB/server) / [Tidb](https://github.com/pingcap/tidb) - - [github.com/go-sql-driver/mysql](https://github.com/go-sql-driver/mysql) - - [github.com/ziutek/mymysql/godrv](https://github.com/ziutek/mymysql/godrv) - -* [Postgres](https://github.com/postgres/postgres) / [Cockroach](https://github.com/cockroachdb/cockroach) - - [github.com/lib/pq](https://github.com/lib/pq) - - [github.com/jackc/pgx](https://github.com/jackc/pgx) - -* [SQLite](https://sqlite.org) - - [github.com/mattn/go-sqlite3](https://github.com/mattn/go-sqlite3) - - [modernc.org/sqlite](https://gitlab.com/cznic/sqlite) (windows unsupported) - -* MsSql - - [github.com/denisenkom/go-mssqldb](https://github.com/denisenkom/go-mssqldb) - -* Oracle - - [github.com/godror/godror](https://github.com/godror/godror) (experiment) - - [github.com/mattn/go-oci8](https://github.com/mattn/go-oci8) (experiment) - -## Installation - - go get xorm.io/xorm - -## Documents - -* [Manual](http://xorm.io/docs) - -* [GoDoc](http://pkg.go.dev/xorm.io/xorm) - -## Quick Start - -* Create Engine - -Firstly, we should new an engine for a database. 
- -```Go -engine, err := xorm.NewEngine(driverName, dataSourceName) -``` - -* Define a struct and Sync table struct to database - -```Go -type User struct { - Id int64 - Name string - Salt string - Age int - Passwd string `xorm:"varchar(200)"` - Created time.Time `xorm:"created"` - Updated time.Time `xorm:"updated"` -} - -err := engine.Sync(new(User)) -``` - -* Create Engine Group - -```Go -dataSourceNameSlice := []string{masterDataSourceName, slave1DataSourceName, slave2DataSourceName} -engineGroup, err := xorm.NewEngineGroup(driverName, dataSourceNameSlice) -``` - -```Go -masterEngine, err := xorm.NewEngine(driverName, masterDataSourceName) -slave1Engine, err := xorm.NewEngine(driverName, slave1DataSourceName) -slave2Engine, err := xorm.NewEngine(driverName, slave2DataSourceName) -engineGroup, err := xorm.NewEngineGroup(masterEngine, []*Engine{slave1Engine, slave2Engine}) -``` - -Then all place where `engine` you can just use `engineGroup`. - -* `Query` runs a SQL string, the returned results is `[]map[string][]byte`, `QueryString` returns `[]map[string]string`, `QueryInterface` returns `[]map[string]interface{}`. - -```Go -results, err := engine.Query("select * from user") -results, err := engine.Where("a = 1").Query() - -results, err := engine.QueryString("select * from user") -results, err := engine.Where("a = 1").QueryString() - -results, err := engine.QueryInterface("select * from user") -results, err := engine.Where("a = 1").QueryInterface() -``` - -* `Exec` runs a SQL string, it returns `affected` and `error` - -```Go -affected, err := engine.Exec("update user set age = ? 
where name = ?", age, name) -``` - -* `Insert` one or multiple records to database - -```Go -affected, err := engine.Insert(&user) -// INSERT INTO struct () values () - -affected, err := engine.Insert(&user1, &user2) -// INSERT INTO struct1 () values () -// INSERT INTO struct2 () values () - -affected, err := engine.Insert(&users) -// INSERT INTO struct () values (),(),() - -affected, err := engine.Insert(&user1, &users) -// INSERT INTO struct1 () values () -// INSERT INTO struct2 () values (),(),() - -affected, err := engine.Table("user").Insert(map[string]interface{}{ - "name": "lunny", - "age": 18, -}) -// INSERT INTO user (name, age) values (?,?) - -affected, err := engine.Table("user").Insert([]map[string]interface{}{ - { - "name": "lunny", - "age": 18, - }, - { - "name": "lunny2", - "age": 19, - }, -}) -// INSERT INTO user (name, age) values (?,?),(?,?) -``` - -* `Get` query one record from database - -```Go -has, err := engine.Get(&user) -// SELECT * FROM user LIMIT 1 - -has, err := engine.Where("name = ?", name).Desc("id").Get(&user) -// SELECT * FROM user WHERE name = ? ORDER BY id DESC LIMIT 1 - -var name string -has, err := engine.Table(&user).Where("id = ?", id).Cols("name").Get(&name) -// SELECT name FROM user WHERE id = ? - -var id int64 -has, err := engine.Table(&user).Where("name = ?", name).Cols("id").Get(&id) -has, err := engine.SQL("select id from user").Get(&id) -// SELECT id FROM user WHERE name = ? - -var id int64 -var name string -has, err := engine.Table(&user).Cols("id", "name").Get(&id, &name) -// SELECT id, name FROM user LIMIT 1 - -var valuesMap = make(map[string]string) -has, err := engine.Table(&user).Where("id = ?", id).Get(&valuesMap) -// SELECT * FROM user WHERE id = ? - -var valuesSlice = make([]interface{}, len(cols)) -has, err := engine.Table(&user).Where("id = ?", id).Cols(cols...).Get(&valuesSlice) -// SELECT col1, col2, col3 FROM user WHERE id = ? 
-``` - -* `Exist` check if one record exist on table - -```Go -has, err := testEngine.Exist(new(RecordExist)) -// SELECT * FROM record_exist LIMIT 1 - -has, err = testEngine.Exist(&RecordExist{ - Name: "test1", - }) -// SELECT * FROM record_exist WHERE name = ? LIMIT 1 - -has, err = testEngine.Where("name = ?", "test1").Exist(&RecordExist{}) -// SELECT * FROM record_exist WHERE name = ? LIMIT 1 - -has, err = testEngine.SQL("select * from record_exist where name = ?", "test1").Exist() -// select * from record_exist where name = ? - -has, err = testEngine.Table("record_exist").Exist() -// SELECT * FROM record_exist LIMIT 1 - -has, err = testEngine.Table("record_exist").Where("name = ?", "test1").Exist() -// SELECT * FROM record_exist WHERE name = ? LIMIT 1 -``` - -* `Find` query multiple records from database, also you can use join and extends - -```Go -var users []User -err := engine.Where("name = ?", name).And("age > 10").Limit(10, 0).Find(&users) -// SELECT * FROM user WHERE name = ? AND age > 10 limit 10 offset 0 - -type Detail struct { - Id int64 - UserId int64 `xorm:"index"` -} - -type UserDetail struct { - User `xorm:"extends"` - Detail `xorm:"extends"` -} - -var users []UserDetail -err := engine.Table("user").Select("user.*, detail.*"). - Join("INNER", "detail", "detail.user_id = user.id"). - Where("user.name = ?", name).Limit(10, 0). - Find(&users) -// SELECT user.*, detail.* FROM user INNER JOIN detail WHERE user.name = ? 
limit 10 offset 0 -``` - -* `Iterate` and `Rows` query multiple records and record by record handle, there are two methods Iterate and Rows - -```Go -err := engine.Iterate(&User{Name:name}, func(idx int, bean interface{}) error { - user := bean.(*User) - return nil -}) -// SELECT * FROM user - -err := engine.BufferSize(100).Iterate(&User{Name:name}, func(idx int, bean interface{}) error { - user := bean.(*User) - return nil -}) -// SELECT * FROM user Limit 0, 100 -// SELECT * FROM user Limit 101, 100 -``` - -You can use rows which is similiar with `sql.Rows` - -```Go -rows, err := engine.Rows(&User{Name:name}) -// SELECT * FROM user -defer rows.Close() -bean := new(Struct) -for rows.Next() { - err = rows.Scan(bean) -} -``` - -or - -```Go -rows, err := engine.Cols("name", "age").Rows(&User{Name:name}) -// SELECT * FROM user -defer rows.Close() -for rows.Next() { - var name string - var age int - err = rows.Scan(&name, &age) -} -``` - -* `Update` update one or more records, default will update non-empty and non-zero fields except when you use Cols, AllCols and so on. - -```Go -affected, err := engine.ID(1).Update(&user) -// UPDATE user SET ... WHERE id = ? - -affected, err := engine.Update(&user, &User{Name:name}) -// UPDATE user SET ... WHERE name = ? - -var ids = []int64{1, 2, 3} -affected, err := engine.In("id", ids).Update(&user) -// UPDATE user SET ... WHERE id IN (?, ?, ?) - -// force update indicated columns by Cols -affected, err := engine.ID(1).Cols("age").Update(&User{Name:name, Age: 12}) -// UPDATE user SET age = ?, updated=? WHERE id = ? - -// force NOT update indicated columns by Omit -affected, err := engine.ID(1).Omit("name").Update(&User{Name:name, Age: 12}) -// UPDATE user SET age = ?, updated=? WHERE id = ? - -affected, err := engine.ID(1).AllCols().Update(&user) -// UPDATE user SET name=?,age=?,salt=?,passwd=?,updated=? WHERE id = ? 
-``` - -* `Delete` delete one or more records, Delete MUST have condition - -```Go -affected, err := engine.Where(...).Delete(&user) -// DELETE FROM user WHERE ... - -affected, err := engine.ID(2).Delete(&user) -// DELETE FROM user WHERE id = ? - -affected, err := engine.Table("user").Where(...).Delete() -// DELETE FROM user WHERE ... -``` - -* `Count` count records - -```Go -counts, err := engine.Count(&user) -// SELECT count(*) AS total FROM user -``` - -* `FindAndCount` combines function `Find` with `Count` which is usually used in query by page - -```Go -var users []User -counts, err := engine.FindAndCount(&users) -``` - -* `Sum` sum functions - -```Go -agesFloat64, err := engine.Sum(&user, "age") -// SELECT sum(age) AS total FROM user - -agesInt64, err := engine.SumInt(&user, "age") -// SELECT sum(age) AS total FROM user - -sumFloat64Slice, err := engine.Sums(&user, "age", "score") -// SELECT sum(age), sum(score) FROM user - -sumInt64Slice, err := engine.SumsInt(&user, "age", "score") -// SELECT sum(age), sum(score) FROM user -``` - -* Query conditions builder - -```Go -err := engine.Where(builder.NotIn("a", 1, 2).And(builder.In("b", "c", "d", "e"))).Find(&users) -// SELECT id, name ... FROM user WHERE a NOT IN (?, ?) AND b IN (?, ?, ?) -``` - -* Multiple operations in one go routine, no transaction here but resue session memory - -```Go -session := engine.NewSession() -defer session.Close() - -user1 := Userinfo{Username: "xiaoxiao", Departname: "dev", Alias: "lunny", Created: time.Now()} -if _, err := session.Insert(&user1); err != nil { - return err -} - -user2 := Userinfo{Username: "yyy"} -if _, err := session.Where("id = ?", 2).Update(&user2); err != nil { - return err -} - -if _, err := session.Exec("delete from userinfo where username = ?", user2.Username); err != nil { - return err -} - -return nil -``` - -* Transaction should be on one go routine. 
There is transaction and resue session memory - -```Go -session := engine.NewSession() -defer session.Close() - -// add Begin() before any action -if err := session.Begin(); err != nil { - // if returned then will rollback automatically - return err -} - -user1 := Userinfo{Username: "xiaoxiao", Departname: "dev", Alias: "lunny", Created: time.Now()} -if _, err := session.Insert(&user1); err != nil { - return err -} - -user2 := Userinfo{Username: "yyy"} -if _, err := session.Where("id = ?", 2).Update(&user2); err != nil { - return err -} - -if _, err := session.Exec("delete from userinfo where username = ?", user2.Username); err != nil { - return err -} - -// add Commit() after all actions -return session.Commit() -``` - -* Or you can use `Transaction` to replace above codes. - -```Go -res, err := engine.Transaction(func(session *xorm.Session) (interface{}, error) { - user1 := Userinfo{Username: "xiaoxiao", Departname: "dev", Alias: "lunny", Created: time.Now()} - if _, err := session.Insert(&user1); err != nil { - return nil, err - } - - user2 := Userinfo{Username: "yyy"} - if _, err := session.Where("id = ?", 2).Update(&user2); err != nil { - return nil, err - } - - if _, err := session.Exec("delete from userinfo where username = ?", user2.Username); err != nil { - return nil, err - } - return nil, nil -}) -``` - -* Context Cache, if enabled, current query result will be cached on session and be used by next same statement on the same session. 
- -```Go - sess := engine.NewSession() - defer sess.Close() - - var context = xorm.NewMemoryContextCache() - - var c2 ContextGetStruct - has, err := sess.ID(1).ContextCache(context).Get(&c2) - assert.NoError(t, err) - assert.True(t, has) - assert.EqualValues(t, 1, c2.Id) - assert.EqualValues(t, "1", c2.Name) - sql, args := sess.LastSQL() - assert.True(t, len(sql) > 0) - assert.True(t, len(args) > 0) - - var c3 ContextGetStruct - has, err = sess.ID(1).ContextCache(context).Get(&c3) - assert.NoError(t, err) - assert.True(t, has) - assert.EqualValues(t, 1, c3.Id) - assert.EqualValues(t, "1", c3.Name) - sql, args = sess.LastSQL() - assert.True(t, len(sql) == 0) - assert.True(t, len(args) == 0) -``` - -## Contributing - -If you want to pull request, please see [CONTRIBUTING](https://gitea.com/xorm/xorm/src/branch/master/CONTRIBUTING.md). And you can also go to [Xorm on discourse](https://xorm.discourse.group) to discuss. - -## Credits - -### Contributors - -This project exists thanks to all the people who contribute. [[Contribute](CONTRIBUTING.md)]. - - -### Backers - -Thank you to all our backers! 🙏 [[Become a backer](https://opencollective.com/xorm#backer)] - - - -### Sponsors - -Support this project by becoming a sponsor. Your logo will show up here with a link to your website. 
[[Become a sponsor](https://opencollective.com/xorm#sponsor)] - -## Changelog - -You can find all the changelog [here](CHANGELOG.md) - -## Cases - -* [studygolang](http://studygolang.com/) - [github.com/studygolang/studygolang](https://github.com/studygolang/studygolang) - -* [Gitea](http://gitea.io) - [github.com/go-gitea/gitea](http://github.com/go-gitea/gitea) - -* [Gogs](http://try.gogits.org) - [github.com/gogits/gogs](http://github.com/gogits/gogs) - -* [grafana](https://grafana.com/) - [github.com/grafana/grafana](http://github.com/grafana/grafana) - -* [github.com/m3ng9i/qreader](https://github.com/m3ng9i/qreader) - -* [Wego](http://github.com/go-tango/wego) - -* [Docker.cn](https://docker.cn/) - -* [Xorm Adapter](https://github.com/casbin/xorm-adapter) for [Casbin](https://github.com/casbin/casbin) - [github.com/casbin/xorm-adapter](https://github.com/casbin/xorm-adapter) - -* [Gorevel](http://gorevel.cn/) - [github.com/goofcc/gorevel](http://github.com/goofcc/gorevel) - -* [Gowalker](http://gowalker.org) - [github.com/Unknwon/gowalker](http://github.com/Unknwon/gowalker) - -* [Gobuild.io](http://gobuild.io) - [github.com/shxsun/gobuild](http://github.com/shxsun/gobuild) - -* [Sudo China](http://sudochina.com) - [github.com/insionng/toropress](http://github.com/insionng/toropress) - -* [Godaily](http://godaily.org) - [github.com/govc/godaily](http://github.com/govc/godaily) - -* [YouGam](http://www.yougam.com/) - -* [GoCMS - github.com/zzboy/GoCMS](https://github.com/zzdboy/GoCMS) - -* [GoBBS - gobbs.domolo.com](http://gobbs.domolo.com/) - -* [go-blog](http://wangcheng.me) - [github.com/easykoo/go-blog](https://github.com/easykoo/go-blog) - -## LICENSE - -BSD License [http://creativecommons.org/licenses/BSD/](http://creativecommons.org/licenses/BSD/) diff --git a/vendor/xorm.io/xorm/README_CN.md b/vendor/xorm.io/xorm/README_CN.md deleted file mode 100644 index a5aaae66..00000000 --- a/vendor/xorm.io/xorm/README_CN.md +++ /dev/null @@ -1,520 +0,0 @@ -# xorm 
- -[English](https://gitea.com/xorm/xorm/src/branch/master/README.md) - -xorm 是一个简单而强大的Go语言ORM库. 通过它可以使数据库操作非常简便。 - -[![Build Status](https://drone.gitea.com/api/badges/xorm/xorm/status.svg)](https://drone.gitea.com/xorm/xorm) [![](http://gocover.io/_badge/xorm.io/xorm)](https://gocover.io/xorm.io/xorm) [![](https://goreportcard.com/badge/xorm.io/xorm)](https://goreportcard.com/report/xorm.io/xorm) [![Join the chat at https://img.shields.io/discord/323460943201959939.svg](https://img.shields.io/discord/323460943201959939.svg)](https://discord.gg/HuR2CF3) - -## Notice - -v1.0.0 相对于 v0.8.2 有以下不兼容的变更: - -- 移除了部分不符合Go语言命名的函数,如 `Id`, `Sql`,请使用 `ID`, `SQL` 替代。 -- 删除了对 `xorm.io/core` 的依赖。大部分代码迁移到了 `xorm.io/xorm/core`, `xorm.io/xorm/names`, `xorm.io/xorm/schemas` 等等几个包中. -- 重命名了几个结构体,如: `core.IMapper` -> `names.Mapper`, `core.ILogger` -> `log.Logger`. - -## 特性 - -* 支持 Struct 和数据库表之间的灵活映射,并支持自动同步 -* 事务支持 -* 同时支持原始SQL语句和ORM操作的混合执行 -* 使用连写来简化调用 -* 支持使用ID, In, Where, Limit, Join, Having, Table, SQL, Cols等函数和结构体等方式作为条件 -* 支持级联加载Struct -* Schema支持(仅Postgres) -* 支持缓存 -* 通过 [xorm.io/reverse](https://xorm.io/reverse) 支持根据数据库自动生成 xorm 结构体 -* 支持记录版本(即乐观锁) -* 通过 [xorm.io/builder](https://xorm.io/builder) 内置 SQL Builder 支持 -* 上下文缓存支持 -* 支持日志上下文 - -## 驱动支持 - -目前支持的Go数据库驱动和对应的数据库如下: - -* [Mysql5.*](https://github.com/mysql/mysql-server/tree/5.7) / [Mysql8.*](https://github.com/mysql/mysql-server) / [Mariadb](https://github.com/MariaDB/server) / [Tidb](https://github.com/pingcap/tidb) - - [github.com/go-sql-driver/mysql](https://github.com/go-sql-driver/mysql) - - [github.com/ziutek/mymysql/godrv](https://github.com/ziutek/mymysql/godrv) - -* [Postgres](https://github.com/postgres/postgres) / [Cockroach](https://github.com/cockroachdb/cockroach) - - [github.com/lib/pq](https://github.com/lib/pq) - - [github.com/jackc/pgx](https://github.com/jackc/pgx) - -* [SQLite](https://sqlite.org) - - [github.com/mattn/go-sqlite3](https://github.com/mattn/go-sqlite3) - - 
[modernc.org/sqlite](https://gitlab.com/cznic/sqlite) (Windows试验性支持) - -* MsSql - - [github.com/denisenkom/go-mssqldb](https://github.com/denisenkom/go-mssqldb) - -* Oracle - - [github.com/godror/godror](https://github.com/godror/godror) (试验性支持) - - [github.com/mattn/go-oci8](https://github.com/mattn/go-oci8) (试验性支持) - -## 安装 - - go get xorm.io/xorm - -## 文档 - -* [操作指南](http://xorm.io/docs) - -* [Godoc代码文档](http://pkg.go.dev/xorm.io/xorm) - -# 快速开始 - -* 第一步创建引擎,`driverName`, `dataSourceName` 和 `database/sql` 接口相同 - -```Go -engine, err := xorm.NewEngine(driverName, dataSourceName) -``` - -* 定义一个和表同步的结构体,并且自动同步结构体到数据库 - -```Go -type User struct { - Id int64 - Name string - Salt string - Age int - Passwd string `xorm:"varchar(200)"` - Created time.Time `xorm:"created"` - Updated time.Time `xorm:"updated"` -} - -err := engine.Sync(new(User)) -``` - -* 创建Engine组 - -```Go -dataSourceNameSlice := []string{masterDataSourceName, slave1DataSourceName, slave2DataSourceName} -engineGroup, err := xorm.NewEngineGroup(driverName, dataSourceNameSlice) -``` - -```Go -masterEngine, err := xorm.NewEngine(driverName, masterDataSourceName) -slave1Engine, err := xorm.NewEngine(driverName, slave1DataSourceName) -slave2Engine, err := xorm.NewEngine(driverName, slave2DataSourceName) -engineGroup, err := xorm.NewEngineGroup(masterEngine, []*Engine{slave1Engine, slave2Engine}) -``` - -所有使用 `engine` 都可以简单的用 `engineGroup` 来替换。 - -* `Query` 最原始的也支持SQL语句查询,返回的结果类型为 `[]map[string][]byte`。`QueryString` 返回 `[]map[string]string`, `QueryInterface` 返回 `[]map[string]interface{}`. - -```Go -results, err := engine.Query("select * from user") -results, err := engine.Where("a = 1").Query() - -results, err := engine.QueryString("select * from user") -results, err := engine.Where("a = 1").QueryString() - -results, err := engine.QueryInterface("select * from user") -results, err := engine.Where("a = 1").QueryInterface() -``` - -* `Exec` 执行一个SQL语句 - -```Go -affected, err := engine.Exec("update user set age = ? 
where name = ?", age, name) -``` - -* `Insert` 插入一条或者多条记录 - -```Go -affected, err := engine.Insert(&user) -// INSERT INTO struct () values () - -affected, err := engine.Insert(&user1, &user2) -// INSERT INTO struct1 () values () -// INSERT INTO struct2 () values () - -affected, err := engine.Insert(&users) -// INSERT INTO struct () values (),(),() - -affected, err := engine.Insert(&user1, &users) -// INSERT INTO struct1 () values () -// INSERT INTO struct2 () values (),(),() - -affected, err := engine.Table("user").Insert(map[string]interface{}{ - "name": "lunny", - "age": 18, -}) -// INSERT INTO user (name, age) values (?,?) - -affected, err := engine.Table("user").Insert([]map[string]interface{}{ - { - "name": "lunny", - "age": 18, - }, - { - "name": "lunny2", - "age": 19, - }, -}) -// INSERT INTO user (name, age) values (?,?),(?,?) -``` - -* `Get` 查询单条记录 - -```Go -has, err := engine.Get(&user) -// SELECT * FROM user LIMIT 1 - -has, err := engine.Where("name = ?", name).Desc("id").Get(&user) -// SELECT * FROM user WHERE name = ? ORDER BY id DESC LIMIT 1 - -var name string -has, err := engine.Table(&user).Where("id = ?", id).Cols("name").Get(&name) -// SELECT name FROM user WHERE id = ? - -var id int64 -has, err := engine.Table(&user).Where("name = ?", name).Cols("id").Get(&id) -has, err := engine.SQL("select id from user").Get(&id) -// SELECT id FROM user WHERE name = ? - -var id int64 -var name string -has, err := engine.Table(&user).Cols("id", "name").Get(&id, &name) -// SELECT id, name FROM user LIMIT 1 - -var valuesMap = make(map[string]string) -has, err := engine.Table(&user).Where("id = ?", id).Get(&valuesMap) -// SELECT * FROM user WHERE id = ? - -var valuesSlice = make([]interface{}, len(cols)) -has, err := engine.Table(&user).Where("id = ?", id).Cols(cols...).Get(&valuesSlice) -// SELECT col1, col2, col3 FROM user WHERE id = ? 
-``` - -* `Exist` 检测记录是否存在 - -```Go -has, err := testEngine.Exist(new(RecordExist)) -// SELECT * FROM record_exist LIMIT 1 - -has, err = testEngine.Exist(&RecordExist{ - Name: "test1", - }) -// SELECT * FROM record_exist WHERE name = ? LIMIT 1 - -has, err = testEngine.Where("name = ?", "test1").Exist(&RecordExist{}) -// SELECT * FROM record_exist WHERE name = ? LIMIT 1 - -has, err = testEngine.SQL("select * from record_exist where name = ?", "test1").Exist() -// select * from record_exist where name = ? - -has, err = testEngine.Table("record_exist").Exist() -// SELECT * FROM record_exist LIMIT 1 - -has, err = testEngine.Table("record_exist").Where("name = ?", "test1").Exist() -// SELECT * FROM record_exist WHERE name = ? LIMIT 1 -``` - -* `Find` 查询多条记录,当然可以使用Join和extends来组合使用 - -```Go -var users []User -err := engine.Where("name = ?", name).And("age > 10").Limit(10, 0).Find(&users) -// SELECT * FROM user WHERE name = ? AND age > 10 limit 10 offset 0 - -type Detail struct { - Id int64 - UserId int64 `xorm:"index"` -} - -type UserDetail struct { - User `xorm:"extends"` - Detail `xorm:"extends"` -} - -var users []UserDetail -err := engine.Table("user").Select("user.*, detail.*"). - Join("INNER", "detail", "detail.user_id = user.id"). - Where("user.name = ?", name).Limit(10, 0). - Find(&users) -// SELECT user.*, detail.* FROM user INNER JOIN detail WHERE user.name = ? 
limit 10 offset 0 -``` - -* `Iterate` 和 `Rows` 根据条件遍历数据库,可以有两种方式: Iterate and Rows - -```Go -err := engine.Iterate(&User{Name:name}, func(idx int, bean interface{}) error { - user := bean.(*User) - return nil -}) -// SELECT * FROM user - -err := engine.BufferSize(100).Iterate(&User{Name:name}, func(idx int, bean interface{}) error { - user := bean.(*User) - return nil -}) -// SELECT * FROM user Limit 0, 100 -// SELECT * FROM user Limit 101, 100 -``` - -Rows 的用法类似 `sql.Rows`。 - -```Go -rows, err := engine.Rows(&User{Name:name}) -// SELECT * FROM user -defer rows.Close() -bean := new(Struct) -for rows.Next() { - err = rows.Scan(bean) -} -``` - -或者 - -```Go -rows, err := engine.Cols("name", "age").Rows(&User{Name:name}) -// SELECT * FROM user -defer rows.Close() -for rows.Next() { - var name string - var age int - err = rows.Scan(&name, &age) -} -``` - -* `Update` 更新数据,除非使用Cols,AllCols函数指明,默认只更新非空和非0的字段 - -```Go -affected, err := engine.ID(1).Update(&user) -// UPDATE user SET ... Where id = ? - -affected, err := engine.Update(&user, &User{Name:name}) -// UPDATE user SET ... Where name = ? - -var ids = []int64{1, 2, 3} -affected, err := engine.In(ids).Update(&user) -// UPDATE user SET ... Where id IN (?, ?, ?) - -// force update indicated columns by Cols -affected, err := engine.ID(1).Cols("age").Update(&User{Name:name, Age: 12}) -// UPDATE user SET age = ?, updated=? Where id = ? - -// force NOT update indicated columns by Omit -affected, err := engine.ID(1).Omit("name").Update(&User{Name:name, Age: 12}) -// UPDATE user SET age = ?, updated=? Where id = ? - -affected, err := engine.ID(1).AllCols().Update(&user) -// UPDATE user SET name=?,age=?,salt=?,passwd=?,updated=? Where id = ? -``` - -* `Delete` 删除记录,需要注意,删除必须至少有一个条件,否则会报错。要清空数据库可以用EmptyTable - -```Go -affected, err := engine.Where(...).Delete(&user) -// DELETE FROM user Where ... - -affected, err := engine.ID(2).Delete(&user) -// DELETE FROM user Where id = ? 
- -affected, err := engine.Table("user").Where(...).Delete() -// DELETE FROM user WHERE ... -``` - -* `Count` 获取记录条数 - -```Go -counts, err := engine.Count(&user) -// SELECT count(*) AS total FROM user -``` - -* `Sum` 求和函数 - -```Go -agesFloat64, err := engine.Sum(&user, "age") -// SELECT sum(age) AS total FROM user - -agesInt64, err := engine.SumInt(&user, "age") -// SELECT sum(age) AS total FROM user - -sumFloat64Slice, err := engine.Sums(&user, "age", "score") -// SELECT sum(age), sum(score) FROM user - -sumInt64Slice, err := engine.SumsInt(&user, "age", "score") -// SELECT sum(age), sum(score) FROM user -``` - -* 条件编辑器 - -```Go -err := engine.Where(builder.NotIn("a", 1, 2).And(builder.In("b", "c", "d", "e"))).Find(&users) -// SELECT id, name ... FROM user WHERE a NOT IN (?, ?) AND b IN (?, ?, ?) -``` - -* 在一个Go程中多次操作数据库,但没有事务 - -```Go -session := engine.NewSession() -defer session.Close() - -user1 := Userinfo{Username: "xiaoxiao", Departname: "dev", Alias: "lunny", Created: time.Now()} -if _, err := session.Insert(&user1); err != nil { - return err -} - -user2 := Userinfo{Username: "yyy"} -if _, err := session.Where("id = ?", 2).Update(&user2); err != nil { - return err -} - -if _, err := session.Exec("delete from userinfo where username = ?", user2.Username); err != nil { - return err -} - -return nil -``` - -* 在一个Go程中有事务 - -```Go -session := engine.NewSession() -defer session.Close() - -// add Begin() before any action -if err := session.Begin(); err != nil { - // if returned then will rollback automatically - return err -} - -user1 := Userinfo{Username: "xiaoxiao", Departname: "dev", Alias: "lunny", Created: time.Now()} -if _, err := session.Insert(&user1); err != nil { - return err -} - -user2 := Userinfo{Username: "yyy"} -if _, err := session.Where("id = ?", 2).Update(&user2); err != nil { - return err -} - -if _, err := session.Exec("delete from userinfo where username = ?", user2.Username); err != nil { - return err -} - -// add Commit() after all actions 
-return session.Commit() -``` - -* 事务的简写方法 - -```Go -res, err := engine.Transaction(func(session *xorm.Session) (interface{}, error) { - user1 := Userinfo{Username: "xiaoxiao", Departname: "dev", Alias: "lunny", Created: time.Now()} - if _, err := session.Insert(&user1); err != nil { - return nil, err - } - - user2 := Userinfo{Username: "yyy"} - if _, err := session.Where("id = ?", 2).Update(&user2); err != nil { - return nil, err - } - - if _, err := session.Exec("delete from userinfo where username = ?", user2.Username); err != nil { - return nil, err - } - return nil, nil -}) -``` - -* 上下文缓存,如果启用,那么针对单个对象的查询将会被缓存到系统中,可以被下一个查询使用。 - -```Go - sess := engine.NewSession() - defer sess.Close() - - var context = xorm.NewMemoryContextCache() - - var c2 ContextGetStruct - has, err := sess.ID(1).ContextCache(context).Get(&c2) - assert.NoError(t, err) - assert.True(t, has) - assert.EqualValues(t, 1, c2.Id) - assert.EqualValues(t, "1", c2.Name) - sql, args := sess.LastSQL() - assert.True(t, len(sql) > 0) - assert.True(t, len(args) > 0) - - var c3 ContextGetStruct - has, err = sess.ID(1).ContextCache(context).Get(&c3) - assert.NoError(t, err) - assert.True(t, has) - assert.EqualValues(t, 1, c3.Id) - assert.EqualValues(t, "1", c3.Name) - sql, args = sess.LastSQL() - assert.True(t, len(sql) == 0) - assert.True(t, len(args) == 0) -``` - -## 贡献 - -如果您也想为Xorm贡献您的力量,请查看 [CONTRIBUTING](https://gitea.com/xorm/xorm/src/branch/master/CONTRIBUTING.md)。您也可以加入QQ群 技术帮助和讨论。 -群一:280360085 (已满) -群二:795010183 - -## Credits - -### Contributors - -感谢所有的贡献者. [[Contribute](CONTRIBUTING.md)]. - - -### Backers - -感谢我们所有的 backers! 
🙏 [[成为 backer](https://opencollective.com/xorm#backer)] - - - -### Sponsors - -成为 sponsor 来支持 xorm。您的 logo 将会被显示并被链接到您的网站。 [[成为 sponsor](https://opencollective.com/xorm#sponsor)] - -# 案例 - -* [Gitea](http://gitea.io) - [github.com/go-gitea/gitea](http://github.com/go-gitea/gitea) - -* [Gogs](http://try.gogits.org) - [github.com/gogits/gogs](http://github.com/gogits/gogs) - -* [grafana](https://grafana.com/) - [github.com/grafana/grafana](http://github.com/grafana/grafana) - -* [Go语言中文网](http://studygolang.com/) - [github.com/studygolang/studygolang](https://github.com/studygolang/studygolang) - -* [github.com/m3ng9i/qreader](https://github.com/m3ng9i/qreader) - -* [Wego](http://github.com/go-tango/wego) - -* [Docker.cn](https://docker.cn/) - -* [Xorm Adapter](https://github.com/casbin/xorm-adapter) for [Casbin](https://github.com/casbin/casbin) - [github.com/casbin/xorm-adapter](https://github.com/casbin/xorm-adapter) - -* [Gowalker](http://gowalker.org) - [github.com/Unknwon/gowalker](http://github.com/Unknwon/gowalker) - -* [Gobuild.io](http://gobuild.io) - [github.com/shxsun/gobuild](http://github.com/shxsun/gobuild) - -* [Sudo China](http://sudochina.com) - [github.com/insionng/toropress](http://github.com/insionng/toropress) - -* [Godaily](http://godaily.org) - [github.com/govc/godaily](http://github.com/govc/godaily) - -* [YouGam](http://www.yougam.com/) - -* [GoCMS - github.com/zzboy/GoCMS](https://github.com/zzdboy/GoCMS) - -* [GoBBS - gobbs.domolo.com](http://gobbs.domolo.com/) - -* [go-blog](http://wangcheng.me) - [github.com/easykoo/go-blog](https://github.com/easykoo/go-blog) - - -## 更新日志 - -请访问 [CHANGELOG.md](CHANGELOG.md) 获得更新日志。 - -## LICENSE - -BSD License -[http://creativecommons.org/licenses/BSD/](http://creativecommons.org/licenses/BSD/) diff --git a/vendor/xorm.io/xorm/caches/cache.go b/vendor/xorm.io/xorm/caches/cache.go deleted file mode 100644 index 7b80eb88..00000000 --- a/vendor/xorm.io/xorm/caches/cache.go +++ /dev/null @@ -1,99 +0,0 @@ 
-// Copyright 2019 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package caches - -import ( - "bytes" - "encoding/gob" - "errors" - "fmt" - "strings" - "time" - - "xorm.io/xorm/schemas" -) - -const ( - // CacheExpired is default cache expired time - CacheExpired = 60 * time.Minute - // CacheMaxMemory is not use now - CacheMaxMemory = 256 - // CacheGcInterval represents interval time to clear all expired nodes - CacheGcInterval = 10 * time.Minute - // CacheGcMaxRemoved represents max nodes removed when gc - CacheGcMaxRemoved = 20 -) - -// list all the errors -var ( - ErrCacheMiss = errors.New("xorm/cache: key not found") - ErrNotStored = errors.New("xorm/cache: not stored") - // ErrNotExist record does not exist error - ErrNotExist = errors.New("Record does not exist") -) - -// CacheStore is a interface to store cache -type CacheStore interface { - // key is primary key or composite primary key - // value is struct's pointer - // key format : -p--... - Put(key string, value interface{}) error - Get(key string) (interface{}, error) - Del(key string) error -} - -// Cacher is an interface to provide cache -// id format : u--... 
-type Cacher interface { - GetIds(tableName, sql string) interface{} - GetBean(tableName string, id string) interface{} - PutIds(tableName, sql string, ids interface{}) - PutBean(tableName string, id string, obj interface{}) - DelIds(tableName, sql string) - DelBean(tableName string, id string) - ClearIds(tableName string) - ClearBeans(tableName string) -} - -func encodeIds(ids []schemas.PK) (string, error) { - buf := new(bytes.Buffer) - enc := gob.NewEncoder(buf) - err := enc.Encode(ids) - - return buf.String(), err -} - -func decodeIds(s string) ([]schemas.PK, error) { - pks := make([]schemas.PK, 0) - - dec := gob.NewDecoder(strings.NewReader(s)) - err := dec.Decode(&pks) - - return pks, err -} - -// GetCacheSql returns cacher PKs via SQL -func GetCacheSql(m Cacher, tableName, sql string, args interface{}) ([]schemas.PK, error) { - bytes := m.GetIds(tableName, GenSqlKey(sql, args)) - if bytes == nil { - return nil, errors.New("Not Exist") - } - return decodeIds(bytes.(string)) -} - -// PutCacheSql puts cacher SQL and PKs -func PutCacheSql(m Cacher, ids []schemas.PK, tableName, sql string, args interface{}) error { - bytes, err := encodeIds(ids) - if err != nil { - return err - } - m.PutIds(tableName, GenSqlKey(sql, args), bytes) - return nil -} - -// GenSqlKey generates cache key -func GenSqlKey(sql string, args interface{}) string { - return fmt.Sprintf("%v-%v", sql, args) -} diff --git a/vendor/xorm.io/xorm/caches/encode.go b/vendor/xorm.io/xorm/caches/encode.go deleted file mode 100644 index 8659668c..00000000 --- a/vendor/xorm.io/xorm/caches/encode.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2020 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package caches - -import ( - "bytes" - "crypto/md5" - "encoding/gob" - "encoding/json" - "fmt" - "io" -) - -// Md5 return md5 hash string -func Md5(str string) string { - m := md5.New() - _, _ = io.WriteString(m, str) - return fmt.Sprintf("%x", m.Sum(nil)) -} - -// Encode Encode data -func Encode(data interface{}) ([]byte, error) { - // return JsonEncode(data) - return GobEncode(data) -} - -// Decode decode data -func Decode(data []byte, to interface{}) error { - // return JsonDecode(data, to) - return GobDecode(data, to) -} - -// GobEncode encode data with gob -func GobEncode(data interface{}) ([]byte, error) { - var buf bytes.Buffer - enc := gob.NewEncoder(&buf) - err := enc.Encode(&data) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// GobDecode decode data with gob -func GobDecode(data []byte, to interface{}) error { - buf := bytes.NewBuffer(data) - dec := gob.NewDecoder(buf) - return dec.Decode(to) -} - -// JsonEncode encode data with json -func JsonEncode(data interface{}) ([]byte, error) { - val, err := json.Marshal(data) - if err != nil { - return nil, err - } - return val, nil -} - -// JsonDecode decode data with json -func JsonDecode(data []byte, to interface{}) error { - return json.Unmarshal(data, to) -} diff --git a/vendor/xorm.io/xorm/caches/leveldb.go b/vendor/xorm.io/xorm/caches/leveldb.go deleted file mode 100644 index f2f71d84..00000000 --- a/vendor/xorm.io/xorm/caches/leveldb.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2020 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package caches - -import ( - "log" - - "github.com/syndtr/goleveldb/leveldb" -) - -// LevelDBStore implements CacheStore provide local machine -type LevelDBStore struct { - store *leveldb.DB - Debug bool - v interface{} -} - -var _ CacheStore = &LevelDBStore{} - -// NewLevelDBStore creates a leveldb store -func NewLevelDBStore(dbfile string) (*LevelDBStore, error) { - db := &LevelDBStore{} - h, err := leveldb.OpenFile(dbfile, nil) - if err != nil { - return nil, err - } - db.store = h - return db, nil -} - -// Put implements CacheStore -func (s *LevelDBStore) Put(key string, value interface{}) error { - val, err := Encode(value) - if err != nil { - if s.Debug { - log.Println("[LevelDB]EncodeErr: ", err, "Key:", key) - } - return err - } - err = s.store.Put([]byte(key), val, nil) - if err != nil { - if s.Debug { - log.Println("[LevelDB]PutErr: ", err, "Key:", key) - } - return err - } - if s.Debug { - log.Println("[LevelDB]Put: ", key) - } - return err -} - -// Get implements CacheStore -func (s *LevelDBStore) Get(key string) (interface{}, error) { - data, err := s.store.Get([]byte(key), nil) - if err != nil { - if s.Debug { - log.Println("[LevelDB]GetErr: ", err, "Key:", key) - } - if err == leveldb.ErrNotFound { - return nil, ErrNotExist - } - return nil, err - } - - err = Decode(data, &s.v) - if err != nil { - if s.Debug { - log.Println("[LevelDB]DecodeErr: ", err, "Key:", key) - } - return nil, err - } - if s.Debug { - log.Println("[LevelDB]Get: ", key, s.v) - } - return s.v, err -} - -// Del implements CacheStore -func (s *LevelDBStore) Del(key string) error { - err := s.store.Delete([]byte(key), nil) - if err != nil { - if s.Debug { - log.Println("[LevelDB]DelErr: ", err, "Key:", key) - } - return err - } - if s.Debug { - log.Println("[LevelDB]Del: ", key) - } - return err -} - -// Close implements CacheStore -func (s *LevelDBStore) Close() { - s.store.Close() -} diff --git a/vendor/xorm.io/xorm/caches/lru.go b/vendor/xorm.io/xorm/caches/lru.go deleted file 
mode 100644 index 885f02d6..00000000 --- a/vendor/xorm.io/xorm/caches/lru.go +++ /dev/null @@ -1,278 +0,0 @@ -// Copyright 2015 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package caches - -import ( - "container/list" - "fmt" - "sync" - "time" -) - -// LRUCacher implments cache object facilities -type LRUCacher struct { - idList *list.List - sqlList *list.List - idIndex map[string]map[string]*list.Element - sqlIndex map[string]map[string]*list.Element - store CacheStore - mutex sync.Mutex - MaxElementSize int - Expired time.Duration - GcInterval time.Duration -} - -// NewLRUCacher creates a cacher -func NewLRUCacher(store CacheStore, maxElementSize int) *LRUCacher { - return NewLRUCacher2(store, 3600*time.Second, maxElementSize) -} - -// NewLRUCacher2 creates a cache include different params -func NewLRUCacher2(store CacheStore, expired time.Duration, maxElementSize int) *LRUCacher { - cacher := &LRUCacher{store: store, idList: list.New(), - sqlList: list.New(), Expired: expired, - GcInterval: CacheGcInterval, MaxElementSize: maxElementSize, - sqlIndex: make(map[string]map[string]*list.Element), - idIndex: make(map[string]map[string]*list.Element), - } - cacher.RunGC() - return cacher -} - -// RunGC run once every m.GcInterval -func (m *LRUCacher) RunGC() { - time.AfterFunc(m.GcInterval, func() { - m.RunGC() - m.GC() - }) -} - -// GC check ids lit and sql list to remove all element expired -func (m *LRUCacher) GC() { - m.mutex.Lock() - defer m.mutex.Unlock() - var removedNum int - for e := m.idList.Front(); e != nil; { - if removedNum <= CacheGcMaxRemoved && - time.Since(e.Value.(*idNode).lastVisit) > m.Expired { - removedNum++ - next := e.Next() - node := e.Value.(*idNode) - m.delBean(node.tbName, node.id) - e = next - } else { - break - } - } - - removedNum = 0 - for e := m.sqlList.Front(); e != nil; { - if removedNum <= CacheGcMaxRemoved && - 
time.Since(e.Value.(*sqlNode).lastVisit) > m.Expired { - removedNum++ - next := e.Next() - node := e.Value.(*sqlNode) - m.delIds(node.tbName, node.sql) - e = next - } else { - break - } - } -} - -// GetIds returns all bean's ids according to sql and parameter from cache -func (m *LRUCacher) GetIds(tableName, sql string) interface{} { - m.mutex.Lock() - defer m.mutex.Unlock() - if _, ok := m.sqlIndex[tableName]; !ok { - m.sqlIndex[tableName] = make(map[string]*list.Element) - } - if v, err := m.store.Get(sql); err == nil { - if el, ok := m.sqlIndex[tableName][sql]; !ok { - el = m.sqlList.PushBack(newSQLNode(tableName, sql)) - m.sqlIndex[tableName][sql] = el - } else { - lastTime := el.Value.(*sqlNode).lastVisit - // if expired, remove the node and return nil - if time.Since(lastTime) > m.Expired { - m.delIds(tableName, sql) - return nil - } - m.sqlList.MoveToBack(el) - el.Value.(*sqlNode).lastVisit = time.Now() - } - return v - } - - m.delIds(tableName, sql) - return nil -} - -// GetBean returns bean according tableName and id from cache -func (m *LRUCacher) GetBean(tableName string, id string) interface{} { - m.mutex.Lock() - defer m.mutex.Unlock() - if _, ok := m.idIndex[tableName]; !ok { - m.idIndex[tableName] = make(map[string]*list.Element) - } - tid := genID(tableName, id) - if v, err := m.store.Get(tid); err == nil { - if el, ok := m.idIndex[tableName][id]; ok { - lastTime := el.Value.(*idNode).lastVisit - // if expired, remove the node and return nil - if time.Since(lastTime) > m.Expired { - m.delBean(tableName, id) - return nil - } - m.idList.MoveToBack(el) - el.Value.(*idNode).lastVisit = time.Now() - } else { - el = m.idList.PushBack(newIDNode(tableName, id)) - m.idIndex[tableName][id] = el - } - return v - } - - // store bean is not exist, then remove memory's index - m.delBean(tableName, id) - return nil -} - -// clearIds clears all sql-ids mapping on table tableName from cache -func (m *LRUCacher) clearIds(tableName string) { - if tis, ok := 
m.sqlIndex[tableName]; ok { - for sql, v := range tis { - m.sqlList.Remove(v) - _ = m.store.Del(sql) - } - } - m.sqlIndex[tableName] = make(map[string]*list.Element) -} - -// ClearIds clears all sql-ids mapping on table tableName from cache -func (m *LRUCacher) ClearIds(tableName string) { - m.mutex.Lock() - m.clearIds(tableName) - m.mutex.Unlock() -} - -func (m *LRUCacher) clearBeans(tableName string) { - if tis, ok := m.idIndex[tableName]; ok { - for id, v := range tis { - m.idList.Remove(v) - tid := genID(tableName, id) - _ = m.store.Del(tid) - } - } - m.idIndex[tableName] = make(map[string]*list.Element) -} - -// ClearBeans clears all beans in some table -func (m *LRUCacher) ClearBeans(tableName string) { - m.mutex.Lock() - m.clearBeans(tableName) - m.mutex.Unlock() -} - -// PutIds pus ids into table -func (m *LRUCacher) PutIds(tableName, sql string, ids interface{}) { - m.mutex.Lock() - if _, ok := m.sqlIndex[tableName]; !ok { - m.sqlIndex[tableName] = make(map[string]*list.Element) - } - if el, ok := m.sqlIndex[tableName][sql]; !ok { - el = m.sqlList.PushBack(newSQLNode(tableName, sql)) - m.sqlIndex[tableName][sql] = el - } else { - el.Value.(*sqlNode).lastVisit = time.Now() - } - _ = m.store.Put(sql, ids) - if m.sqlList.Len() > m.MaxElementSize { - e := m.sqlList.Front() - node := e.Value.(*sqlNode) - m.delIds(node.tbName, node.sql) - } - m.mutex.Unlock() -} - -// PutBean puts beans into table -func (m *LRUCacher) PutBean(tableName string, id string, obj interface{}) { - m.mutex.Lock() - var el *list.Element - var ok bool - - if el, ok = m.idIndex[tableName][id]; !ok { - el = m.idList.PushBack(newIDNode(tableName, id)) - m.idIndex[tableName][id] = el - } else { - el.Value.(*idNode).lastVisit = time.Now() - } - - _ = m.store.Put(genID(tableName, id), obj) - if m.idList.Len() > m.MaxElementSize { - e := m.idList.Front() - node := e.Value.(*idNode) - m.delBean(node.tbName, node.id) - } - m.mutex.Unlock() -} - -func (m *LRUCacher) delIds(tableName, sql string) { 
- if _, ok := m.sqlIndex[tableName]; ok { - if el, ok := m.sqlIndex[tableName][sql]; ok { - delete(m.sqlIndex[tableName], sql) - m.sqlList.Remove(el) - } - } - _ = m.store.Del(sql) -} - -// DelIds deletes ids -func (m *LRUCacher) DelIds(tableName, sql string) { - m.mutex.Lock() - m.delIds(tableName, sql) - m.mutex.Unlock() -} - -func (m *LRUCacher) delBean(tableName string, id string) { - tid := genID(tableName, id) - if el, ok := m.idIndex[tableName][id]; ok { - delete(m.idIndex[tableName], id) - m.idList.Remove(el) - m.clearIds(tableName) - } - _ = m.store.Del(tid) -} - -// DelBean deletes beans in some table -func (m *LRUCacher) DelBean(tableName string, id string) { - m.mutex.Lock() - m.delBean(tableName, id) - m.mutex.Unlock() -} - -type idNode struct { - tbName string - id string - lastVisit time.Time -} - -type sqlNode struct { - tbName string - sql string - lastVisit time.Time -} - -func genID(prefix string, id string) string { - return fmt.Sprintf("%s-%s", prefix, id) -} - -func newIDNode(tbName string, id string) *idNode { - return &idNode{tbName, id, time.Now()} -} - -func newSQLNode(tbName, sql string) *sqlNode { - return &sqlNode{tbName, sql, time.Now()} -} diff --git a/vendor/xorm.io/xorm/caches/manager.go b/vendor/xorm.io/xorm/caches/manager.go deleted file mode 100644 index 89a14106..00000000 --- a/vendor/xorm.io/xorm/caches/manager.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2020 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package caches - -import "sync" - -// Manager represents a cache manager -type Manager struct { - cacher Cacher - disableGlobalCache bool - - cachers map[string]Cacher - cacherLock sync.RWMutex -} - -// NewManager creates a cache manager -func NewManager() *Manager { - return &Manager{ - cachers: make(map[string]Cacher), - } -} - -// SetDisableGlobalCache disable global cache or not -func (mgr *Manager) SetDisableGlobalCache(disable bool) { - if mgr.disableGlobalCache != disable { - mgr.disableGlobalCache = disable - } -} - -// SetCacher set cacher of table -func (mgr *Manager) SetCacher(tableName string, cacher Cacher) { - mgr.cacherLock.Lock() - mgr.cachers[tableName] = cacher - mgr.cacherLock.Unlock() -} - -// GetCacher returns a cache of a table -func (mgr *Manager) GetCacher(tableName string) Cacher { - var cacher Cacher - var ok bool - mgr.cacherLock.RLock() - cacher, ok = mgr.cachers[tableName] - mgr.cacherLock.RUnlock() - if !ok && !mgr.disableGlobalCache { - cacher = mgr.cacher - } - return cacher -} - -// SetDefaultCacher set the default cacher. Xorm's default not enable cacher. -func (mgr *Manager) SetDefaultCacher(cacher Cacher) { - mgr.cacher = cacher -} - -// GetDefaultCacher returns the default cacher -func (mgr *Manager) GetDefaultCacher() Cacher { - return mgr.cacher -} diff --git a/vendor/xorm.io/xorm/caches/memory_store.go b/vendor/xorm.io/xorm/caches/memory_store.go deleted file mode 100644 index f16254d8..00000000 --- a/vendor/xorm.io/xorm/caches/memory_store.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2015 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package caches - -import ( - "sync" -) - -var _ CacheStore = NewMemoryStore() - -// MemoryStore represents in-memory store -type MemoryStore struct { - store map[interface{}]interface{} - mutex sync.RWMutex -} - -// NewMemoryStore creates a new store in memory -func NewMemoryStore() *MemoryStore { - return &MemoryStore{store: make(map[interface{}]interface{})} -} - -// Put puts object into store -func (s *MemoryStore) Put(key string, value interface{}) error { - s.mutex.Lock() - defer s.mutex.Unlock() - s.store[key] = value - return nil -} - -// Get gets object from store -func (s *MemoryStore) Get(key string) (interface{}, error) { - s.mutex.RLock() - defer s.mutex.RUnlock() - if v, ok := s.store[key]; ok { - return v, nil - } - - return nil, ErrNotExist -} - -// Del deletes object -func (s *MemoryStore) Del(key string) error { - s.mutex.Lock() - defer s.mutex.Unlock() - delete(s.store, key) - return nil -} diff --git a/vendor/xorm.io/xorm/contexts/context_cache.go b/vendor/xorm.io/xorm/contexts/context_cache.go deleted file mode 100644 index 0d0f0f02..00000000 --- a/vendor/xorm.io/xorm/contexts/context_cache.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2018 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package contexts - -// ContextCache is the interface that operates the cache data. -type ContextCache interface { - // Put puts value into cache with key. - Put(key string, val interface{}) - // Get gets cached value by given key. - Get(key string) interface{} -} - -type memoryContextCache map[string]interface{} - -// NewMemoryContextCache return memoryContextCache -func NewMemoryContextCache() memoryContextCache { - return make(map[string]interface{}) -} - -// Put puts value into cache with key. -func (m memoryContextCache) Put(key string, val interface{}) { - m[key] = val -} - -// Get gets cached value by given key. 
-func (m memoryContextCache) Get(key string) interface{} { - return m[key] -} diff --git a/vendor/xorm.io/xorm/contexts/hook.go b/vendor/xorm.io/xorm/contexts/hook.go deleted file mode 100644 index f6d86cfc..00000000 --- a/vendor/xorm.io/xorm/contexts/hook.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2020 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package contexts - -import ( - "context" - "database/sql" - "time" -) - -// ContextHook represents a hook context -type ContextHook struct { - start time.Time - Ctx context.Context - SQL string // log content or SQL - Args []interface{} // if it's a SQL, it's the arguments - Result sql.Result - ExecuteTime time.Duration - Err error // SQL executed error -} - -// NewContextHook return context for hook -func NewContextHook(ctx context.Context, sql string, args []interface{}) *ContextHook { - return &ContextHook{ - start: time.Now(), - Ctx: ctx, - SQL: sql, - Args: args, - } -} - -// End finish the hook invokation -func (c *ContextHook) End(ctx context.Context, result sql.Result, err error) { - c.Ctx = ctx - c.Result = result - c.Err = err - c.ExecuteTime = time.Since(c.start) -} - -// Hook represents a hook behaviour -type Hook interface { - BeforeProcess(c *ContextHook) (context.Context, error) - AfterProcess(c *ContextHook) error -} - -// Hooks implements Hook interface but contains multiple Hook -type Hooks struct { - hooks []Hook -} - -// AddHook adds a Hook -func (h *Hooks) AddHook(hooks ...Hook) { - h.hooks = append(h.hooks, hooks...) 
-} - -// BeforeProcess invoked before execute the process -func (h *Hooks) BeforeProcess(c *ContextHook) (context.Context, error) { - ctx := c.Ctx - for _, h := range h.hooks { - var err error - ctx, err = h.BeforeProcess(c) - if err != nil { - return nil, err - } - } - return ctx, nil -} - -// AfterProcess invoked after exetue the process -func (h *Hooks) AfterProcess(c *ContextHook) error { - firstErr := c.Err - for _, h := range h.hooks { - err := h.AfterProcess(c) - if err != nil && firstErr == nil { - firstErr = err - } - } - return firstErr -} diff --git a/vendor/xorm.io/xorm/convert/bool.go b/vendor/xorm.io/xorm/convert/bool.go deleted file mode 100644 index 58b23f4b..00000000 --- a/vendor/xorm.io/xorm/convert/bool.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2021 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package convert - -import ( - "database/sql" - "fmt" - "strconv" -) - -// AsBool convert interface as bool -func AsBool(src interface{}) (bool, error) { - switch v := src.(type) { - case bool: - return v, nil - case *bool: - return *v, nil - case *sql.NullBool: - return v.Bool, nil - case int64: - return v > 0, nil - case int: - return v > 0, nil - case int8: - return v > 0, nil - case int16: - return v > 0, nil - case int32: - return v > 0, nil - case []byte: - if len(v) == 0 { - return false, nil - } - if v[0] == 0x00 { - return false, nil - } else if v[0] == 0x01 { - return true, nil - } - return strconv.ParseBool(string(v)) - case string: - return strconv.ParseBool(v) - case *sql.NullInt64: - return v.Int64 > 0, nil - case *sql.NullInt32: - return v.Int32 > 0, nil - default: - return false, fmt.Errorf("unknow type %T as bool", src) - } -} diff --git a/vendor/xorm.io/xorm/convert/conversion.go b/vendor/xorm.io/xorm/convert/conversion.go deleted file mode 100644 index b69e345c..00000000 --- a/vendor/xorm.io/xorm/convert/conversion.go +++ /dev/null 
@@ -1,389 +0,0 @@ -// Copyright 2017 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package convert - -import ( - "database/sql" - "database/sql/driver" - "encoding/json" - "errors" - "fmt" - "math/big" - "reflect" - "strconv" - "time" -) - -// Conversion is an interface. A type implements Conversion will according -// the custom method to fill into database and retrieve from database. -type Conversion interface { - FromDB([]byte) error - ToDB() ([]byte, error) -} - -// ErrNilPtr represents an error -var ErrNilPtr = errors.New("destination pointer is nil") // embedded in descriptive error - -func strconvErr(err error) error { - if ne, ok := err.(*strconv.NumError); ok { - return ne.Err - } - return err -} - -func cloneBytes(b []byte) []byte { - if b == nil { - return nil - } - c := make([]byte, len(b)) - copy(c, b) - return c -} - -// Assign copies to dest the value in src, converting it if possible. -// An error is returned if the copy would result in loss of information. -// dest should be a pointer type. -func Assign(dest, src interface{}, originalLocation *time.Location, convertedLocation *time.Location) error { - // Common cases, without reflect. 
- switch s := src.(type) { - case *interface{}: - return Assign(dest, *s, originalLocation, convertedLocation) - case string: - switch d := dest.(type) { - case *string: - if d == nil { - return ErrNilPtr - } - *d = s - return nil - case *[]byte: - if d == nil { - return ErrNilPtr - } - *d = []byte(s) - return nil - } - case []byte: - switch d := dest.(type) { - case *string: - if d == nil { - return ErrNilPtr - } - *d = string(s) - return nil - case *interface{}: - if d == nil { - return ErrNilPtr - } - *d = cloneBytes(s) - return nil - case *[]byte: - if d == nil { - return ErrNilPtr - } - *d = cloneBytes(s) - return nil - } - case time.Time: - switch d := dest.(type) { - case *string: - *d = s.Format(time.RFC3339Nano) - return nil - case *[]byte: - if d == nil { - return ErrNilPtr - } - *d = []byte(s.Format(time.RFC3339Nano)) - return nil - } - case nil: - switch d := dest.(type) { - case *interface{}: - if d == nil { - return ErrNilPtr - } - *d = nil - return nil - case *[]byte: - if d == nil { - return ErrNilPtr - } - *d = nil - return nil - } - case *sql.NullString: - switch d := dest.(type) { - case *int: - if s.Valid { - *d, _ = strconv.Atoi(s.String) - } - return nil - case *int64: - if s.Valid { - *d, _ = strconv.ParseInt(s.String, 10, 64) - } - return nil - case *string: - if s.Valid { - *d = s.String - } - return nil - case *time.Time: - if s.Valid { - var err error - dt, err := String2Time(s.String, originalLocation, convertedLocation) - if err != nil { - return err - } - *d = *dt - } - return nil - case *sql.NullTime: - if s.Valid { - var err error - dt, err := String2Time(s.String, originalLocation, convertedLocation) - if err != nil { - return err - } - d.Valid = true - d.Time = *dt - } - return nil - case *big.Float: - if s.Valid { - if d == nil { - d = big.NewFloat(0) - } - d.SetString(s.String) - } - return nil - } - case *sql.NullInt32: - switch d := dest.(type) { - case *int: - if s.Valid { - *d = int(s.Int32) - } - return nil - case *int8: - 
if s.Valid { - *d = int8(s.Int32) - } - return nil - case *int16: - if s.Valid { - *d = int16(s.Int32) - } - return nil - case *int32: - if s.Valid { - *d = s.Int32 - } - return nil - case *int64: - if s.Valid { - *d = int64(s.Int32) - } - return nil - } - case *sql.NullInt64: - switch d := dest.(type) { - case *int: - if s.Valid { - *d = int(s.Int64) - } - return nil - case *int8: - if s.Valid { - *d = int8(s.Int64) - } - return nil - case *int16: - if s.Valid { - *d = int16(s.Int64) - } - return nil - case *int32: - if s.Valid { - *d = int32(s.Int64) - } - return nil - case *int64: - if s.Valid { - *d = s.Int64 - } - return nil - } - case *sql.NullFloat64: - switch d := dest.(type) { - case *int: - if s.Valid { - *d = int(s.Float64) - } - return nil - case *float64: - if s.Valid { - *d = s.Float64 - } - return nil - } - case *sql.NullBool: - switch d := dest.(type) { - case *bool: - if s.Valid { - *d = s.Bool - } - return nil - } - case *sql.NullTime: - switch d := dest.(type) { - case *time.Time: - if s.Valid { - *d = s.Time - } - return nil - case *string: - if s.Valid { - *d = s.Time.In(convertedLocation).Format("2006-01-02 15:04:05") - } - return nil - } - case *NullUint32: - switch d := dest.(type) { - case *uint8: - if s.Valid { - *d = uint8(s.Uint32) - } - return nil - case *uint16: - if s.Valid { - *d = uint16(s.Uint32) - } - return nil - case *uint: - if s.Valid { - *d = uint(s.Uint32) - } - return nil - } - case *NullUint64: - switch d := dest.(type) { - case *uint64: - if s.Valid { - *d = s.Uint64 - } - return nil - } - case *sql.RawBytes: - switch d := dest.(type) { - case Conversion: - return d.FromDB(*s) - } - } - - switch d := dest.(type) { - case *string: - var sv = reflect.ValueOf(src) - switch sv.Kind() { - case reflect.Bool, - reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, - reflect.Float32, reflect.Float64: - *d = AsString(src) - return nil 
- } - case *[]byte: - if b, ok := AsBytes(src); ok { - *d = b - return nil - } - case *bool: - bv, err := driver.Bool.ConvertValue(src) - if err == nil { - *d = bv.(bool) - } - return err - case *interface{}: - *d = src - return nil - } - - return AssignValue(reflect.ValueOf(dest), src) -} - -var ( - scannerTypePlaceHolder sql.Scanner - scannerType = reflect.TypeOf(&scannerTypePlaceHolder).Elem() -) - -// AssignValue assign src as dv -func AssignValue(dv reflect.Value, src interface{}) error { - if src == nil { - return nil - } - if v, ok := src.(*interface{}); ok { - return AssignValue(dv, *v) - } - - if dv.Type().Implements(scannerType) { - return dv.Interface().(sql.Scanner).Scan(src) - } - - switch dv.Kind() { - case reflect.Ptr: - if dv.IsNil() { - dv.Set(reflect.New(dv.Type().Elem())) - } - return AssignValue(dv.Elem(), src) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - i64, err := AsInt64(src) - if err != nil { - err = strconvErr(err) - return fmt.Errorf("converting driver.Value type %T to a %s: %v", src, dv.Kind(), err) - } - dv.SetInt(i64) - return nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - u64, err := AsUint64(src) - if err != nil { - err = strconvErr(err) - return fmt.Errorf("converting driver.Value type %T to a %s: %v", src, dv.Kind(), err) - } - dv.SetUint(u64) - return nil - case reflect.Float32, reflect.Float64: - f64, err := AsFloat64(src) - if err != nil { - err = strconvErr(err) - return fmt.Errorf("converting driver.Value type %T to a %s: %v", src, dv.Kind(), err) - } - dv.SetFloat(f64) - return nil - case reflect.String: - dv.SetString(AsString(src)) - return nil - case reflect.Bool: - b, err := AsBool(src) - if err != nil { - return err - } - dv.SetBool(b) - return nil - case reflect.Slice, reflect.Map, reflect.Struct, reflect.Array: - data, ok := AsBytes(src) - if !ok { - return fmt.Errorf("convert.AssignValue: src cannot be as bytes %#v", src) - } - if data == 
nil { - return nil - } - if dv.Kind() != reflect.Ptr { - dv = dv.Addr() - } - return json.Unmarshal(data, dv.Interface()) - default: - return fmt.Errorf("convert.AssignValue: unsupported Scan, storing driver.Value type %T into type %T", src, dv.Interface()) - } -} diff --git a/vendor/xorm.io/xorm/convert/float.go b/vendor/xorm.io/xorm/convert/float.go deleted file mode 100644 index 51b441ce..00000000 --- a/vendor/xorm.io/xorm/convert/float.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright 2021 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package convert - -import ( - "database/sql" - "fmt" - "math/big" - "reflect" - "strconv" -) - -// AsFloat64 convets interface as float64 -func AsFloat64(src interface{}) (float64, error) { - switch v := src.(type) { - case int: - return float64(v), nil - case int16: - return float64(v), nil - case int32: - return float64(v), nil - case int8: - return float64(v), nil - case int64: - return float64(v), nil - case uint: - return float64(v), nil - case uint8: - return float64(v), nil - case uint16: - return float64(v), nil - case uint32: - return float64(v), nil - case uint64: - return float64(v), nil - case []byte: - return strconv.ParseFloat(string(v), 64) - case string: - return strconv.ParseFloat(v, 64) - case *sql.NullString: - return strconv.ParseFloat(v.String, 64) - case *sql.NullInt32: - return float64(v.Int32), nil - case *sql.NullInt64: - return float64(v.Int64), nil - case *sql.NullFloat64: - return v.Float64, nil - } - - rv := reflect.ValueOf(src) - switch rv.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return float64(rv.Int()), nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return float64(rv.Uint()), nil - case reflect.Float64, reflect.Float32: - return float64(rv.Float()), nil - case reflect.String: - return 
strconv.ParseFloat(rv.String(), 64) - } - return 0, fmt.Errorf("unsupported value %T as int64", src) -} - -// AsBigFloat converts interface as big.Float -func AsBigFloat(src interface{}) (*big.Float, error) { - res := big.NewFloat(0) - switch v := src.(type) { - case int: - res.SetInt64(int64(v)) - return res, nil - case int16: - res.SetInt64(int64(v)) - return res, nil - case int32: - res.SetInt64(int64(v)) - return res, nil - case int8: - res.SetInt64(int64(v)) - return res, nil - case int64: - res.SetInt64(int64(v)) - return res, nil - case uint: - res.SetUint64(uint64(v)) - return res, nil - case uint8: - res.SetUint64(uint64(v)) - return res, nil - case uint16: - res.SetUint64(uint64(v)) - return res, nil - case uint32: - res.SetUint64(uint64(v)) - return res, nil - case uint64: - res.SetUint64(uint64(v)) - return res, nil - case []byte: - res.SetString(string(v)) - return res, nil - case string: - res.SetString(v) - return res, nil - case *sql.NullString: - if v.Valid { - res.SetString(v.String) - return res, nil - } - return nil, nil - case *sql.NullInt32: - if v.Valid { - res.SetInt64(int64(v.Int32)) - return res, nil - } - return nil, nil - case *sql.NullInt64: - if v.Valid { - res.SetInt64(int64(v.Int64)) - return res, nil - } - return nil, nil - } - - rv := reflect.ValueOf(src) - switch rv.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - res.SetInt64(rv.Int()) - return res, nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - res.SetUint64(rv.Uint()) - return res, nil - case reflect.Float64, reflect.Float32: - res.SetFloat64(rv.Float()) - return res, nil - case reflect.String: - res.SetString(rv.String()) - return res, nil - } - return nil, fmt.Errorf("unsupported value %T as big.Float", src) -} diff --git a/vendor/xorm.io/xorm/convert/int.go b/vendor/xorm.io/xorm/convert/int.go deleted file mode 100644 index af8d4f75..00000000 --- a/vendor/xorm.io/xorm/convert/int.go +++ 
/dev/null @@ -1,178 +0,0 @@ -// Copyright 2021 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package convert - -import ( - "database/sql" - "database/sql/driver" - "fmt" - "reflect" - "strconv" -) - -// AsInt64 converts interface as int64 -func AsInt64(src interface{}) (int64, error) { - switch v := src.(type) { - case int: - return int64(v), nil - case int16: - return int64(v), nil - case int32: - return int64(v), nil - case int8: - return int64(v), nil - case int64: - return v, nil - case uint: - return int64(v), nil - case uint8: - return int64(v), nil - case uint16: - return int64(v), nil - case uint32: - return int64(v), nil - case uint64: - return int64(v), nil - case []byte: - return strconv.ParseInt(string(v), 10, 64) - case string: - return strconv.ParseInt(v, 10, 64) - case *sql.NullString: - return strconv.ParseInt(v.String, 10, 64) - case *sql.NullInt32: - return int64(v.Int32), nil - case *sql.NullInt64: - return int64(v.Int64), nil - } - - rv := reflect.ValueOf(src) - switch rv.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return rv.Int(), nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return int64(rv.Uint()), nil - case reflect.Float64, reflect.Float32: - return int64(rv.Float()), nil - case reflect.String: - return strconv.ParseInt(rv.String(), 10, 64) - } - return 0, fmt.Errorf("unsupported value %T as int64", src) -} - -// AsUint64 converts interface as uint64 -func AsUint64(src interface{}) (uint64, error) { - switch v := src.(type) { - case int: - return uint64(v), nil - case int16: - return uint64(v), nil - case int32: - return uint64(v), nil - case int8: - return uint64(v), nil - case int64: - return uint64(v), nil - case uint: - return uint64(v), nil - case uint8: - return uint64(v), nil - case uint16: - return uint64(v), nil - case uint32: - return 
uint64(v), nil - case uint64: - return v, nil - case []byte: - return strconv.ParseUint(string(v), 10, 64) - case string: - return strconv.ParseUint(v, 10, 64) - case *sql.NullString: - return strconv.ParseUint(v.String, 10, 64) - case *sql.NullInt32: - return uint64(v.Int32), nil - case *sql.NullInt64: - return uint64(v.Int64), nil - } - - rv := reflect.ValueOf(src) - switch rv.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return uint64(rv.Int()), nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return uint64(rv.Uint()), nil - case reflect.Float64, reflect.Float32: - return uint64(rv.Float()), nil - case reflect.String: - return strconv.ParseUint(rv.String(), 10, 64) - } - return 0, fmt.Errorf("unsupported value %T as uint64", src) -} - -var ( - _ sql.Scanner = &NullUint64{} -) - -// NullUint64 represents an uint64 that may be null. -// NullUint64 implements the Scanner interface so -// it can be used as a scan destination, similar to NullString. -type NullUint64 struct { - Uint64 uint64 - Valid bool -} - -// Scan implements the Scanner interface. -func (n *NullUint64) Scan(value interface{}) error { - if value == nil { - n.Uint64, n.Valid = 0, false - return nil - } - n.Valid = true - var err error - n.Uint64, err = AsUint64(value) - return err -} - -// Value implements the driver Valuer interface. -func (n NullUint64) Value() (driver.Value, error) { - if !n.Valid { - return nil, nil - } - return n.Uint64, nil -} - -var ( - _ sql.Scanner = &NullUint32{} -) - -// NullUint32 represents an uint32 that may be null. -// NullUint32 implements the Scanner interface so -// it can be used as a scan destination, similar to NullString. -type NullUint32 struct { - Uint32 uint32 - Valid bool // Valid is true if Uint32 is not NULL -} - -// Scan implements the Scanner interface. 
-func (n *NullUint32) Scan(value interface{}) error { - if value == nil { - n.Uint32, n.Valid = 0, false - return nil - } - n.Valid = true - i64, err := AsUint64(value) - if err != nil { - return err - } - n.Uint32 = uint32(i64) - return nil -} - -// Value implements the driver Valuer interface. -func (n NullUint32) Value() (driver.Value, error) { - if !n.Valid { - return nil, nil - } - return int64(n.Uint32), nil -} diff --git a/vendor/xorm.io/xorm/convert/interface.go b/vendor/xorm.io/xorm/convert/interface.go deleted file mode 100644 index b0f28c81..00000000 --- a/vendor/xorm.io/xorm/convert/interface.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2021 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package convert - -import ( - "database/sql" - "fmt" - "time" -) - -// Interface2Interface converts interface of pointer as interface of value -func Interface2Interface(userLocation *time.Location, v interface{}) (interface{}, error) { - if v == nil { - return nil, nil - } - switch vv := v.(type) { - case *int64: - return *vv, nil - case *int8: - return *vv, nil - case *sql.NullString: - return vv.String, nil - case *sql.RawBytes: - if len([]byte(*vv)) > 0 { - return []byte(*vv), nil - } - return nil, nil - case *sql.NullInt32: - return vv.Int32, nil - case *sql.NullInt64: - return vv.Int64, nil - case *sql.NullFloat64: - return vv.Float64, nil - case *sql.NullBool: - if vv.Valid { - return vv.Bool, nil - } - return nil, nil - case *sql.NullTime: - if vv.Valid { - return vv.Time.In(userLocation).Format("2006-01-02 15:04:05"), nil - } - return "", nil - default: - return "", fmt.Errorf("convert assign string unsupported type: %#v", vv) - } -} diff --git a/vendor/xorm.io/xorm/convert/scanner.go b/vendor/xorm.io/xorm/convert/scanner.go deleted file mode 100644 index 505d3be0..00000000 --- a/vendor/xorm.io/xorm/convert/scanner.go +++ /dev/null @@ -1,19 +0,0 @@ -// 
Copyright 2021 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package convert - -import "database/sql" - -var ( - _ sql.Scanner = &EmptyScanner{} -) - -// EmptyScanner represents an empty scanner which will ignore the scan -type EmptyScanner struct{} - -// Scan implements sql.Scanner -func (EmptyScanner) Scan(value interface{}) error { - return nil -} diff --git a/vendor/xorm.io/xorm/convert/string.go b/vendor/xorm.io/xorm/convert/string.go deleted file mode 100644 index de11fa01..00000000 --- a/vendor/xorm.io/xorm/convert/string.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2021 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package convert - -import ( - "database/sql" - "fmt" - "reflect" - "strconv" -) - -// AsString converts interface as string -func AsString(src interface{}) string { - switch v := src.(type) { - case string: - return v - case []byte: - return string(v) - case *sql.NullString: - return v.String - case *sql.NullInt32: - return fmt.Sprintf("%d", v.Int32) - case *sql.NullInt64: - return fmt.Sprintf("%d", v.Int64) - } - rv := reflect.ValueOf(src) - switch rv.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return strconv.FormatInt(rv.Int(), 10) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return strconv.FormatUint(rv.Uint(), 10) - case reflect.Float64: - return strconv.FormatFloat(rv.Float(), 'g', -1, 64) - case reflect.Float32: - return strconv.FormatFloat(rv.Float(), 'g', -1, 32) - case reflect.Bool: - return strconv.FormatBool(rv.Bool()) - } - return fmt.Sprintf("%v", src) -} - -// AsBytes converts interface as bytes -func AsBytes(src interface{}) ([]byte, bool) { - switch t := src.(type) { - case []byte: - return t, true - case *sql.NullString: - if !t.Valid { - 
return nil, true - } - return []byte(t.String), true - case *sql.RawBytes: - return *t, true - } - - rv := reflect.ValueOf(src) - - switch rv.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return strconv.AppendInt(nil, rv.Int(), 10), true - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return strconv.AppendUint(nil, rv.Uint(), 10), true - case reflect.Float32: - return strconv.AppendFloat(nil, rv.Float(), 'g', -1, 32), true - case reflect.Float64: - return strconv.AppendFloat(nil, rv.Float(), 'g', -1, 64), true - case reflect.Bool: - return strconv.AppendBool(nil, rv.Bool()), true - case reflect.String: - return []byte(rv.String()), true - } - return nil, false -} diff --git a/vendor/xorm.io/xorm/convert/time.go b/vendor/xorm.io/xorm/convert/time.go deleted file mode 100644 index cc2e0a10..00000000 --- a/vendor/xorm.io/xorm/convert/time.go +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright 2021 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package convert - -import ( - "database/sql" - "fmt" - "strconv" - "strings" - "time" - - "xorm.io/xorm/internal/utils" -) - -// String2Time converts a string to time with original location -func String2Time(s string, originalLocation *time.Location, convertedLocation *time.Location) (*time.Time, error) { - if len(s) == 19 { - if s == utils.ZeroTime0 || s == utils.ZeroTime1 { - return &time.Time{}, nil - } - dt, err := time.ParseInLocation("2006-01-02 15:04:05", s, originalLocation) - if err != nil { - return nil, err - } - dt = dt.In(convertedLocation) - return &dt, nil - } else if len(s) == 20 && s[10] == 'T' && s[19] == 'Z' { - dt, err := time.ParseInLocation("2006-01-02T15:04:05", s[:19], originalLocation) - if err != nil { - return nil, err - } - dt = dt.In(convertedLocation) - return &dt, nil - } else if len(s) == 25 && s[10] == 'T' && s[19] == '+' && s[22] == ':' { - dt, err := time.Parse(time.RFC3339, s) - if err != nil { - return nil, err - } - dt = dt.In(convertedLocation) - return &dt, nil - } else if len(s) >= 21 && s[19] == '.' { - var layout = "2006-01-02 15:04:05." 
+ strings.Repeat("0", len(s)-20) - dt, err := time.ParseInLocation(layout, s, originalLocation) - if err != nil { - return nil, err - } - dt = dt.In(convertedLocation) - return &dt, nil - } else if len(s) == 10 && s[4] == '-' { - if s == "0000-00-00" || s == "0001-01-01" { - return &time.Time{}, nil - } - dt, err := time.ParseInLocation("2006-01-02", s, originalLocation) - if err != nil { - return nil, err - } - dt = dt.In(convertedLocation) - return &dt, nil - } else { - i, err := strconv.ParseInt(s, 10, 64) - if err == nil { - tm := time.Unix(i, 0).In(convertedLocation) - return &tm, nil - } - } - return nil, fmt.Errorf("unsupported conversion from %s to time", s) -} - -// AsTime converts interface as time -func AsTime(src interface{}, dbLoc *time.Location, uiLoc *time.Location) (*time.Time, error) { - switch t := src.(type) { - case string: - return String2Time(t, dbLoc, uiLoc) - case *sql.NullString: - if !t.Valid { - return nil, nil - } - return String2Time(t.String, dbLoc, uiLoc) - case []uint8: - if t == nil { - return nil, nil - } - return String2Time(string(t), dbLoc, uiLoc) - case *sql.NullTime: - if !t.Valid { - return nil, nil - } - z, _ := t.Time.Zone() - if len(z) == 0 || t.Time.Year() == 0 || t.Time.Location().String() != dbLoc.String() { - tm := time.Date(t.Time.Year(), t.Time.Month(), t.Time.Day(), t.Time.Hour(), - t.Time.Minute(), t.Time.Second(), t.Time.Nanosecond(), dbLoc).In(uiLoc) - return &tm, nil - } - tm := t.Time.In(uiLoc) - return &tm, nil - case *time.Time: - z, _ := t.Zone() - if len(z) == 0 || t.Year() == 0 || t.Location().String() != dbLoc.String() { - tm := time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), - t.Minute(), t.Second(), t.Nanosecond(), dbLoc).In(uiLoc) - return &tm, nil - } - tm := t.In(uiLoc) - return &tm, nil - case time.Time: - z, _ := t.Zone() - if len(z) == 0 || t.Year() == 0 || t.Location().String() != dbLoc.String() { - tm := time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), - t.Minute(), t.Second(), 
t.Nanosecond(), dbLoc).In(uiLoc) - return &tm, nil - } - tm := t.In(uiLoc) - return &tm, nil - case int: - tm := time.Unix(int64(t), 0).In(uiLoc) - return &tm, nil - case int64: - tm := time.Unix(t, 0).In(uiLoc) - return &tm, nil - case *sql.NullInt64: - tm := time.Unix(t.Int64, 0).In(uiLoc) - return &tm, nil - } - return nil, fmt.Errorf("unsupported value %#v as time", src) -} diff --git a/vendor/xorm.io/xorm/core/db.go b/vendor/xorm.io/xorm/core/db.go deleted file mode 100644 index b476ef9a..00000000 --- a/vendor/xorm.io/xorm/core/db.go +++ /dev/null @@ -1,308 +0,0 @@ -// Copyright 2019 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package core - -import ( - "context" - "database/sql" - "database/sql/driver" - "fmt" - "reflect" - "regexp" - "sync" - - "xorm.io/xorm/contexts" - "xorm.io/xorm/log" - "xorm.io/xorm/names" -) - -var ( - // DefaultCacheSize sets the default cache size - DefaultCacheSize = 200 -) - -// MapToSlice map query and struct as sql and args -func MapToSlice(query string, mp interface{}) (string, []interface{}, error) { - vv := reflect.ValueOf(mp) - if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Map { - return "", []interface{}{}, ErrNoMapPointer - } - - args := make([]interface{}, 0, len(vv.Elem().MapKeys())) - var err error - query = re.ReplaceAllStringFunc(query, func(src string) string { - v := vv.Elem().MapIndex(reflect.ValueOf(src[1:])) - if !v.IsValid() { - err = fmt.Errorf("map key %s is missing", src[1:]) - } else { - args = append(args, v.Interface()) - } - return "?" 
- }) - - return query, args, err -} - -// StructToSlice converts a query and struct as sql and args -func StructToSlice(query string, st interface{}) (string, []interface{}, error) { - vv := reflect.ValueOf(st) - if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Struct { - return "", []interface{}{}, ErrNoStructPointer - } - - args := make([]interface{}, 0) - var err error - query = re.ReplaceAllStringFunc(query, func(src string) string { - fv := vv.Elem().FieldByName(src[1:]).Interface() - if v, ok := fv.(driver.Valuer); ok { - var value driver.Value - value, err = v.Value() - if err != nil { - return "?" - } - args = append(args, value) - } else { - args = append(args, fv) - } - return "?" - }) - if err != nil { - return "", []interface{}{}, err - } - return query, args, nil -} - -type cacheStruct struct { - value reflect.Value - idx int -} - -var ( - _ QueryExecuter = &DB{} -) - -// DB is a wrap of sql.DB with extra contents -type DB struct { - *sql.DB - Mapper names.Mapper - reflectCache map[reflect.Type]*cacheStruct - reflectCacheMutex sync.RWMutex - Logger log.ContextLogger - hooks contexts.Hooks -} - -// Open opens a database -func Open(driverName, dataSourceName string) (*DB, error) { - db, err := sql.Open(driverName, dataSourceName) - if err != nil { - return nil, err - } - return &DB{ - DB: db, - Mapper: names.NewCacheMapper(&names.SnakeMapper{}), - reflectCache: make(map[reflect.Type]*cacheStruct), - }, nil -} - -// FromDB creates a DB from a sql.DB -func FromDB(db *sql.DB) *DB { - return &DB{ - DB: db, - Mapper: names.NewCacheMapper(&names.SnakeMapper{}), - reflectCache: make(map[reflect.Type]*cacheStruct), - } -} - -// NeedLogSQL returns true if need to log SQL -func (db *DB) NeedLogSQL(ctx context.Context) bool { - if db.Logger == nil { - return false - } - - v := ctx.Value(log.SessionShowSQLKey) - if showSQL, ok := v.(bool); ok { - return showSQL - } - return db.Logger.IsShowSQL() -} - -func (db *DB) reflectNew(typ reflect.Type) 
reflect.Value { - db.reflectCacheMutex.Lock() - defer db.reflectCacheMutex.Unlock() - cs, ok := db.reflectCache[typ] - if !ok || cs.idx+1 > DefaultCacheSize-1 { - cs = &cacheStruct{reflect.MakeSlice(reflect.SliceOf(typ), DefaultCacheSize, DefaultCacheSize), 0} - db.reflectCache[typ] = cs - } else { - cs.idx++ - } - return cs.value.Index(cs.idx).Addr() -} - -// QueryContext overwrites sql.DB.QueryContext -func (db *DB) QueryContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) { - hookCtx := contexts.NewContextHook(ctx, query, args) - ctx, err := db.beforeProcess(hookCtx) - if err != nil { - return nil, err - } - rows, err := db.DB.QueryContext(ctx, query, args...) - hookCtx.End(ctx, nil, err) - if err := db.afterProcess(hookCtx); err != nil { - if rows != nil { - rows.Close() - } - return nil, err - } - return &Rows{rows, db}, nil -} - -// Query overwrites sql.DB.Query -func (db *DB) Query(query string, args ...interface{}) (*Rows, error) { - return db.QueryContext(context.Background(), query, args...) -} - -// QueryMapContext executes query with parameters via map and context -func (db *DB) QueryMapContext(ctx context.Context, query string, mp interface{}) (*Rows, error) { - query, args, err := MapToSlice(query, mp) - if err != nil { - return nil, err - } - return db.QueryContext(ctx, query, args...) -} - -// QueryMap executes query with parameters via map -func (db *DB) QueryMap(query string, mp interface{}) (*Rows, error) { - return db.QueryMapContext(context.Background(), query, mp) -} - -// QueryStructContext query rows with struct -func (db *DB) QueryStructContext(ctx context.Context, query string, st interface{}) (*Rows, error) { - query, args, err := StructToSlice(query, st) - if err != nil { - return nil, err - } - return db.QueryContext(ctx, query, args...) 
-} - -// QueryStruct query rows with struct -func (db *DB) QueryStruct(query string, st interface{}) (*Rows, error) { - return db.QueryStructContext(context.Background(), query, st) -} - -// QueryRowContext query row with args -func (db *DB) QueryRowContext(ctx context.Context, query string, args ...interface{}) *Row { - rows, err := db.QueryContext(ctx, query, args...) - if err != nil { - return &Row{nil, err} - } - return &Row{rows, nil} -} - -// QueryRow query row with args -func (db *DB) QueryRow(query string, args ...interface{}) *Row { - return db.QueryRowContext(context.Background(), query, args...) -} - -// QueryRowMapContext query row with map -func (db *DB) QueryRowMapContext(ctx context.Context, query string, mp interface{}) *Row { - query, args, err := MapToSlice(query, mp) - if err != nil { - return &Row{nil, err} - } - return db.QueryRowContext(ctx, query, args...) -} - -// QueryRowMap query row with map -func (db *DB) QueryRowMap(query string, mp interface{}) *Row { - return db.QueryRowMapContext(context.Background(), query, mp) -} - -// QueryRowStructContext query row with struct -func (db *DB) QueryRowStructContext(ctx context.Context, query string, st interface{}) *Row { - query, args, err := StructToSlice(query, st) - if err != nil { - return &Row{nil, err} - } - return db.QueryRowContext(ctx, query, args...) -} - -// QueryRowStruct query row with struct -func (db *DB) QueryRowStruct(query string, st interface{}) *Row { - return db.QueryRowStructContext(context.Background(), query, st) -} - -var ( - re = regexp.MustCompile(`[?](\w+)`) -) - -// ExecMapContext exec map with context.ContextHook -// insert into (name) values (?) -// insert into (name) values (?name) -func (db *DB) ExecMapContext(ctx context.Context, query string, mp interface{}) (sql.Result, error) { - query, args, err := MapToSlice(query, mp) - if err != nil { - return nil, err - } - return db.ExecContext(ctx, query, args...) 
-} - -// ExecMap exec query with map -func (db *DB) ExecMap(query string, mp interface{}) (sql.Result, error) { - return db.ExecMapContext(context.Background(), query, mp) -} - -// ExecStructContext exec query with map -func (db *DB) ExecStructContext(ctx context.Context, query string, st interface{}) (sql.Result, error) { - query, args, err := StructToSlice(query, st) - if err != nil { - return nil, err - } - return db.ExecContext(ctx, query, args...) -} - -// ExecContext exec query with args -func (db *DB) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) { - hookCtx := contexts.NewContextHook(ctx, query, args) - ctx, err := db.beforeProcess(hookCtx) - if err != nil { - return nil, err - } - res, err := db.DB.ExecContext(ctx, query, args...) - hookCtx.End(ctx, res, err) - if err := db.afterProcess(hookCtx); err != nil { - return nil, err - } - return res, nil -} - -// ExecStruct exec query with struct -func (db *DB) ExecStruct(query string, st interface{}) (sql.Result, error) { - return db.ExecStructContext(context.Background(), query, st) -} - -func (db *DB) beforeProcess(c *contexts.ContextHook) (context.Context, error) { - if db.NeedLogSQL(c.Ctx) { - db.Logger.BeforeSQL(log.LogContext(*c)) - } - ctx, err := db.hooks.BeforeProcess(c) - if err != nil { - return nil, err - } - return ctx, nil -} - -func (db *DB) afterProcess(c *contexts.ContextHook) error { - err := db.hooks.AfterProcess(c) - if db.NeedLogSQL(c.Ctx) { - db.Logger.AfterSQL(log.LogContext(*c)) - } - return err -} - -// AddHook adds hook -func (db *DB) AddHook(h ...contexts.Hook) { - db.hooks.AddHook(h...) -} diff --git a/vendor/xorm.io/xorm/core/error.go b/vendor/xorm.io/xorm/core/error.go deleted file mode 100644 index 1fd18348..00000000 --- a/vendor/xorm.io/xorm/core/error.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2019 The Xorm Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package core - -import "errors" - -var ( - // ErrNoMapPointer represents error when no map pointer - ErrNoMapPointer = errors.New("mp should be a map's pointer") - // ErrNoStructPointer represents error when no struct pointer - ErrNoStructPointer = errors.New("mp should be a struct's pointer") -) diff --git a/vendor/xorm.io/xorm/core/interface.go b/vendor/xorm.io/xorm/core/interface.go deleted file mode 100644 index a5c8e4e2..00000000 --- a/vendor/xorm.io/xorm/core/interface.go +++ /dev/null @@ -1,22 +0,0 @@ -package core - -import ( - "context" - "database/sql" -) - -// Queryer represents an interface to query a SQL to get data from database -type Queryer interface { - QueryContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) -} - -// Executer represents an interface to execute a SQL -type Executer interface { - ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) -} - -// QueryExecuter combines the Queryer and Executer -type QueryExecuter interface { - Queryer - Executer -} diff --git a/vendor/xorm.io/xorm/core/rows.go b/vendor/xorm.io/xorm/core/rows.go deleted file mode 100644 index 75d6ebf0..00000000 --- a/vendor/xorm.io/xorm/core/rows.go +++ /dev/null @@ -1,346 +0,0 @@ -// Copyright 2019 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package core - -import ( - "database/sql" - "errors" - "reflect" - "sync" -) - -// Rows represents rows of table -type Rows struct { - *sql.Rows - db *DB -} - -// ToMapString returns all records -func (rs *Rows) ToMapString() ([]map[string]string, error) { - cols, err := rs.Columns() - if err != nil { - return nil, err - } - - var results = make([]map[string]string, 0, 10) - for rs.Next() { - var record = make(map[string]string, len(cols)) - err = rs.ScanMap(&record) - if err != nil { - return nil, err - } - results = append(results, record) - } - return results, nil -} - -// ScanStructByIndex scan data to a struct's pointer according field index -func (rs *Rows) ScanStructByIndex(dest ...interface{}) error { - if len(dest) == 0 { - return errors.New("at least one struct") - } - - vvvs := make([]reflect.Value, len(dest)) - for i, s := range dest { - vv := reflect.ValueOf(s) - if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Struct { - return errors.New("dest should be a struct's pointer") - } - - vvvs[i] = vv.Elem() - } - - cols, err := rs.Columns() - if err != nil { - return err - } - newDest := make([]interface{}, len(cols)) - - var i = 0 - for _, vvv := range vvvs { - for j := 0; j < vvv.NumField(); j++ { - newDest[i] = vvv.Field(j).Addr().Interface() - i++ - } - } - - return rs.Rows.Scan(newDest...) 
-} - -var ( - fieldCache = make(map[reflect.Type]map[string]int) - fieldCacheMutex sync.RWMutex -) - -func fieldByName(v reflect.Value, name string) reflect.Value { - t := v.Type() - fieldCacheMutex.RLock() - cache, ok := fieldCache[t] - fieldCacheMutex.RUnlock() - if !ok { - cache = make(map[string]int) - for i := 0; i < v.NumField(); i++ { - cache[t.Field(i).Name] = i - } - fieldCacheMutex.Lock() - fieldCache[t] = cache - fieldCacheMutex.Unlock() - } - - if i, ok := cache[name]; ok { - return v.Field(i) - } - - return reflect.Zero(t) -} - -// ScanStructByName scan data to a struct's pointer according field name -func (rs *Rows) ScanStructByName(dest interface{}) error { - vv := reflect.ValueOf(dest) - if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Struct { - return errors.New("dest should be a struct's pointer") - } - - cols, err := rs.Columns() - if err != nil { - return err - } - - newDest := make([]interface{}, len(cols)) - var v EmptyScanner - for j, name := range cols { - f := fieldByName(vv.Elem(), rs.db.Mapper.Table2Obj(name)) - if f.IsValid() { - newDest[j] = f.Addr().Interface() - } else { - newDest[j] = &v - } - } - - return rs.Rows.Scan(newDest...) -} - -// ScanSlice scan data to a slice's pointer, slice's length should equal to columns' number -func (rs *Rows) ScanSlice(dest interface{}) error { - vv := reflect.ValueOf(dest) - if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Slice { - return errors.New("dest should be a slice's pointer") - } - - vvv := vv.Elem() - cols, err := rs.Columns() - if err != nil { - return err - } - - newDest := make([]interface{}, len(cols)) - - for j := 0; j < len(cols); j++ { - if j >= vvv.Len() { - newDest[j] = reflect.New(vvv.Type().Elem()).Interface() - } else { - newDest[j] = vvv.Index(j).Addr().Interface() - } - } - - err = rs.Rows.Scan(newDest...) 
- if err != nil { - return err - } - - srcLen := vvv.Len() - for i := srcLen; i < len(cols); i++ { - vvv = reflect.Append(vvv, reflect.ValueOf(newDest[i]).Elem()) - } - return nil -} - -// ScanMap scan data to a map's pointer -func (rs *Rows) ScanMap(dest interface{}) error { - vv := reflect.ValueOf(dest) - if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Map { - return errors.New("dest should be a map's pointer") - } - - cols, err := rs.Columns() - if err != nil { - return err - } - - newDest := make([]interface{}, len(cols)) - vvv := vv.Elem() - - for i := range cols { - newDest[i] = rs.db.reflectNew(vvv.Type().Elem()).Interface() - } - - err = rs.Rows.Scan(newDest...) - if err != nil { - return err - } - - for i, name := range cols { - vname := reflect.ValueOf(name) - vvv.SetMapIndex(vname, reflect.ValueOf(newDest[i]).Elem()) - } - - return nil -} - -// Row reprents a row of a tab -type Row struct { - rows *Rows - // One of these two will be non-nil: - err error // deferred error for easy chaining -} - -// ErrorRow return an error row -func ErrorRow(err error) *Row { - return &Row{ - err: err, - } -} - -// NewRow from rows -func NewRow(rows *Rows, err error) *Row { - return &Row{rows, err} -} - -// Columns returns all columns of the row -func (row *Row) Columns() ([]string, error) { - if row.err != nil { - return nil, row.err - } - return row.rows.Columns() -} - -// Scan retrieves all row column values -func (row *Row) Scan(dest ...interface{}) error { - if row.err != nil { - return row.err - } - defer row.rows.Close() - - for _, dp := range dest { - if _, ok := dp.(*sql.RawBytes); ok { - return errors.New("sql: RawBytes isn't allowed on Row.Scan") - } - } - - if !row.rows.Next() { - if err := row.rows.Err(); err != nil { - return err - } - return sql.ErrNoRows - } - err := row.rows.Scan(dest...) - if err != nil { - return err - } - // Make sure the query can be processed to completion with no errors. 
- return row.rows.Close() -} - -// ScanStructByName retrieves all row column values into a struct -func (row *Row) ScanStructByName(dest interface{}) error { - if row.err != nil { - return row.err - } - defer row.rows.Close() - - if !row.rows.Next() { - if err := row.rows.Err(); err != nil { - return err - } - return sql.ErrNoRows - } - err := row.rows.ScanStructByName(dest) - if err != nil { - return err - } - // Make sure the query can be processed to completion with no errors. - return row.rows.Close() -} - -// ScanStructByIndex retrieves all row column values into a struct -func (row *Row) ScanStructByIndex(dest interface{}) error { - if row.err != nil { - return row.err - } - defer row.rows.Close() - - if !row.rows.Next() { - if err := row.rows.Err(); err != nil { - return err - } - return sql.ErrNoRows - } - err := row.rows.ScanStructByIndex(dest) - if err != nil { - return err - } - // Make sure the query can be processed to completion with no errors. - return row.rows.Close() -} - -// ScanSlice scan data to a slice's pointer, slice's length should equal to columns' number -func (row *Row) ScanSlice(dest interface{}) error { - if row.err != nil { - return row.err - } - defer row.rows.Close() - - if !row.rows.Next() { - if err := row.rows.Err(); err != nil { - return err - } - return sql.ErrNoRows - } - err := row.rows.ScanSlice(dest) - if err != nil { - return err - } - - // Make sure the query can be processed to completion with no errors. - return row.rows.Close() -} - -// ScanMap scan data to a map's pointer -func (row *Row) ScanMap(dest interface{}) error { - if row.err != nil { - return row.err - } - defer row.rows.Close() - - if !row.rows.Next() { - if err := row.rows.Err(); err != nil { - return err - } - return sql.ErrNoRows - } - err := row.rows.ScanMap(dest) - if err != nil { - return err - } - - // Make sure the query can be processed to completion with no errors. 
- return row.rows.Close() -} - -// ToMapString returns all clumns of this record -func (row *Row) ToMapString() (map[string]string, error) { - cols, err := row.Columns() - if err != nil { - return nil, err - } - - var record = make(map[string]string, len(cols)) - err = row.ScanMap(&record) - if err != nil { - return nil, err - } - - return record, nil -} diff --git a/vendor/xorm.io/xorm/core/scan.go b/vendor/xorm.io/xorm/core/scan.go deleted file mode 100644 index 1e7e4525..00000000 --- a/vendor/xorm.io/xorm/core/scan.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2019 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package core - -import ( - "database/sql/driver" - "fmt" - "time" -) - -// NullTime defines a customize type NullTime -type NullTime time.Time - -var ( - _ driver.Valuer = NullTime{} -) - -// Scan implements driver.Valuer -func (ns *NullTime) Scan(value interface{}) error { - if value == nil { - return nil - } - return convertTime(ns, value) -} - -// Value implements the driver Valuer interface. -func (ns NullTime) Value() (driver.Value, error) { - if (time.Time)(ns).IsZero() { - return nil, nil - } - return (time.Time)(ns).Format("2006-01-02 15:04:05"), nil -} - -func convertTime(dest *NullTime, src interface{}) error { - // Common cases, without reflect. 
- switch s := src.(type) { - case string: - t, err := time.Parse("2006-01-02 15:04:05", s) - if err != nil { - return err - } - *dest = NullTime(t) - return nil - case []uint8: - t, err := time.Parse("2006-01-02 15:04:05", string(s)) - if err != nil { - return err - } - *dest = NullTime(t) - return nil - case time.Time: - *dest = NullTime(s) - return nil - case nil: - default: - return fmt.Errorf("unsupported driver -> Scan pair: %T -> %T", src, dest) - } - return nil -} - -// EmptyScanner represents an empty scanner -type EmptyScanner struct { -} - -// Scan implements -func (EmptyScanner) Scan(src interface{}) error { - return nil -} diff --git a/vendor/xorm.io/xorm/core/stmt.go b/vendor/xorm.io/xorm/core/stmt.go deleted file mode 100644 index 3247efed..00000000 --- a/vendor/xorm.io/xorm/core/stmt.go +++ /dev/null @@ -1,213 +0,0 @@ -// Copyright 2019 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package core - -import ( - "context" - "database/sql" - "errors" - "reflect" - - "xorm.io/xorm/contexts" -) - -// Stmt reprents a stmt objects -type Stmt struct { - *sql.Stmt - db *DB - names map[string]int - query string -} - -// PrepareContext creates a prepare statement -func (db *DB) PrepareContext(ctx context.Context, query string) (*Stmt, error) { - names := make(map[string]int) - var i int - query = re.ReplaceAllStringFunc(query, func(src string) string { - names[src[1:]] = i - i++ - return "?" 
- }) - hookCtx := contexts.NewContextHook(ctx, "PREPARE", nil) - ctx, err := db.beforeProcess(hookCtx) - if err != nil { - return nil, err - } - stmt, err := db.DB.PrepareContext(ctx, query) - hookCtx.End(ctx, nil, err) - if err := db.afterProcess(hookCtx); err != nil { - return nil, err - } - return &Stmt{stmt, db, names, query}, nil -} - -// Prepare creates a prepare statement -func (db *DB) Prepare(query string) (*Stmt, error) { - return db.PrepareContext(context.Background(), query) -} - -// ExecMapContext execute with map -func (s *Stmt) ExecMapContext(ctx context.Context, mp interface{}) (sql.Result, error) { - vv := reflect.ValueOf(mp) - if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Map { - return nil, errors.New("mp should be a map's pointer") - } - - args := make([]interface{}, len(s.names)) - for k, i := range s.names { - args[i] = vv.Elem().MapIndex(reflect.ValueOf(k)).Interface() - } - return s.ExecContext(ctx, args...) -} - -// ExecMap executes with map -func (s *Stmt) ExecMap(mp interface{}) (sql.Result, error) { - return s.ExecMapContext(context.Background(), mp) -} - -// ExecStructContext executes with struct -func (s *Stmt) ExecStructContext(ctx context.Context, st interface{}) (sql.Result, error) { - vv := reflect.ValueOf(st) - if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Struct { - return nil, errors.New("mp should be a map's pointer") - } - - args := make([]interface{}, len(s.names)) - for k, i := range s.names { - args[i] = vv.Elem().FieldByName(k).Interface() - } - return s.ExecContext(ctx, args...) 
-} - -// ExecStruct executes with struct -func (s *Stmt) ExecStruct(st interface{}) (sql.Result, error) { - return s.ExecStructContext(context.Background(), st) -} - -// ExecContext with args -func (s *Stmt) ExecContext(ctx context.Context, args ...interface{}) (sql.Result, error) { - hookCtx := contexts.NewContextHook(ctx, s.query, args) - ctx, err := s.db.beforeProcess(hookCtx) - if err != nil { - return nil, err - } - res, err := s.Stmt.ExecContext(ctx, args...) - hookCtx.End(ctx, res, err) - if err := s.db.afterProcess(hookCtx); err != nil { - return nil, err - } - return res, nil -} - -// QueryContext query with args -func (s *Stmt) QueryContext(ctx context.Context, args ...interface{}) (*Rows, error) { - hookCtx := contexts.NewContextHook(ctx, s.query, args) - ctx, err := s.db.beforeProcess(hookCtx) - if err != nil { - return nil, err - } - rows, err := s.Stmt.QueryContext(ctx, args...) - hookCtx.End(ctx, nil, err) - if err := s.db.afterProcess(hookCtx); err != nil { - return nil, err - } - return &Rows{rows, s.db}, nil -} - -// Query query with args -func (s *Stmt) Query(args ...interface{}) (*Rows, error) { - return s.QueryContext(context.Background(), args...) -} - -// QueryMapContext query with map -func (s *Stmt) QueryMapContext(ctx context.Context, mp interface{}) (*Rows, error) { - vv := reflect.ValueOf(mp) - if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Map { - return nil, errors.New("mp should be a map's pointer") - } - - args := make([]interface{}, len(s.names)) - for k, i := range s.names { - args[i] = vv.Elem().MapIndex(reflect.ValueOf(k)).Interface() - } - - return s.QueryContext(ctx, args...) 
-} - -// QueryMap query with map -func (s *Stmt) QueryMap(mp interface{}) (*Rows, error) { - return s.QueryMapContext(context.Background(), mp) -} - -// QueryStructContext query with struct -func (s *Stmt) QueryStructContext(ctx context.Context, st interface{}) (*Rows, error) { - vv := reflect.ValueOf(st) - if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Struct { - return nil, errors.New("mp should be a map's pointer") - } - - args := make([]interface{}, len(s.names)) - for k, i := range s.names { - args[i] = vv.Elem().FieldByName(k).Interface() - } - - return s.QueryContext(ctx, args...) -} - -// QueryStruct query with struct -func (s *Stmt) QueryStruct(st interface{}) (*Rows, error) { - return s.QueryStructContext(context.Background(), st) -} - -// QueryRowContext query row with args -func (s *Stmt) QueryRowContext(ctx context.Context, args ...interface{}) *Row { - rows, err := s.QueryContext(ctx, args...) - return &Row{rows, err} -} - -// QueryRow query row with args -func (s *Stmt) QueryRow(args ...interface{}) *Row { - return s.QueryRowContext(context.Background(), args...) -} - -// QueryRowMapContext query row with map -func (s *Stmt) QueryRowMapContext(ctx context.Context, mp interface{}) *Row { - vv := reflect.ValueOf(mp) - if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Map { - return &Row{nil, errors.New("mp should be a map's pointer")} - } - - args := make([]interface{}, len(s.names)) - for k, i := range s.names { - args[i] = vv.Elem().MapIndex(reflect.ValueOf(k)).Interface() - } - - return s.QueryRowContext(ctx, args...) 
-} - -// QueryRowMap query row with map -func (s *Stmt) QueryRowMap(mp interface{}) *Row { - return s.QueryRowMapContext(context.Background(), mp) -} - -// QueryRowStructContext query row with struct -func (s *Stmt) QueryRowStructContext(ctx context.Context, st interface{}) *Row { - vv := reflect.ValueOf(st) - if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Struct { - return &Row{nil, errors.New("st should be a struct's pointer")} - } - - args := make([]interface{}, len(s.names)) - for k, i := range s.names { - args[i] = vv.Elem().FieldByName(k).Interface() - } - - return s.QueryRowContext(ctx, args...) -} - -// QueryRowStruct query row with struct -func (s *Stmt) QueryRowStruct(st interface{}) *Row { - return s.QueryRowStructContext(context.Background(), st) -} diff --git a/vendor/xorm.io/xorm/core/tx.go b/vendor/xorm.io/xorm/core/tx.go deleted file mode 100644 index a2f745f8..00000000 --- a/vendor/xorm.io/xorm/core/tx.go +++ /dev/null @@ -1,238 +0,0 @@ -// Copyright 2019 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package core - -import ( - "context" - "database/sql" - - "xorm.io/xorm/contexts" -) - -var ( - _ QueryExecuter = &Tx{} -) - -// Tx represents a transaction -type Tx struct { - *sql.Tx - db *DB - ctx context.Context -} - -// BeginTx begin a transaction with option -func (db *DB) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) { - hookCtx := contexts.NewContextHook(ctx, "BEGIN TRANSACTION", nil) - ctx, err := db.beforeProcess(hookCtx) - if err != nil { - return nil, err - } - tx, err := db.DB.BeginTx(ctx, opts) - hookCtx.End(ctx, nil, err) - if err := db.afterProcess(hookCtx); err != nil { - return nil, err - } - return &Tx{tx, db, ctx}, nil -} - -// Begin begins a transaction -func (db *DB) Begin() (*Tx, error) { - return db.BeginTx(context.Background(), nil) -} - -// Commit submit the transaction -func (tx *Tx) Commit() error { - hookCtx := contexts.NewContextHook(tx.ctx, "COMMIT", nil) - ctx, err := tx.db.beforeProcess(hookCtx) - if err != nil { - return err - } - err = tx.Tx.Commit() - hookCtx.End(ctx, nil, err) - return tx.db.afterProcess(hookCtx) -} - -// Rollback rollback the transaction -func (tx *Tx) Rollback() error { - hookCtx := contexts.NewContextHook(tx.ctx, "ROLLBACK", nil) - ctx, err := tx.db.beforeProcess(hookCtx) - if err != nil { - return err - } - err = tx.Tx.Rollback() - hookCtx.End(ctx, nil, err) - return tx.db.afterProcess(hookCtx) -} - -// PrepareContext prepare the query -func (tx *Tx) PrepareContext(ctx context.Context, query string) (*Stmt, error) { - names := make(map[string]int) - var i int - query = re.ReplaceAllStringFunc(query, func(src string) string { - names[src[1:]] = i - i++ - return "?" 
- }) - hookCtx := contexts.NewContextHook(ctx, "PREPARE", nil) - ctx, err := tx.db.beforeProcess(hookCtx) - if err != nil { - return nil, err - } - stmt, err := tx.Tx.PrepareContext(ctx, query) - hookCtx.End(ctx, nil, err) - if err := tx.db.afterProcess(hookCtx); err != nil { - return nil, err - } - return &Stmt{stmt, tx.db, names, query}, nil -} - -// Prepare prepare the query -func (tx *Tx) Prepare(query string) (*Stmt, error) { - return tx.PrepareContext(context.Background(), query) -} - -// StmtContext creates Stmt with context -func (tx *Tx) StmtContext(ctx context.Context, stmt *Stmt) *Stmt { - stmt.Stmt = tx.Tx.StmtContext(ctx, stmt.Stmt) - return stmt -} - -// Stmt creates Stmt -func (tx *Tx) Stmt(stmt *Stmt) *Stmt { - return tx.StmtContext(context.Background(), stmt) -} - -// ExecMapContext executes query with args in a map -func (tx *Tx) ExecMapContext(ctx context.Context, query string, mp interface{}) (sql.Result, error) { - query, args, err := MapToSlice(query, mp) - if err != nil { - return nil, err - } - return tx.ExecContext(ctx, query, args...) -} - -// ExecMap executes query with args in a map -func (tx *Tx) ExecMap(query string, mp interface{}) (sql.Result, error) { - return tx.ExecMapContext(context.Background(), query, mp) -} - -// ExecStructContext executes query with args in a struct -func (tx *Tx) ExecStructContext(ctx context.Context, query string, st interface{}) (sql.Result, error) { - query, args, err := StructToSlice(query, st) - if err != nil { - return nil, err - } - return tx.ExecContext(ctx, query, args...) -} - -// ExecContext executes a query with args -func (tx *Tx) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) { - hookCtx := contexts.NewContextHook(ctx, query, args) - ctx, err := tx.db.beforeProcess(hookCtx) - if err != nil { - return nil, err - } - res, err := tx.Tx.ExecContext(ctx, query, args...) 
- hookCtx.End(ctx, res, err) - if err := tx.db.afterProcess(hookCtx); err != nil { - return nil, err - } - return res, err -} - -// ExecStruct executes query with args in a struct -func (tx *Tx) ExecStruct(query string, st interface{}) (sql.Result, error) { - return tx.ExecStructContext(context.Background(), query, st) -} - -// QueryContext query with args -func (tx *Tx) QueryContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) { - hookCtx := contexts.NewContextHook(ctx, query, args) - ctx, err := tx.db.beforeProcess(hookCtx) - if err != nil { - return nil, err - } - rows, err := tx.Tx.QueryContext(ctx, query, args...) - hookCtx.End(ctx, nil, err) - if err := tx.db.afterProcess(hookCtx); err != nil { - if rows != nil { - rows.Close() - } - return nil, err - } - return &Rows{rows, tx.db}, nil -} - -// Query query with args -func (tx *Tx) Query(query string, args ...interface{}) (*Rows, error) { - return tx.QueryContext(context.Background(), query, args...) -} - -// QueryMapContext query with args in a map -func (tx *Tx) QueryMapContext(ctx context.Context, query string, mp interface{}) (*Rows, error) { - query, args, err := MapToSlice(query, mp) - if err != nil { - return nil, err - } - return tx.QueryContext(ctx, query, args...) -} - -// QueryMap query with args in a map -func (tx *Tx) QueryMap(query string, mp interface{}) (*Rows, error) { - return tx.QueryMapContext(context.Background(), query, mp) -} - -// QueryStructContext query with args in struct -func (tx *Tx) QueryStructContext(ctx context.Context, query string, st interface{}) (*Rows, error) { - query, args, err := StructToSlice(query, st) - if err != nil { - return nil, err - } - return tx.QueryContext(ctx, query, args...) 
-} - -// QueryStruct query with args in struct -func (tx *Tx) QueryStruct(query string, st interface{}) (*Rows, error) { - return tx.QueryStructContext(context.Background(), query, st) -} - -// QueryRowContext query one row with args -func (tx *Tx) QueryRowContext(ctx context.Context, query string, args ...interface{}) *Row { - rows, err := tx.QueryContext(ctx, query, args...) - return &Row{rows, err} -} - -// QueryRow query one row with args -func (tx *Tx) QueryRow(query string, args ...interface{}) *Row { - return tx.QueryRowContext(context.Background(), query, args...) -} - -// QueryRowMapContext query one row with args in a map -func (tx *Tx) QueryRowMapContext(ctx context.Context, query string, mp interface{}) *Row { - query, args, err := MapToSlice(query, mp) - if err != nil { - return &Row{nil, err} - } - return tx.QueryRowContext(ctx, query, args...) -} - -// QueryRowMap query one row with args in a map -func (tx *Tx) QueryRowMap(query string, mp interface{}) *Row { - return tx.QueryRowMapContext(context.Background(), query, mp) -} - -// QueryRowStructContext query one row with args in struct -func (tx *Tx) QueryRowStructContext(ctx context.Context, query string, st interface{}) *Row { - query, args, err := StructToSlice(query, st) - if err != nil { - return &Row{nil, err} - } - return tx.QueryRowContext(ctx, query, args...) -} - -// QueryRowStruct query one row with args in struct -func (tx *Tx) QueryRowStruct(query string, st interface{}) *Row { - return tx.QueryRowStructContext(context.Background(), query, st) -} diff --git a/vendor/xorm.io/xorm/dialects/dameng.go b/vendor/xorm.io/xorm/dialects/dameng.go deleted file mode 100644 index 5e92ec2f..00000000 --- a/vendor/xorm.io/xorm/dialects/dameng.go +++ /dev/null @@ -1,1201 +0,0 @@ -// Copyright 2021 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package dialects - -import ( - "context" - "database/sql" - "errors" - "fmt" - "net/url" - "strconv" - "strings" - - "xorm.io/xorm/convert" - "xorm.io/xorm/core" - "xorm.io/xorm/internal/utils" - "xorm.io/xorm/schemas" -) - -func init() { - RegisterDriver("dm", &damengDriver{}) - RegisterDialect(schemas.DAMENG, func() Dialect { - return &dameng{} - }) -} - -var ( - damengReservedWords = map[string]bool{ - "ACCESS": true, - "ACCOUNT": true, - "ACTIVATE": true, - "ADD": true, - "ADMIN": true, - "ADVISE": true, - "AFTER": true, - "ALL": true, - "ALL_ROWS": true, - "ALLOCATE": true, - "ALTER": true, - "ANALYZE": true, - "AND": true, - "ANY": true, - "ARCHIVE": true, - "ARCHIVELOG": true, - "ARRAY": true, - "AS": true, - "ASC": true, - "AT": true, - "AUDIT": true, - "AUTHENTICATED": true, - "AUTHORIZATION": true, - "AUTOEXTEND": true, - "AUTOMATIC": true, - "BACKUP": true, - "BECOME": true, - "BEFORE": true, - "BEGIN": true, - "BETWEEN": true, - "BFILE": true, - "BITMAP": true, - "BLOB": true, - "BLOCK": true, - "BODY": true, - "BY": true, - "CACHE": true, - "CACHE_INSTANCES": true, - "CANCEL": true, - "CASCADE": true, - "CAST": true, - "CFILE": true, - "CHAINED": true, - "CHANGE": true, - "CHAR": true, - "CHAR_CS": true, - "CHARACTER": true, - "CHECK": true, - "CHECKPOINT": true, - "CHOOSE": true, - "CHUNK": true, - "CLEAR": true, - "CLOB": true, - "CLONE": true, - "CLOSE": true, - "CLOSE_CACHED_OPEN_CURSORS": true, - "CLUSTER": true, - "COALESCE": true, - "COLUMN": true, - "COLUMNS": true, - "COMMENT": true, - "COMMIT": true, - "COMMITTED": true, - "COMPATIBILITY": true, - "COMPILE": true, - "COMPLETE": true, - "COMPOSITE_LIMIT": true, - "COMPRESS": true, - "COMPUTE": true, - "CONNECT": true, - "CONNECT_TIME": true, - "CONSTRAINT": true, - "CONSTRAINTS": true, - "CONTENTS": true, - "CONTINUE": true, - "CONTROLFILE": true, - "CONVERT": true, - "COST": true, - "CPU_PER_CALL": true, - "CPU_PER_SESSION": true, - "CREATE": true, - "CURRENT": true, - "CURRENT_SCHEMA": 
true, - "CURREN_USER": true, - "CURSOR": true, - "CYCLE": true, - "DANGLING": true, - "DATABASE": true, - "DATAFILE": true, - "DATAFILES": true, - "DATAOBJNO": true, - "DATE": true, - "DBA": true, - "DBHIGH": true, - "DBLOW": true, - "DBMAC": true, - "DEALLOCATE": true, - "DEBUG": true, - "DEC": true, - "DECIMAL": true, - "DECLARE": true, - "DEFAULT": true, - "DEFERRABLE": true, - "DEFERRED": true, - "DEGREE": true, - "DELETE": true, - "DEREF": true, - "DESC": true, - "DIRECTORY": true, - "DISABLE": true, - "DISCONNECT": true, - "DISMOUNT": true, - "DISTINCT": true, - "DISTRIBUTED": true, - "DML": true, - "DOUBLE": true, - "DROP": true, - "DUMP": true, - "EACH": true, - "ELSE": true, - "ENABLE": true, - "END": true, - "ENFORCE": true, - "ENTRY": true, - "ESCAPE": true, - "EXCEPT": true, - "EXCEPTIONS": true, - "EXCHANGE": true, - "EXCLUDING": true, - "EXCLUSIVE": true, - "EXECUTE": true, - "EXISTS": true, - "EXPIRE": true, - "EXPLAIN": true, - "EXTENT": true, - "EXTENTS": true, - "EXTERNALLY": true, - "FAILED_LOGIN_ATTEMPTS": true, - "FALSE": true, - "FAST": true, - "FILE": true, - "FIRST_ROWS": true, - "FLAGGER": true, - "FLOAT": true, - "FLOB": true, - "FLUSH": true, - "FOR": true, - "FORCE": true, - "FOREIGN": true, - "FREELIST": true, - "FREELISTS": true, - "FROM": true, - "FULL": true, - "FUNCTION": true, - "GLOBAL": true, - "GLOBALLY": true, - "GLOBAL_NAME": true, - "GRANT": true, - "GROUP": true, - "GROUPS": true, - "HASH": true, - "HASHKEYS": true, - "HAVING": true, - "HEADER": true, - "HEAP": true, - "IDENTIFIED": true, - "IDGENERATORS": true, - "IDLE_TIME": true, - "IF": true, - "IMMEDIATE": true, - "IN": true, - "INCLUDING": true, - "INCREMENT": true, - "INDEX": true, - "INDEXED": true, - "INDEXES": true, - "INDICATOR": true, - "IND_PARTITION": true, - "INITIAL": true, - "INITIALLY": true, - "INITRANS": true, - "INSERT": true, - "INSTANCE": true, - "INSTANCES": true, - "INSTEAD": true, - "INT": true, - "INTEGER": true, - "INTERMEDIATE": true, - 
"INTERSECT": true, - "INTO": true, - "IS": true, - "ISOLATION": true, - "ISOLATION_LEVEL": true, - "KEEP": true, - "KEY": true, - "KILL": true, - "LABEL": true, - "LAYER": true, - "LESS": true, - "LEVEL": true, - "LIBRARY": true, - "LIKE": true, - "LIMIT": true, - "LINK": true, - "LIST": true, - "LOB": true, - "LOCAL": true, - "LOCK": true, - "LOCKED": true, - "LOG": true, - "LOGFILE": true, - "LOGGING": true, - "LOGICAL_READS_PER_CALL": true, - "LOGICAL_READS_PER_SESSION": true, - "LONG": true, - "MANAGE": true, - "MASTER": true, - "MAX": true, - "MAXARCHLOGS": true, - "MAXDATAFILES": true, - "MAXEXTENTS": true, - "MAXINSTANCES": true, - "MAXLOGFILES": true, - "MAXLOGHISTORY": true, - "MAXLOGMEMBERS": true, - "MAXSIZE": true, - "MAXTRANS": true, - "MAXVALUE": true, - "MIN": true, - "MEMBER": true, - "MINIMUM": true, - "MINEXTENTS": true, - "MINUS": true, - "MINVALUE": true, - "MLSLABEL": true, - "MLS_LABEL_FORMAT": true, - "MODE": true, - "MODIFY": true, - "MOUNT": true, - "MOVE": true, - "MTS_DISPATCHERS": true, - "MULTISET": true, - "NATIONAL": true, - "NCHAR": true, - "NCHAR_CS": true, - "NCLOB": true, - "NEEDED": true, - "NESTED": true, - "NETWORK": true, - "NEW": true, - "NEXT": true, - "NOARCHIVELOG": true, - "NOAUDIT": true, - "NOCACHE": true, - "NOCOMPRESS": true, - "NOCYCLE": true, - "NOFORCE": true, - "NOLOGGING": true, - "NOMAXVALUE": true, - "NOMINVALUE": true, - "NONE": true, - "NOORDER": true, - "NOOVERRIDE": true, - "NOPARALLEL": true, - "NOREVERSE": true, - "NORMAL": true, - "NOSORT": true, - "NOT": true, - "NOTHING": true, - "NOWAIT": true, - "NULL": true, - "NUMBER": true, - "NUMERIC": true, - "NVARCHAR2": true, - "OBJECT": true, - "OBJNO": true, - "OBJNO_REUSE": true, - "OF": true, - "OFF": true, - "OFFLINE": true, - "OID": true, - "OIDINDEX": true, - "OLD": true, - "ON": true, - "ONLINE": true, - "ONLY": true, - "OPCODE": true, - "OPEN": true, - "OPTIMAL": true, - "OPTIMIZER_GOAL": true, - "OPTION": true, - "OR": true, - "ORDER": true, - 
"ORGANIZATION": true, - "OSLABEL": true, - "OVERFLOW": true, - "OWN": true, - "PACKAGE": true, - "PARALLEL": true, - "PARTITION": true, - "PASSWORD": true, - "PASSWORD_GRACE_TIME": true, - "PASSWORD_LIFE_TIME": true, - "PASSWORD_LOCK_TIME": true, - "PASSWORD_REUSE_MAX": true, - "PASSWORD_REUSE_TIME": true, - "PASSWORD_VERIFY_FUNCTION": true, - "PCTFREE": true, - "PCTINCREASE": true, - "PCTTHRESHOLD": true, - "PCTUSED": true, - "PCTVERSION": true, - "PERCENT": true, - "PERMANENT": true, - "PLAN": true, - "PLSQL_DEBUG": true, - "POST_TRANSACTION": true, - "PRECISION": true, - "PRESERVE": true, - "PRIMARY": true, - "PRIOR": true, - "PRIVATE": true, - "PRIVATE_SGA": true, - "PRIVILEGE": true, - "PRIVILEGES": true, - "PROCEDURE": true, - "PROFILE": true, - "PUBLIC": true, - "PURGE": true, - "QUEUE": true, - "QUOTA": true, - "RANGE": true, - "RAW": true, - "RBA": true, - "READ": true, - "READUP": true, - "REAL": true, - "REBUILD": true, - "RECOVER": true, - "RECOVERABLE": true, - "RECOVERY": true, - "REF": true, - "REFERENCES": true, - "REFERENCING": true, - "REFRESH": true, - "RENAME": true, - "REPLACE": true, - "RESET": true, - "RESETLOGS": true, - "RESIZE": true, - "RESOURCE": true, - "RESTRICTED": true, - "RETURN": true, - "RETURNING": true, - "REUSE": true, - "REVERSE": true, - "REVOKE": true, - "ROLE": true, - "ROLES": true, - "ROLLBACK": true, - "ROW": true, - "ROWID": true, - "ROWNUM": true, - "ROWS": true, - "RULE": true, - "SAMPLE": true, - "SAVEPOINT": true, - "SB4": true, - "SCAN_INSTANCES": true, - "SCHEMA": true, - "SCN": true, - "SCOPE": true, - "SD_ALL": true, - "SD_INHIBIT": true, - "SD_SHOW": true, - "SEGMENT": true, - "SEG_BLOCK": true, - "SEG_FILE": true, - "SELECT": true, - "SEQUENCE": true, - "SERIALIZABLE": true, - "SESSION": true, - "SESSION_CACHED_CURSORS": true, - "SESSIONS_PER_USER": true, - "SET": true, - "SHARE": true, - "SHARED": true, - "SHARED_POOL": true, - "SHRINK": true, - "SIZE": true, - "SKIP": true, - "SKIP_UNUSABLE_INDEXES": true, - 
"SMALLINT": true, - "SNAPSHOT": true, - "SOME": true, - "SORT": true, - "SPECIFICATION": true, - "SPLIT": true, - "SQL_TRACE": true, - "STANDBY": true, - "START": true, - "STATEMENT_ID": true, - "STATISTICS": true, - "STOP": true, - "STORAGE": true, - "STORE": true, - "STRUCTURE": true, - "SUCCESSFUL": true, - "SWITCH": true, - "SYS_OP_ENFORCE_NOT_NULL$": true, - "SYS_OP_NTCIMG$": true, - "SYNONYM": true, - "SYSDATE": true, - "SYSDBA": true, - "SYSOPER": true, - "SYSTEM": true, - "TABLE": true, - "TABLES": true, - "TABLESPACE": true, - "TABLESPACE_NO": true, - "TABNO": true, - "TEMPORARY": true, - "THAN": true, - "THE": true, - "THEN": true, - "THREAD": true, - "TIMESTAMP": true, - "TIME": true, - "TO": true, - "TOPLEVEL": true, - "TRACE": true, - "TRACING": true, - "TRANSACTION": true, - "TRANSITIONAL": true, - "TRIGGER": true, - "TRIGGERS": true, - "TRUE": true, - "TRUNCATE": true, - "TX": true, - "TYPE": true, - "UB2": true, - "UBA": true, - "UID": true, - "UNARCHIVED": true, - "UNDO": true, - "UNION": true, - "UNIQUE": true, - "UNLIMITED": true, - "UNLOCK": true, - "UNRECOVERABLE": true, - "UNTIL": true, - "UNUSABLE": true, - "UNUSED": true, - "UPDATABLE": true, - "UPDATE": true, - "USAGE": true, - "USE": true, - "USER": true, - "USING": true, - "VALIDATE": true, - "VALIDATION": true, - "VALUE": true, - "VALUES": true, - "VARCHAR": true, - "VARCHAR2": true, - "VARYING": true, - "VIEW": true, - "WHEN": true, - "WHENEVER": true, - "WHERE": true, - "WITH": true, - "WITHOUT": true, - "WORK": true, - "WRITE": true, - "WRITEDOWN": true, - "WRITEUP": true, - "XID": true, - "YEAR": true, - "ZONE": true, - } - - damengQuoter = schemas.Quoter{ - Prefix: '"', - Suffix: '"', - IsReserved: schemas.AlwaysReserve, - } -) - -type dameng struct { - Base -} - -func (db *dameng) Init(uri *URI) error { - db.quoter = damengQuoter - return db.Base.Init(db, uri) -} - -func (db *dameng) Version(ctx context.Context, queryer core.Queryer) (*schemas.Version, error) { - rows, err := 
queryer.QueryContext(ctx, "SELECT * FROM V$VERSION") // select id_code - if err != nil { - return nil, err - } - defer rows.Close() - - var version string - if !rows.Next() { - if rows.Err() != nil { - return nil, rows.Err() - } - return nil, errors.New("unknow version") - } - - if err := rows.Scan(&version); err != nil { - return nil, err - } - return &schemas.Version{ - Number: version, - }, nil -} - -func (db *dameng) Features() *DialectFeatures { - return &DialectFeatures{ - AutoincrMode: SequenceAutoincrMode, - } -} - -// DropIndexSQL returns a SQL to drop index -func (db *dameng) DropIndexSQL(tableName string, index *schemas.Index) string { - quote := db.dialect.Quoter().Quote - var name string - if index.IsRegular { - name = index.XName(tableName) - } else { - name = index.Name - } - return fmt.Sprintf("DROP INDEX %v", quote(name)) -} - -func (db *dameng) SQLType(c *schemas.Column) string { - var res string - switch t := c.SQLType.Name; t { - case schemas.TinyInt, "BYTE": - return "TINYINT" - case schemas.SmallInt, schemas.MediumInt, schemas.Int, schemas.Integer, schemas.UnsignedTinyInt: - return "INTEGER" - case schemas.BigInt, - schemas.UnsignedBigInt, schemas.UnsignedBit, schemas.UnsignedInt, - schemas.Serial, schemas.BigSerial: - return "BIGINT" - case schemas.Bit, schemas.Bool, schemas.Boolean: - return schemas.Bit - case schemas.Uuid: - res = schemas.Varchar - c.Length = 40 - case schemas.Binary: - if c.Length == 0 { - return schemas.Binary + "(MAX)" - } - case schemas.VarBinary, schemas.Blob, schemas.TinyBlob, schemas.MediumBlob, schemas.LongBlob, schemas.Bytea: - return schemas.VarBinary - case schemas.Date: - return schemas.Date - case schemas.Time: - if c.Length > 0 { - return fmt.Sprintf("%s(%d)", schemas.Time, c.Length) - } - return schemas.Time - case schemas.DateTime, schemas.TimeStamp: - res = schemas.TimeStamp - case schemas.TimeStampz: - if c.Length > 0 { - return fmt.Sprintf("TIMESTAMP(%d) WITH TIME ZONE", c.Length) - } - return "TIMESTAMP 
WITH TIME ZONE" - case schemas.Float: - res = "FLOAT" - case schemas.Real, schemas.Double: - res = "REAL" - case schemas.Numeric, schemas.Decimal, "NUMBER": - res = "NUMERIC" - case schemas.Text, schemas.Json: - return "TEXT" - case schemas.MediumText, schemas.LongText: - res = "CLOB" - case schemas.Char, schemas.Varchar, schemas.TinyText: - res = "VARCHAR2" - default: - res = t - } - - hasLen1 := (c.Length > 0) - hasLen2 := (c.Length2 > 0) - - if hasLen2 { - res += "(" + strconv.FormatInt(c.Length, 10) + "," + strconv.FormatInt(c.Length2, 10) + ")" - } else if hasLen1 { - res += "(" + strconv.FormatInt(c.Length, 10) + ")" - } - return res -} - -func (db *dameng) ColumnTypeKind(t string) int { - switch strings.ToUpper(t) { - case "DATE": - return schemas.TIME_TYPE - case "CHAR", "NCHAR", "VARCHAR", "VARCHAR2", "NVARCHAR2", "LONG", "CLOB", "NCLOB": - return schemas.TEXT_TYPE - case "NUMBER": - return schemas.NUMERIC_TYPE - case "BLOB": - return schemas.BLOB_TYPE - default: - return schemas.UNKNOW_TYPE - } -} - -func (db *dameng) AutoIncrStr() string { - return "IDENTITY" -} - -func (db *dameng) IsReserved(name string) bool { - _, ok := damengReservedWords[strings.ToUpper(name)] - return ok -} - -func (db *dameng) DropTableSQL(tableName string) (string, bool) { - return fmt.Sprintf("DROP TABLE %s", db.quoter.Quote(tableName)), false -} - -// ModifyColumnSQL returns a SQL to modify SQL -func (db *dameng) ModifyColumnSQL(tableName string, col *schemas.Column) string { - s, _ := ColumnString(db.dialect, col, false) - return fmt.Sprintf("ALTER TABLE %s MODIFY %s", db.quoter.Quote(tableName), s) -} - -func (db *dameng) CreateTableSQL(ctx context.Context, queryer core.Queryer, table *schemas.Table, tableName string) (string, bool, error) { - if tableName == "" { - tableName = table.Name - } - - quoter := db.Quoter() - var b strings.Builder - if _, err := b.WriteString("CREATE TABLE "); err != nil { - return "", false, err - } - if err := quoter.QuoteTo(&b, tableName); err 
!= nil { - return "", false, err - } - if _, err := b.WriteString(" ("); err != nil { - return "", false, err - } - - pkList := table.PrimaryKeys - - for i, colName := range table.ColumnsSeq() { - col := table.GetColumn(colName) - if col.SQLType.IsBool() && !col.DefaultIsEmpty { - if col.Default == "true" { - col.Default = "1" - } else if col.Default == "false" { - col.Default = "0" - } - } - - s, _ := ColumnString(db, col, false) - if _, err := b.WriteString(s); err != nil { - return "", false, err - } - if i != len(table.ColumnsSeq())-1 { - if _, err := b.WriteString(", "); err != nil { - return "", false, err - } - } - } - - if len(pkList) > 0 { - if len(table.ColumnsSeq()) > 0 { - if _, err := b.WriteString(", "); err != nil { - return "", false, err - } - } - if _, err := b.WriteString(fmt.Sprintf("CONSTRAINT PK_%s PRIMARY KEY (", tableName)); err != nil { - return "", false, err - } - if err := quoter.JoinWrite(&b, pkList, ","); err != nil { - return "", false, err - } - if _, err := b.WriteString(")"); err != nil { - return "", false, err - } - } - if _, err := b.WriteString(")"); err != nil { - return "", false, err - } - - return b.String(), false, nil -} - -func (db *dameng) SetQuotePolicy(quotePolicy QuotePolicy) { - switch quotePolicy { - case QuotePolicyNone: - q := damengQuoter - q.IsReserved = schemas.AlwaysNoReserve - db.quoter = q - case QuotePolicyReserved: - q := damengQuoter - q.IsReserved = db.IsReserved - db.quoter = q - case QuotePolicyAlways: - fallthrough - default: - db.quoter = damengQuoter - } -} - -func (db *dameng) IndexCheckSQL(tableName, idxName string) (string, []interface{}) { - args := []interface{}{tableName, idxName} - return `SELECT INDEX_NAME FROM USER_INDEXES ` + - `WHERE TABLE_NAME = ? 
AND INDEX_NAME = ?`, args -} - -func (db *dameng) IsTableExist(queryer core.Queryer, ctx context.Context, tableName string) (bool, error) { - return db.HasRecords(queryer, ctx, `SELECT table_name FROM user_tables WHERE table_name = ?`, tableName) -} - -func (db *dameng) IsSequenceExist(ctx context.Context, queryer core.Queryer, seqName string) (bool, error) { - var cnt int - rows, err := queryer.QueryContext(ctx, "SELECT COUNT(*) FROM user_sequences WHERE sequence_name = ?", seqName) - if err != nil { - return false, err - } - defer rows.Close() - if !rows.Next() { - if rows.Err() != nil { - return false, rows.Err() - } - return false, errors.New("query sequence failed") - } - - if err := rows.Scan(&cnt); err != nil { - return false, err - } - return cnt > 0, nil -} - -func (db *dameng) IsColumnExist(queryer core.Queryer, ctx context.Context, tableName, colName string) (bool, error) { - args := []interface{}{tableName, colName} - query := "SELECT column_name FROM USER_TAB_COLUMNS WHERE table_name = ?" + - " AND column_name = ?" - return db.HasRecords(queryer, ctx, query, args...) 
-} - -var _ sql.Scanner = &dmClobScanner{} - -type dmClobScanner struct { - valid bool - data string -} - -type dmClobObject interface { - GetLength() (int64, error) - ReadString(int, int) (string, error) -} - -// var _ dmClobObject = &dm.DmClob{} - -func (d *dmClobScanner) Scan(data interface{}) error { - if data == nil { - return nil - } - - switch t := data.(type) { - case dmClobObject: // *dm.DmClob - if t == nil { - return nil - } - l, err := t.GetLength() - if err != nil { - return err - } - if l == 0 { - d.valid = true - return nil - } - d.data, err = t.ReadString(1, int(l)) - if err != nil { - return err - } - d.valid = true - return nil - case []byte: - if t == nil { - return nil - } - d.data = string(t) - d.valid = true - return nil - default: - return fmt.Errorf("cannot convert %T as dmClobScanner", data) - } -} - -func addSingleQuote(name string) string { - if len(name) < 2 { - return name - } - if name[0] == '\'' && name[len(name)-1] == '\'' { - return name - } - return fmt.Sprintf("'%s'", name) -} - -func (db *dameng) GetColumns(queryer core.Queryer, ctx context.Context, tableName string) ([]string, map[string]*schemas.Column, error) { - s := `select column_name from user_cons_columns - where constraint_name = (select constraint_name from user_constraints - where table_name = ? 
and constraint_type ='P')` - rows, err := queryer.QueryContext(ctx, s, tableName) - if err != nil { - return nil, nil, err - } - defer rows.Close() - - var pkNames []string - for rows.Next() { - var pkName string - err = rows.Scan(&pkName) - if err != nil { - return nil, nil, err - } - pkNames = append(pkNames, pkName) - } - if rows.Err() != nil { - return nil, nil, rows.Err() - } - rows.Close() - - s = `SELECT USER_TAB_COLS.COLUMN_NAME, USER_TAB_COLS.DATA_DEFAULT, USER_TAB_COLS.DATA_TYPE, USER_TAB_COLS.DATA_LENGTH, - USER_TAB_COLS.data_precision, USER_TAB_COLS.data_scale, USER_TAB_COLS.NULLABLE, - user_col_comments.comments - FROM USER_TAB_COLS - LEFT JOIN user_col_comments on user_col_comments.TABLE_NAME=USER_TAB_COLS.TABLE_NAME - AND user_col_comments.COLUMN_NAME=USER_TAB_COLS.COLUMN_NAME - WHERE USER_TAB_COLS.table_name = ?` - rows, err = queryer.QueryContext(ctx, s, tableName) - if err != nil { - return nil, nil, err - } - defer rows.Close() - - cols := make(map[string]*schemas.Column) - colSeq := make([]string, 0) - for rows.Next() { - col := new(schemas.Column) - col.Indexes = make(map[string]int) - - var colDefault dmClobScanner - var colName, nullable, dataType, dataPrecision, comment sql.NullString - var dataScale, dataLen sql.NullInt64 - - err = rows.Scan(&colName, &colDefault, &dataType, &dataLen, &dataPrecision, - &dataScale, &nullable, &comment) - if err != nil { - return nil, nil, err - } - - if !colName.Valid { - return nil, nil, errors.New("column name is nil") - } - - col.Name = strings.Trim(colName.String, `" `) - if colDefault.valid { - col.Default = colDefault.data - } else { - col.DefaultIsEmpty = true - } - - if nullable.String == "Y" { - col.Nullable = true - } else { - col.Nullable = false - } - - if !comment.Valid { - col.Comment = comment.String - } - if utils.IndexSlice(pkNames, col.Name) > -1 { - col.IsPrimaryKey = true - has, err := db.HasRecords(queryer, ctx, "SELECT * FROM USER_SEQUENCES WHERE SEQUENCE_NAME = ?", 
utils.SeqName(tableName)) - if err != nil { - return nil, nil, err - } - if has { - col.IsAutoIncrement = true - } - } - - var ( - ignore bool - dt string - len1, len2 int64 - ) - - dts := strings.Split(dataType.String, "(") - dt = dts[0] - if len(dts) > 1 { - lens := strings.Split(dts[1][:len(dts[1])-1], ",") - if len(lens) > 1 { - len1, _ = strconv.ParseInt(lens[0], 10, 64) - len2, _ = strconv.ParseInt(lens[1], 10, 64) - } else { - len1, _ = strconv.ParseInt(lens[0], 10, 64) - } - } - - switch dt { - case "VARCHAR2": - col.SQLType = schemas.SQLType{Name: "VARCHAR2", DefaultLength: len1, DefaultLength2: len2} - case "VARCHAR": - col.SQLType = schemas.SQLType{Name: schemas.Varchar, DefaultLength: len1, DefaultLength2: len2} - case "TIMESTAMP WITH TIME ZONE": - col.SQLType = schemas.SQLType{Name: schemas.TimeStampz, DefaultLength: 0, DefaultLength2: 0} - case "NUMBER": - col.SQLType = schemas.SQLType{Name: "NUMBER", DefaultLength: len1, DefaultLength2: len2} - case "LONG", "LONG RAW", "NCLOB", "CLOB", "TEXT": - col.SQLType = schemas.SQLType{Name: schemas.Text, DefaultLength: 0, DefaultLength2: 0} - case "RAW": - col.SQLType = schemas.SQLType{Name: schemas.Binary, DefaultLength: 0, DefaultLength2: 0} - case "ROWID": - col.SQLType = schemas.SQLType{Name: schemas.Varchar, DefaultLength: 18, DefaultLength2: 0} - case "AQ$_SUBSCRIBERS": - ignore = true - default: - col.SQLType = schemas.SQLType{Name: strings.ToUpper(dt), DefaultLength: len1, DefaultLength2: len2} - } - - if ignore { - continue - } - - if _, ok := schemas.SqlTypes[col.SQLType.Name]; !ok { - return nil, nil, fmt.Errorf("unknown colType %v %v", dataType.String, col.SQLType) - } - - if col.SQLType.Name == "TIMESTAMP" { - col.Length = dataScale.Int64 - } else { - col.Length = dataLen.Int64 - } - - if col.SQLType.IsTime() { - if !col.DefaultIsEmpty && !strings.EqualFold(col.Default, "CURRENT_TIMESTAMP") { - col.Default = addSingleQuote(col.Default) - } - } - cols[col.Name] = col - colSeq = append(colSeq, 
col.Name) - } - if rows.Err() != nil { - return nil, nil, rows.Err() - } - - return colSeq, cols, nil -} - -func (db *dameng) GetTables(queryer core.Queryer, ctx context.Context) ([]*schemas.Table, error) { - s := "SELECT table_name FROM user_tables WHERE temporary = 'N' AND table_name NOT LIKE ?" - args := []interface{}{strings.ToUpper(db.uri.User), "%$%"} - - rows, err := queryer.QueryContext(ctx, s, args...) - if err != nil { - return nil, err - } - defer rows.Close() - - tables := make([]*schemas.Table, 0) - for rows.Next() { - table := schemas.NewEmptyTable() - err = rows.Scan(&table.Name) - if err != nil { - return nil, err - } - - tables = append(tables, table) - } - if rows.Err() != nil { - return nil, rows.Err() - } - return tables, nil -} - -func (db *dameng) GetIndexes(queryer core.Queryer, ctx context.Context, tableName string) (map[string]*schemas.Index, error) { - args := []interface{}{tableName, tableName} - s := "SELECT t.column_name,i.uniqueness,i.index_name FROM user_ind_columns t,user_indexes i " + - "WHERE t.index_name = i.index_name and t.table_name = i.table_name and t.table_name =?" + - " AND t.index_name not in (SELECT index_name FROM ALL_CONSTRAINTS WHERE CONSTRAINT_TYPE='P' AND table_name = ?)" - - rows, err := queryer.QueryContext(ctx, s, args...) 
- if err != nil { - return nil, err - } - defer rows.Close() - - indexes := make(map[string]*schemas.Index) - for rows.Next() { - var indexType int - var indexName, colName, uniqueness string - - err = rows.Scan(&colName, &uniqueness, &indexName) - if err != nil { - return nil, err - } - - indexName = strings.Trim(indexName, `" `) - - var isRegular bool - if strings.HasPrefix(indexName, "IDX_"+tableName) || strings.HasPrefix(indexName, "UQE_"+tableName) { - indexName = indexName[5+len(tableName):] - isRegular = true - } - - if uniqueness == "UNIQUE" { - indexType = schemas.UniqueType - } else { - indexType = schemas.IndexType - } - - var index *schemas.Index - var ok bool - if index, ok = indexes[indexName]; !ok { - index = new(schemas.Index) - index.Type = indexType - index.Name = indexName - index.IsRegular = isRegular - indexes[indexName] = index - } - index.AddColumn(colName) - } - if rows.Err() != nil { - return nil, rows.Err() - } - return indexes, nil -} - -func (db *dameng) Filters() []Filter { - return []Filter{} -} - -type damengDriver struct { - baseDriver -} - -// Features return features -func (d *damengDriver) Features() *DriverFeatures { - return &DriverFeatures{ - SupportReturnInsertedID: false, - } -} - -// Parse parse the datasource -// dm://userName:password@ip:port -func (d *damengDriver) Parse(driverName, dataSourceName string) (*URI, error) { - u, err := url.Parse(dataSourceName) - if err != nil { - return nil, err - } - - if u.User == nil { - return nil, errors.New("user/password needed") - } - - passwd, _ := u.User.Password() - return &URI{ - DBType: schemas.DAMENG, - Proto: u.Scheme, - Host: u.Hostname(), - Port: u.Port(), - DBName: u.User.Username(), - User: u.User.Username(), - Passwd: passwd, - }, nil -} - -func (d *damengDriver) GenScanResult(colType string) (interface{}, error) { - switch colType { - case "CHAR", "NCHAR", "VARCHAR", "VARCHAR2", "NVARCHAR2", "LONG", "CLOB", "NCLOB": - var s sql.NullString - return &s, nil - case 
"NUMBER": - var s sql.NullString - return &s, nil - case "BIGINT": - var s sql.NullInt64 - return &s, nil - case "INTEGER": - var s sql.NullInt32 - return &s, nil - case "DATE", "TIMESTAMP": - var s sql.NullString - return &s, nil - case "BLOB": - var r sql.RawBytes - return &r, nil - case "FLOAT": - var s sql.NullFloat64 - return &s, nil - default: - var r sql.RawBytes - return &r, nil - } -} - -func (d *damengDriver) Scan(ctx *ScanContext, rows *core.Rows, types []*sql.ColumnType, vv ...interface{}) error { - scanResults := make([]interface{}, 0, len(types)) - replaces := make([]bool, 0, len(types)) - var err error - for i, v := range vv { - var replaced bool - var scanResult interface{} - switch types[i].DatabaseTypeName() { - case "CLOB", "TEXT": - scanResult = &dmClobScanner{} - replaced = true - case "TIMESTAMP": - scanResult = &sql.NullString{} - replaced = true - default: - scanResult = v - } - - scanResults = append(scanResults, scanResult) - replaces = append(replaces, replaced) - } - - if err = rows.Scan(scanResults...); err != nil { - return err - } - - for i, replaced := range replaces { - if replaced { - switch t := scanResults[i].(type) { - case *dmClobScanner: - var d interface{} - if t.valid { - d = t.data - } else { - d = nil - } - if err := convert.Assign(vv[i], d, ctx.DBLocation, ctx.UserLocation); err != nil { - return err - } - default: - switch types[i].DatabaseTypeName() { - case "TIMESTAMP": - ns := t.(*sql.NullString) - if !ns.Valid { - break - } - s := ns.String - fields := strings.Split(s, "+") - if err := convert.Assign(vv[i], strings.Replace(fields[0], "T", " ", -1), ctx.DBLocation, ctx.UserLocation); err != nil { - return err - } - default: - return fmt.Errorf("don't support convert %T to %T", t, vv[i]) - } - } - } - } - - return nil -} diff --git a/vendor/xorm.io/xorm/dialects/dialect.go b/vendor/xorm.io/xorm/dialects/dialect.go deleted file mode 100644 index 555d96c6..00000000 --- a/vendor/xorm.io/xorm/dialects/dialect.go +++ 
/dev/null @@ -1,364 +0,0 @@ -// Copyright 2019 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package dialects - -import ( - "context" - "fmt" - "strings" - "time" - - "xorm.io/xorm/core" - "xorm.io/xorm/schemas" -) - -// URI represents an uri to visit database -type URI struct { - DBType schemas.DBType - Proto string - Host string - Port string - DBName string - User string - Passwd string - Charset string - Laddr string - Raddr string - Timeout time.Duration - Schema string -} - -// SetSchema set schema -func (uri *URI) SetSchema(schema string) { - // hack me - if uri.DBType == schemas.POSTGRES { - uri.Schema = strings.TrimSpace(schema) - } -} - -// enumerates all autoincr mode -const ( - IncrAutoincrMode = iota - SequenceAutoincrMode -) - -// DialectFeatures represents a dialect parameters -type DialectFeatures struct { - AutoincrMode int // 0 autoincrement column, 1 sequence -} - -// Dialect represents a kind of database -type Dialect interface { - Init(*URI) error - URI() *URI - Version(ctx context.Context, queryer core.Queryer) (*schemas.Version, error) - Features() *DialectFeatures - - SQLType(*schemas.Column) string - Alias(string) string // return what a sql type's alias of - ColumnTypeKind(string) int // database column type kind - - IsReserved(string) bool - Quoter() schemas.Quoter - SetQuotePolicy(quotePolicy QuotePolicy) - - AutoIncrStr() string - - GetIndexes(queryer core.Queryer, ctx context.Context, tableName string) (map[string]*schemas.Index, error) - IndexCheckSQL(tableName, idxName string) (string, []interface{}) - CreateIndexSQL(tableName string, index *schemas.Index) string - DropIndexSQL(tableName string, index *schemas.Index) string - - GetTables(queryer core.Queryer, ctx context.Context) ([]*schemas.Table, error) - IsTableExist(queryer core.Queryer, ctx context.Context, tableName string) (bool, error) - CreateTableSQL(ctx context.Context, 
queryer core.Queryer, table *schemas.Table, tableName string) (string, bool, error) - DropTableSQL(tableName string) (string, bool) - - CreateSequenceSQL(ctx context.Context, queryer core.Queryer, seqName string) (string, error) - IsSequenceExist(ctx context.Context, queryer core.Queryer, seqName string) (bool, error) - DropSequenceSQL(seqName string) (string, error) - - GetColumns(queryer core.Queryer, ctx context.Context, tableName string) ([]string, map[string]*schemas.Column, error) - IsColumnExist(queryer core.Queryer, ctx context.Context, tableName string, colName string) (bool, error) - AddColumnSQL(tableName string, col *schemas.Column) string - ModifyColumnSQL(tableName string, col *schemas.Column) string - - ForUpdateSQL(query string) string - - Filters() []Filter - SetParams(params map[string]string) -} - -// Base represents a basic dialect and all real dialects could embed this struct -type Base struct { - dialect Dialect - uri *URI - quoter schemas.Quoter -} - -// Alias returned col itself -func (db *Base) Alias(col string) string { - return col -} - -// Quoter returns the current database Quoter -func (db *Base) Quoter() schemas.Quoter { - return db.quoter -} - -// Init initialize the dialect -func (db *Base) Init(dialect Dialect, uri *URI) error { - db.dialect, db.uri = dialect, uri - return nil -} - -// URI returns the uri of database -func (db *Base) URI() *URI { - return db.uri -} - -// CreateTableSQL implements Dialect -func (db *Base) CreateTableSQL(ctx context.Context, queryer core.Queryer, table *schemas.Table, tableName string) (string, bool, error) { - if tableName == "" { - tableName = table.Name - } - - quoter := db.dialect.Quoter() - var b strings.Builder - b.WriteString("CREATE TABLE IF NOT EXISTS ") - if err := quoter.QuoteTo(&b, tableName); err != nil { - return "", false, err - } - b.WriteString(" (") - - for i, colName := range table.ColumnsSeq() { - col := table.GetColumn(colName) - s, _ := ColumnString(db.dialect, col, 
col.IsPrimaryKey && len(table.PrimaryKeys) == 1) - b.WriteString(s) - - if i != len(table.ColumnsSeq())-1 { - b.WriteString(", ") - } - } - - if len(table.PrimaryKeys) > 1 { - b.WriteString(", PRIMARY KEY (") - b.WriteString(quoter.Join(table.PrimaryKeys, ",")) - b.WriteString(")") - } - - b.WriteString(")") - - return b.String(), false, nil -} - -func (db *Base) CreateSequenceSQL(ctx context.Context, queryer core.Queryer, seqName string) (string, error) { - return fmt.Sprintf(`CREATE SEQUENCE %s - minvalue 1 - nomaxvalue - start with 1 - increment by 1 - nocycle - nocache`, seqName), nil -} - -func (db *Base) IsSequenceExist(ctx context.Context, queryer core.Queryer, seqName string) (bool, error) { - return false, fmt.Errorf("unsupported sequence feature") -} - -func (db *Base) DropSequenceSQL(seqName string) (string, error) { - return fmt.Sprintf("DROP SEQUENCE %s", seqName), nil -} - -// DropTableSQL returns drop table SQL -func (db *Base) DropTableSQL(tableName string) (string, bool) { - quote := db.dialect.Quoter().Quote - return fmt.Sprintf("DROP TABLE IF EXISTS %s", quote(tableName)), true -} - -// HasRecords returns true if the SQL has records returned -func (db *Base) HasRecords(queryer core.Queryer, ctx context.Context, query string, args ...interface{}) (bool, error) { - rows, err := queryer.QueryContext(ctx, query, args...) - if err != nil { - return false, err - } - defer rows.Close() - - if rows.Next() { - return true, nil - } - return false, rows.Err() -} - -// IsColumnExist returns true if the column of the table exist -func (db *Base) IsColumnExist(queryer core.Queryer, ctx context.Context, tableName, colName string) (bool, error) { - quote := db.dialect.Quoter().Quote - query := fmt.Sprintf( - "SELECT %v FROM %v.%v WHERE %v = ? AND %v = ? 
AND %v = ?", - quote("COLUMN_NAME"), - quote("INFORMATION_SCHEMA"), - quote("COLUMNS"), - quote("TABLE_SCHEMA"), - quote("TABLE_NAME"), - quote("COLUMN_NAME"), - ) - return db.HasRecords(queryer, ctx, query, db.uri.DBName, tableName, colName) -} - -// AddColumnSQL returns a SQL to add a column -func (db *Base) AddColumnSQL(tableName string, col *schemas.Column) string { - s, _ := ColumnString(db.dialect, col, true) - return fmt.Sprintf("ALTER TABLE %s ADD %s", db.dialect.Quoter().Quote(tableName), s) -} - -// CreateIndexSQL returns a SQL to create index -func (db *Base) CreateIndexSQL(tableName string, index *schemas.Index) string { - quoter := db.dialect.Quoter() - var unique string - var idxName string - if index.Type == schemas.UniqueType { - unique = " UNIQUE" - } - idxName = index.XName(tableName) - return fmt.Sprintf("CREATE%s INDEX %v ON %v (%v)", unique, - quoter.Quote(idxName), quoter.Quote(tableName), - quoter.Join(index.Cols, ",")) -} - -// DropIndexSQL returns a SQL to drop index -func (db *Base) DropIndexSQL(tableName string, index *schemas.Index) string { - quote := db.dialect.Quoter().Quote - var name string - if index.IsRegular { - name = index.XName(tableName) - } else { - name = index.Name - } - return fmt.Sprintf("DROP INDEX %v ON %s", quote(name), quote(tableName)) -} - -// ModifyColumnSQL returns a SQL to modify SQL -func (db *Base) ModifyColumnSQL(tableName string, col *schemas.Column) string { - s, _ := ColumnString(db.dialect, col, false) - return fmt.Sprintf("ALTER TABLE %s MODIFY COLUMN %s", db.quoter.Quote(tableName), s) -} - -// ForUpdateSQL returns for updateSQL -func (db *Base) ForUpdateSQL(query string) string { - return query + " FOR UPDATE" -} - -// SetParams set params -func (db *Base) SetParams(params map[string]string) { -} - -var ( - dialects = map[string]func() Dialect{} -) - -// RegisterDialect register database dialect -func RegisterDialect(dbName schemas.DBType, dialectFunc func() Dialect) { - if dialectFunc == nil { - 
panic("core: Register dialect is nil") - } - dialects[strings.ToLower(string(dbName))] = dialectFunc // !nashtsai! allow override dialect -} - -// QueryDialect query if registered database dialect -func QueryDialect(dbName schemas.DBType) Dialect { - if d, ok := dialects[strings.ToLower(string(dbName))]; ok { - return d() - } - return nil -} - -func regDrvsNDialects() bool { - providedDrvsNDialects := map[string]struct { - dbType schemas.DBType - getDriver func() Driver - getDialect func() Dialect - }{ - "mssql": {"mssql", func() Driver { return &odbcDriver{} }, func() Dialect { return &mssql{} }}, - "odbc": {"mssql", func() Driver { return &odbcDriver{} }, func() Dialect { return &mssql{} }}, // !nashtsai! TODO change this when supporting MS Access - "mysql": {"mysql", func() Driver { return &mysqlDriver{} }, func() Dialect { return &mysql{} }}, - "mymysql": {"mysql", func() Driver { return &mymysqlDriver{} }, func() Dialect { return &mysql{} }}, - "postgres": {"postgres", func() Driver { return &pqDriver{} }, func() Dialect { return &postgres{} }}, - "pgx": {"postgres", func() Driver { return &pqDriverPgx{} }, func() Dialect { return &postgres{} }}, - "sqlite3": {"sqlite3", func() Driver { return &sqlite3Driver{} }, func() Dialect { return &sqlite3{} }}, - "sqlite": {"sqlite3", func() Driver { return &sqlite3Driver{} }, func() Dialect { return &sqlite3{} }}, - "oci8": {"oracle", func() Driver { return &oci8Driver{} }, func() Dialect { return &oracle{} }}, - "godror": {"oracle", func() Driver { return &godrorDriver{} }, func() Dialect { return &oracle{} }}, - } - - for driverName, v := range providedDrvsNDialects { - if driver := QueryDriver(driverName); driver == nil { - RegisterDriver(driverName, v.getDriver()) - RegisterDialect(v.dbType, v.getDialect) - } - } - return true -} - -func init() { - regDrvsNDialects() -} - -// ColumnString generate column description string according dialect -func ColumnString(dialect Dialect, col *schemas.Column, includePrimaryKey 
bool) (string, error) { - bd := strings.Builder{} - - if err := dialect.Quoter().QuoteTo(&bd, col.Name); err != nil { - return "", err - } - - if err := bd.WriteByte(' '); err != nil { - return "", err - } - - if _, err := bd.WriteString(dialect.SQLType(col)); err != nil { - return "", err - } - - if includePrimaryKey && col.IsPrimaryKey { - if _, err := bd.WriteString(" PRIMARY KEY"); err != nil { - return "", err - } - if col.IsAutoIncrement { - if err := bd.WriteByte(' '); err != nil { - return "", err - } - if _, err := bd.WriteString(dialect.AutoIncrStr()); err != nil { - return "", err - } - } - } - - if !col.DefaultIsEmpty { - if _, err := bd.WriteString(" DEFAULT "); err != nil { - return "", err - } - if col.Default == "" { - if _, err := bd.WriteString("''"); err != nil { - return "", err - } - } else { - if _, err := bd.WriteString(col.Default); err != nil { - return "", err - } - } - } - - if col.Nullable { - if _, err := bd.WriteString(" NULL"); err != nil { - return "", err - } - } else { - if _, err := bd.WriteString(" NOT NULL"); err != nil { - return "", err - } - } - - return bd.String(), nil -} diff --git a/vendor/xorm.io/xorm/dialects/driver.go b/vendor/xorm.io/xorm/dialects/driver.go deleted file mode 100644 index c63dbfa3..00000000 --- a/vendor/xorm.io/xorm/dialects/driver.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2019 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package dialects - -import ( - "database/sql" - "fmt" - "time" - - "xorm.io/xorm/core" -) - -// ScanContext represents a context when Scan -type ScanContext struct { - DBLocation *time.Location - UserLocation *time.Location -} - -// DriverFeatures represents driver feature -type DriverFeatures struct { - SupportReturnInsertedID bool -} - -// Driver represents a database driver -type Driver interface { - Parse(string, string) (*URI, error) - Features() *DriverFeatures - GenScanResult(string) (interface{}, error) // according given column type generating a suitable scan interface - Scan(*ScanContext, *core.Rows, []*sql.ColumnType, ...interface{}) error -} - -var ( - drivers = map[string]Driver{} -) - -// RegisterDriver register a driver -func RegisterDriver(driverName string, driver Driver) { - if driver == nil { - panic("core: Register driver is nil") - } - if _, dup := drivers[driverName]; dup { - panic("core: Register called twice for driver " + driverName) - } - drivers[driverName] = driver -} - -// QueryDriver query a driver with name -func QueryDriver(driverName string) Driver { - return drivers[driverName] -} - -// RegisteredDriverSize returned all drivers's length -func RegisteredDriverSize() int { - return len(drivers) -} - -// OpenDialect opens a dialect via driver name and connection string -func OpenDialect(driverName, connstr string) (Dialect, error) { - driver := QueryDriver(driverName) - if driver == nil { - return nil, fmt.Errorf("unsupported driver name: %v", driverName) - } - - uri, err := driver.Parse(driverName, connstr) - if err != nil { - return nil, err - } - - dialect := QueryDialect(uri.DBType) - if dialect == nil { - return nil, fmt.Errorf("unsupported dialect type: %v", uri.DBType) - } - - dialect.Init(uri) - - return dialect, nil -} - -type baseDriver struct{} - -func (b *baseDriver) Scan(ctx *ScanContext, rows *core.Rows, types []*sql.ColumnType, v ...interface{}) error { - return rows.Scan(v...) 
-} diff --git a/vendor/xorm.io/xorm/dialects/filter.go b/vendor/xorm.io/xorm/dialects/filter.go deleted file mode 100644 index bfe2e93e..00000000 --- a/vendor/xorm.io/xorm/dialects/filter.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2019 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package dialects - -import ( - "fmt" - "strings" -) - -// Filter is an interface to filter SQL -type Filter interface { - Do(sql string) string -} - -// SeqFilter filter SQL replace ?, ? ... to $1, $2 ... -type SeqFilter struct { - Prefix string - Start int -} - -func convertQuestionMark(sql, prefix string, start int) string { - var buf strings.Builder - var beginSingleQuote bool - var isLineComment bool - var isComment bool - var isMaybeLineComment bool - var isMaybeComment bool - var isMaybeCommentEnd bool - var index = start - for _, c := range sql { - if !beginSingleQuote && !isLineComment && !isComment && c == '?' 
{ - buf.WriteString(fmt.Sprintf("%s%v", prefix, index)) - index++ - } else { - if isMaybeLineComment { - if c == '-' { - isLineComment = true - } - isMaybeLineComment = false - } else if isMaybeComment { - if c == '*' { - isComment = true - } - isMaybeComment = false - } else if isMaybeCommentEnd { - if c == '/' { - isComment = false - } - isMaybeCommentEnd = false - } else if isLineComment { - if c == '\n' { - isLineComment = false - } - } else if isComment { - if c == '*' { - isMaybeCommentEnd = true - } - } else if !beginSingleQuote && c == '-' { - isMaybeLineComment = true - } else if !beginSingleQuote && c == '/' { - isMaybeComment = true - } else if c == '\'' { - beginSingleQuote = !beginSingleQuote - } - buf.WriteRune(c) - } - } - return buf.String() -} - -// Do implements Filter -func (s *SeqFilter) Do(sql string) string { - return convertQuestionMark(sql, s.Prefix, s.Start) -} diff --git a/vendor/xorm.io/xorm/dialects/mssql.go b/vendor/xorm.io/xorm/dialects/mssql.go deleted file mode 100644 index 1b6fe692..00000000 --- a/vendor/xorm.io/xorm/dialects/mssql.go +++ /dev/null @@ -1,733 +0,0 @@ -// Copyright 2015 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package dialects - -import ( - "context" - "database/sql" - "errors" - "fmt" - "net/url" - "strconv" - "strings" - - "xorm.io/xorm/core" - "xorm.io/xorm/schemas" -) - -var ( - mssqlReservedWords = map[string]bool{ - "ADD": true, - "EXTERNAL": true, - "PROCEDURE": true, - "ALL": true, - "FETCH": true, - "PUBLIC": true, - "ALTER": true, - "FILE": true, - "RAISERROR": true, - "AND": true, - "FILLFACTOR": true, - "READ": true, - "ANY": true, - "FOR": true, - "READTEXT": true, - "AS": true, - "FOREIGN": true, - "RECONFIGURE": true, - "ASC": true, - "FREETEXT": true, - "REFERENCES": true, - "AUTHORIZATION": true, - "FREETEXTTABLE": true, - "REPLICATION": true, - "BACKUP": true, - "FROM": true, - "RESTORE": true, - "BEGIN": true, - "FULL": true, - "RESTRICT": true, - "BETWEEN": true, - "FUNCTION": true, - "RETURN": true, - "BREAK": true, - "GOTO": true, - "REVERT": true, - "BROWSE": true, - "GRANT": true, - "REVOKE": true, - "BULK": true, - "GROUP": true, - "RIGHT": true, - "BY": true, - "HAVING": true, - "ROLLBACK": true, - "CASCADE": true, - "HOLDLOCK": true, - "ROWCOUNT": true, - "CASE": true, - "IDENTITY": true, - "ROWGUIDCOL": true, - "CHECK": true, - "IDENTITY_INSERT": true, - "RULE": true, - "CHECKPOINT": true, - "IDENTITYCOL": true, - "SAVE": true, - "CLOSE": true, - "IF": true, - "SCHEMA": true, - "CLUSTERED": true, - "IN": true, - "SECURITYAUDIT": true, - "COALESCE": true, - "INDEX": true, - "SELECT": true, - "COLLATE": true, - "INNER": true, - "SEMANTICKEYPHRASETABLE": true, - "COLUMN": true, - "INSERT": true, - "SEMANTICSIMILARITYDETAILSTABLE": true, - "COMMIT": true, - "INTERSECT": true, - "SEMANTICSIMILARITYTABLE": true, - "COMPUTE": true, - "INTO": true, - "SESSION_USER": true, - "CONSTRAINT": true, - "IS": true, - "SET": true, - "CONTAINS": true, - "JOIN": true, - "SETUSER": true, - "CONTAINSTABLE": true, - "KEY": true, - "SHUTDOWN": true, - "CONTINUE": true, - "KILL": true, - "SOME": true, - "CONVERT": true, - "LEFT": true, - "STATISTICS": true, - 
"CREATE": true, - "LIKE": true, - "SYSTEM_USER": true, - "CROSS": true, - "LINENO": true, - "TABLE": true, - "CURRENT": true, - "LOAD": true, - "TABLESAMPLE": true, - "CURRENT_DATE": true, - "MERGE": true, - "TEXTSIZE": true, - "CURRENT_TIME": true, - "NATIONAL": true, - "THEN": true, - "CURRENT_TIMESTAMP": true, - "NOCHECK": true, - "TO": true, - "CURRENT_USER": true, - "NONCLUSTERED": true, - "TOP": true, - "CURSOR": true, - "NOT": true, - "TRAN": true, - "DATABASE": true, - "NULL": true, - "TRANSACTION": true, - "DBCC": true, - "NULLIF": true, - "TRIGGER": true, - "DEALLOCATE": true, - "OF": true, - "TRUNCATE": true, - "DECLARE": true, - "OFF": true, - "TRY_CONVERT": true, - "DEFAULT": true, - "OFFSETS": true, - "TSEQUAL": true, - "DELETE": true, - "ON": true, - "UNION": true, - "DENY": true, - "OPEN": true, - "UNIQUE": true, - "DESC": true, - "OPENDATASOURCE": true, - "UNPIVOT": true, - "DISK": true, - "OPENQUERY": true, - "UPDATE": true, - "DISTINCT": true, - "OPENROWSET": true, - "UPDATETEXT": true, - "DISTRIBUTED": true, - "OPENXML": true, - "USE": true, - "DOUBLE": true, - "OPTION": true, - "USER": true, - "DROP": true, - "OR": true, - "VALUES": true, - "DUMP": true, - "ORDER": true, - "VARYING": true, - "ELSE": true, - "OUTER": true, - "VIEW": true, - "END": true, - "OVER": true, - "WAITFOR": true, - "ERRLVL": true, - "PERCENT": true, - "WHEN": true, - "ESCAPE": true, - "PIVOT": true, - "WHERE": true, - "EXCEPT": true, - "PLAN": true, - "WHILE": true, - "EXEC": true, - "PRECISION": true, - "WITH": true, - "EXECUTE": true, - "PRIMARY": true, - "WITHIN": true, - "EXISTS": true, - "PRINT": true, - "WRITETEXT": true, - "EXIT": true, - "PROC": true, - } - - mssqlQuoter = schemas.Quoter{ - Prefix: '[', - Suffix: ']', - IsReserved: schemas.AlwaysReserve, - } -) - -type mssql struct { - Base - defaultVarchar string - defaultChar string -} - -func (db *mssql) Init(uri *URI) error { - db.quoter = mssqlQuoter - db.defaultChar = "CHAR" - db.defaultVarchar = "VARCHAR" 
- return db.Base.Init(db, uri) -} - -func (db *mssql) SetParams(params map[string]string) { - defaultVarchar, ok := params["DEFAULT_VARCHAR"] - if ok { - t := strings.ToUpper(defaultVarchar) - switch t { - case "NVARCHAR", "VARCHAR": - db.defaultVarchar = t - default: - db.defaultVarchar = "VARCHAR" - } - } else { - db.defaultVarchar = "VARCHAR" - } - - defaultChar, ok := params["DEFAULT_CHAR"] - if ok { - t := strings.ToUpper(defaultChar) - switch t { - case "NCHAR", "CHAR": - db.defaultChar = t - default: - db.defaultChar = "CHAR" - } - } else { - db.defaultChar = "CHAR" - } -} - -func (db *mssql) Version(ctx context.Context, queryer core.Queryer) (*schemas.Version, error) { - rows, err := queryer.QueryContext(ctx, - "SELECT SERVERPROPERTY('productversion'), SERVERPROPERTY ('productlevel') AS ProductLevel, SERVERPROPERTY ('edition') AS ProductEdition") - if err != nil { - return nil, err - } - defer rows.Close() - - var version, level, edition string - if !rows.Next() { - if rows.Err() != nil { - return nil, rows.Err() - } - return nil, errors.New("unknow version") - } - - if err := rows.Scan(&version, &level, &edition); err != nil { - return nil, err - } - - // MSSQL: Microsoft SQL Server 2017 (RTM-CU13) (KB4466404) - 14.0.3048.4 (X64) Nov 30 2018 12:57:58 Copyright (C) 2017 Microsoft Corporation Developer Edition (64-bit) on Linux (Ubuntu 16.04.5 LTS) - return &schemas.Version{ - Number: version, - Level: level, - Edition: edition, - }, nil -} - -func (db *mssql) Features() *DialectFeatures { - return &DialectFeatures{ - AutoincrMode: IncrAutoincrMode, - } -} - -func (db *mssql) SQLType(c *schemas.Column) string { - var res string - switch t := c.SQLType.Name; t { - case schemas.Bool, schemas.Boolean: - res = schemas.Bit - if strings.EqualFold(c.Default, "true") { - c.Default = "1" - } else if strings.EqualFold(c.Default, "false") { - c.Default = "0" - } - return res - case schemas.Serial: - c.IsAutoIncrement = true - c.IsPrimaryKey = true - c.Nullable = false 
- res = schemas.Int - case schemas.BigSerial: - c.IsAutoIncrement = true - c.IsPrimaryKey = true - c.Nullable = false - res = schemas.BigInt - case schemas.Bytea, schemas.Binary: - res = schemas.VarBinary - if c.Length == 0 { - c.Length = 50 - } - case schemas.Blob, schemas.TinyBlob, schemas.MediumBlob, schemas.LongBlob: - res = schemas.VarBinary - if c.Length == 0 { - res += "(MAX)" - } - case schemas.TimeStamp, schemas.DateTime: - if c.Length > 3 { - res = "DATETIME2" - } else { - return schemas.DateTime - } - case schemas.TimeStampz: - res = "DATETIMEOFFSET" - c.Length = 7 - case schemas.MediumInt, schemas.TinyInt, schemas.SmallInt, schemas.UnsignedMediumInt, schemas.UnsignedTinyInt, schemas.UnsignedSmallInt: - res = schemas.Int - case schemas.Text, schemas.MediumText, schemas.TinyText, schemas.LongText, schemas.Json: - res = db.defaultVarchar + "(MAX)" - case schemas.Double: - res = schemas.Real - case schemas.Uuid: - res = schemas.Varchar - c.Length = 40 - case schemas.TinyInt: - res = schemas.TinyInt - c.Length = 0 - case schemas.BigInt, schemas.UnsignedBigInt, schemas.UnsignedInt: - res = schemas.BigInt - c.Length = 0 - case schemas.NVarchar: - res = t - if c.Length == -1 { - res += "(MAX)" - } - case schemas.Varchar: - res = db.defaultVarchar - if c.Length == -1 { - res += "(MAX)" - } - case schemas.Char: - res = db.defaultChar - if c.Length == -1 { - res += "(MAX)" - } - case schemas.NChar: - res = t - if c.Length == -1 { - res += "(MAX)" - } - default: - res = t - } - - if res == schemas.Int || res == schemas.Bit { - return res - } - - hasLen1 := (c.Length > 0) - hasLen2 := (c.Length2 > 0) - - if hasLen2 { - res += "(" + strconv.FormatInt(c.Length, 10) + "," + strconv.FormatInt(c.Length2, 10) + ")" - } else if hasLen1 { - res += "(" + strconv.FormatInt(c.Length, 10) + ")" - } - return res -} - -func (db *mssql) ColumnTypeKind(t string) int { - switch strings.ToUpper(t) { - case "DATE", "DATETIME", "DATETIME2", "TIME": - return schemas.TIME_TYPE - case 
"VARCHAR", "TEXT", "CHAR", "NVARCHAR", "NCHAR", "NTEXT": - return schemas.TEXT_TYPE - case "FLOAT", "REAL", "BIGINT", "DATETIMEOFFSET", "TINYINT", "SMALLINT", "INT": - return schemas.NUMERIC_TYPE - default: - return schemas.UNKNOW_TYPE - } -} - -func (db *mssql) IsReserved(name string) bool { - _, ok := mssqlReservedWords[strings.ToUpper(name)] - return ok -} - -func (db *mssql) SetQuotePolicy(quotePolicy QuotePolicy) { - switch quotePolicy { - case QuotePolicyNone: - q := mssqlQuoter - q.IsReserved = schemas.AlwaysNoReserve - db.quoter = q - case QuotePolicyReserved: - q := mssqlQuoter - q.IsReserved = db.IsReserved - db.quoter = q - case QuotePolicyAlways: - fallthrough - default: - db.quoter = mssqlQuoter - } -} - -func (db *mssql) AutoIncrStr() string { - return "IDENTITY" -} - -func (db *mssql) DropTableSQL(tableName string) (string, bool) { - return fmt.Sprintf("IF EXISTS (SELECT * FROM sysobjects WHERE id = "+ - "object_id(N'%s') and OBJECTPROPERTY(id, N'IsUserTable') = 1) "+ - "DROP TABLE \"%s\"", tableName, tableName), true -} - -func (db *mssql) ModifyColumnSQL(tableName string, col *schemas.Column) string { - s, _ := ColumnString(db.dialect, col, false) - return fmt.Sprintf("ALTER TABLE %s ALTER COLUMN %s", db.quoter.Quote(tableName), s) -} - -func (db *mssql) IndexCheckSQL(tableName, idxName string) (string, []interface{}) { - args := []interface{}{idxName} - sql := "select name from sysindexes where id=object_id('" + tableName + "') and name=?" - return sql, args -} - -func (db *mssql) IsColumnExist(queryer core.Queryer, ctx context.Context, tableName, colName string) (bool, error) { - query := `SELECT "COLUMN_NAME" FROM "INFORMATION_SCHEMA"."COLUMNS" WHERE "TABLE_NAME" = ? 
AND "COLUMN_NAME" = ?` - - return db.HasRecords(queryer, ctx, query, tableName, colName) -} - -func (db *mssql) IsTableExist(queryer core.Queryer, ctx context.Context, tableName string) (bool, error) { - sql := "select * from sysobjects where id = object_id(N'" + tableName + "') and OBJECTPROPERTY(id, N'IsUserTable') = 1" - return db.HasRecords(queryer, ctx, sql) -} - -func (db *mssql) GetColumns(queryer core.Queryer, ctx context.Context, tableName string) ([]string, map[string]*schemas.Column, error) { - args := []interface{}{} - s := `select a.name as name, b.name as ctype,a.max_length,a.precision,a.scale,a.is_nullable as nullable, - "default_is_null" = (CASE WHEN c.text is null THEN 1 ELSE 0 END), - replace(replace(isnull(c.text,''),'(',''),')','') as vdefault, - ISNULL(p.is_primary_key, 0), a.is_identity as is_identity - from sys.columns a - left join sys.types b on a.user_type_id=b.user_type_id - left join sys.syscomments c on a.default_object_id=c.id - LEFT OUTER JOIN (SELECT i.object_id, ic.column_id, i.is_primary_key - FROM sys.indexes i - LEFT JOIN sys.index_columns ic ON ic.object_id = i.object_id AND ic.index_id = i.index_id - WHERE i.is_primary_key = 1 - ) as p on p.object_id = a.object_id AND p.column_id = a.column_id - where a.object_id=object_id('` + tableName + `')` - - rows, err := queryer.QueryContext(ctx, s, args...) 
- if err != nil { - return nil, nil, err - } - defer rows.Close() - - cols := make(map[string]*schemas.Column) - colSeq := make([]string, 0) - for rows.Next() { - var name, ctype, vdefault string - var maxLen, precision, scale int64 - var nullable, isPK, defaultIsNull, isIncrement bool - err = rows.Scan(&name, &ctype, &maxLen, &precision, &scale, &nullable, &defaultIsNull, &vdefault, &isPK, &isIncrement) - if err != nil { - return nil, nil, err - } - - col := new(schemas.Column) - col.Indexes = make(map[string]int) - col.Name = strings.Trim(name, "` ") - col.Nullable = nullable - col.DefaultIsEmpty = defaultIsNull - if !defaultIsNull { - col.Default = vdefault - } - col.IsPrimaryKey = isPK - col.IsAutoIncrement = isIncrement - ct := strings.ToUpper(ctype) - if ct == "DECIMAL" { - col.Length = precision - col.Length2 = scale - } else { - col.Length = maxLen - } - switch ct { - case "DATETIMEOFFSET": - col.SQLType = schemas.SQLType{Name: schemas.TimeStampz, DefaultLength: 0, DefaultLength2: 0} - case "NVARCHAR": - col.SQLType = schemas.SQLType{Name: schemas.NVarchar, DefaultLength: 0, DefaultLength2: 0} - if col.Length > 0 { - col.Length /= 2 - col.Length2 /= 2 - } - case "DATETIME2": - col.SQLType = schemas.SQLType{Name: schemas.DateTime, DefaultLength: 7, DefaultLength2: 0} - col.Length = scale - case "DATETIME": - col.SQLType = schemas.SQLType{Name: schemas.DateTime, DefaultLength: 3, DefaultLength2: 0} - col.Length = scale - case "IMAGE": - col.SQLType = schemas.SQLType{Name: schemas.VarBinary, DefaultLength: 0, DefaultLength2: 0} - case "NCHAR": - if col.Length > 0 { - col.Length /= 2 - col.Length2 /= 2 - } - fallthrough - default: - if _, ok := schemas.SqlTypes[ct]; ok { - col.SQLType = schemas.SQLType{Name: ct, DefaultLength: 0, DefaultLength2: 0} - } else { - return nil, nil, fmt.Errorf("Unknown colType %v for %v - %v", ct, tableName, col.Name) - } - } - - cols[col.Name] = col - colSeq = append(colSeq, col.Name) - } - if rows.Err() != nil { - return nil, nil, 
rows.Err() - } - return colSeq, cols, nil -} - -func (db *mssql) GetTables(queryer core.Queryer, ctx context.Context) ([]*schemas.Table, error) { - args := []interface{}{} - s := `select name from sysobjects where xtype ='U'` - - rows, err := queryer.QueryContext(ctx, s, args...) - if err != nil { - return nil, err - } - defer rows.Close() - - tables := make([]*schemas.Table, 0) - for rows.Next() { - table := schemas.NewEmptyTable() - var name string - err = rows.Scan(&name) - if err != nil { - return nil, err - } - table.Name = strings.Trim(name, "` ") - tables = append(tables, table) - } - if rows.Err() != nil { - return nil, rows.Err() - } - return tables, nil -} - -func (db *mssql) GetIndexes(queryer core.Queryer, ctx context.Context, tableName string) (map[string]*schemas.Index, error) { - args := []interface{}{tableName} - s := `SELECT -IXS.NAME AS [INDEX_NAME], -C.NAME AS [COLUMN_NAME], -IXS.is_unique AS [IS_UNIQUE] -FROM SYS.INDEXES IXS -INNER JOIN SYS.INDEX_COLUMNS IXCS -ON IXS.OBJECT_ID=IXCS.OBJECT_ID AND IXS.INDEX_ID = IXCS.INDEX_ID -INNER JOIN SYS.COLUMNS C ON IXS.OBJECT_ID=C.OBJECT_ID -AND IXCS.COLUMN_ID=C.COLUMN_ID -WHERE IXS.TYPE_DESC='NONCLUSTERED' and OBJECT_NAME(IXS.OBJECT_ID) =? -` - - rows, err := queryer.QueryContext(ctx, s, args...) 
- if err != nil { - return nil, err - } - defer rows.Close() - - indexes := make(map[string]*schemas.Index) - for rows.Next() { - var indexType int - var indexName, colName, isUnique string - - err = rows.Scan(&indexName, &colName, &isUnique) - if err != nil { - return nil, err - } - - i, err := strconv.ParseBool(isUnique) - if err != nil { - return nil, err - } - - if i { - indexType = schemas.UniqueType - } else { - indexType = schemas.IndexType - } - - colName = strings.Trim(colName, "` ") - var isRegular bool - if (strings.HasPrefix(indexName, "IDX_"+tableName) || strings.HasPrefix(indexName, "UQE_"+tableName)) && len(indexName) > (5+len(tableName)) { - indexName = indexName[5+len(tableName):] - isRegular = true - } - - var index *schemas.Index - var ok bool - if index, ok = indexes[indexName]; !ok { - index = new(schemas.Index) - index.Type = indexType - index.Name = indexName - index.IsRegular = isRegular - indexes[indexName] = index - } - index.AddColumn(colName) - } - if rows.Err() != nil { - return nil, rows.Err() - } - return indexes, nil -} - -func (db *mssql) CreateTableSQL(ctx context.Context, queryer core.Queryer, table *schemas.Table, tableName string) (string, bool, error) { - if tableName == "" { - tableName = table.Name - } - - quoter := db.dialect.Quoter() - var b strings.Builder - b.WriteString("IF NOT EXISTS (SELECT [name] FROM sys.tables WHERE [name] = '") - quoter.QuoteTo(&b, tableName) - b.WriteString("' ) CREATE TABLE ") - quoter.QuoteTo(&b, tableName) - b.WriteString(" (") - - for i, colName := range table.ColumnsSeq() { - col := table.GetColumn(colName) - s, _ := ColumnString(db.dialect, col, col.IsPrimaryKey && len(table.PrimaryKeys) == 1) - b.WriteString(s) - - if i != len(table.ColumnsSeq())-1 { - b.WriteString(", ") - } - } - - if len(table.PrimaryKeys) > 1 { - b.WriteString(", PRIMARY KEY (") - b.WriteString(quoter.Join(table.PrimaryKeys, ",")) - b.WriteString(")") - } - - b.WriteString(")") - - return b.String(), true, nil -} - 
-func (db *mssql) ForUpdateSQL(query string) string { - return query -} - -func (db *mssql) Filters() []Filter { - return []Filter{} -} - -type odbcDriver struct { - baseDriver -} - -func (p *odbcDriver) Features() *DriverFeatures { - return &DriverFeatures{ - SupportReturnInsertedID: false, - } -} - -func (p *odbcDriver) Parse(driverName, dataSourceName string) (*URI, error) { - var dbName string - - if strings.HasPrefix(dataSourceName, "sqlserver://") { - u, err := url.Parse(dataSourceName) - if err != nil { - return nil, err - } - dbName = u.Query().Get("database") - } else { - kv := strings.Split(dataSourceName, ";") - for _, c := range kv { - vv := strings.Split(strings.TrimSpace(c), "=") - if len(vv) == 2 { - if strings.ToLower(vv[0]) == "database" { - dbName = vv[1] - } - } - } - } - if dbName == "" { - return nil, errors.New("no db name provided") - } - return &URI{DBName: dbName, DBType: schemas.MSSQL}, nil -} - -func (p *odbcDriver) GenScanResult(colType string) (interface{}, error) { - switch colType { - case "VARCHAR", "TEXT", "CHAR", "NVARCHAR", "NCHAR", "NTEXT": - fallthrough - case "DATE", "DATETIME", "DATETIME2", "TIME": - var s sql.NullString - return &s, nil - case "FLOAT", "REAL": - var s sql.NullFloat64 - return &s, nil - case "BIGINT", "DATETIMEOFFSET": - var s sql.NullInt64 - return &s, nil - case "TINYINT", "SMALLINT", "INT": - var s sql.NullInt32 - return &s, nil - - default: - var r sql.RawBytes - return &r, nil - } -} diff --git a/vendor/xorm.io/xorm/dialects/mysql.go b/vendor/xorm.io/xorm/dialects/mysql.go deleted file mode 100644 index 6ed4a1be..00000000 --- a/vendor/xorm.io/xorm/dialects/mysql.go +++ /dev/null @@ -1,823 +0,0 @@ -// Copyright 2015 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package dialects - -import ( - "context" - "database/sql" - "errors" - "fmt" - "regexp" - "strconv" - "strings" - "time" - - "xorm.io/xorm/core" - "xorm.io/xorm/schemas" -) - -var ( - mysqlReservedWords = map[string]bool{ - "ADD": true, - "ALL": true, - "ALTER": true, - "ANALYZE": true, - "AND": true, - "AS": true, - "ASC": true, - "ASENSITIVE": true, - "BEFORE": true, - "BETWEEN": true, - "BIGINT": true, - "BINARY": true, - "BLOB": true, - "BOTH": true, - "BY": true, - "CALL": true, - "CASCADE": true, - "CASE": true, - "CHANGE": true, - "CHAR": true, - "CHARACTER": true, - "CHECK": true, - "COLLATE": true, - "COLUMN": true, - "CONDITION": true, - "CONNECTION": true, - "CONSTRAINT": true, - "CONTINUE": true, - "CONVERT": true, - "CREATE": true, - "CROSS": true, - "CURRENT_DATE": true, - "CURRENT_TIME": true, - "CURRENT_TIMESTAMP": true, - "CURRENT_USER": true, - "CURSOR": true, - "DATABASE": true, - "DATABASES": true, - "DAY_HOUR": true, - "DAY_MICROSECOND": true, - "DAY_MINUTE": true, - "DAY_SECOND": true, - "DEC": true, - "DECIMAL": true, - "DECLARE": true, - "DEFAULT": true, - "DELAYED": true, - "DELETE": true, - "DESC": true, - "DESCRIBE": true, - "DETERMINISTIC": true, - "DISTINCT": true, - "DISTINCTROW": true, - "DIV": true, - "DOUBLE": true, - "DROP": true, - "DUAL": true, - "EACH": true, - "ELSE": true, - "ELSEIF": true, - "ENCLOSED": true, - "ESCAPED": true, - "EXISTS": true, - "EXIT": true, - "EXPLAIN": true, - "FALSE": true, - "FETCH": true, - "FLOAT": true, - "FLOAT4": true, - "FLOAT8": true, - "FOR": true, - "FORCE": true, - "FOREIGN": true, - "FROM": true, - "FULLTEXT": true, - "GOTO": true, - "GRANT": true, - "GROUP": true, - "HAVING": true, - "HIGH_PRIORITY": true, - "HOUR_MICROSECOND": true, - "HOUR_MINUTE": true, - "HOUR_SECOND": true, - "IF": true, - "IGNORE": true, - "IN": true, "INDEX": true, - "INFILE": true, "INNER": true, "INOUT": true, - "INSENSITIVE": true, "INSERT": true, "INT": true, - "INT1": true, "INT2": true, "INT3": true, - 
"INT4": true, "INT8": true, "INTEGER": true, - "INTERVAL": true, "INTO": true, "IS": true, - "ITERATE": true, "JOIN": true, "KEY": true, - "KEYS": true, "KILL": true, "LABEL": true, - "LEADING": true, "LEAVE": true, "LEFT": true, - "LIKE": true, "LIMIT": true, "LINEAR": true, - "LINES": true, "LOAD": true, "LOCALTIME": true, - "LOCALTIMESTAMP": true, "LOCK": true, "LONG": true, - "LONGBLOB": true, "LONGTEXT": true, "LOOP": true, - "LOW_PRIORITY": true, "MATCH": true, "MEDIUMBLOB": true, - "MEDIUMINT": true, "MEDIUMTEXT": true, "MIDDLEINT": true, - "MINUTE_MICROSECOND": true, "MINUTE_SECOND": true, "MOD": true, - "MODIFIES": true, "NATURAL": true, "NOT": true, - "NO_WRITE_TO_BINLOG": true, "NULL": true, "NUMERIC": true, - "ON OPTIMIZE": true, "OPTION": true, - "OPTIONALLY": true, "OR": true, "ORDER": true, - "OUT": true, "OUTER": true, "OUTFILE": true, - "PRECISION": true, "PRIMARY": true, "PROCEDURE": true, - "PURGE": true, "RAID0": true, "RANGE": true, - "READ": true, "READS": true, "REAL": true, - "REFERENCES": true, "REGEXP": true, "RELEASE": true, - "RENAME": true, "REPEAT": true, "REPLACE": true, - "REQUIRE": true, "RESTRICT": true, "RETURN": true, - "REVOKE": true, "RIGHT": true, "RLIKE": true, - "SCHEMA": true, "SCHEMAS": true, "SECOND_MICROSECOND": true, - "SELECT": true, "SENSITIVE": true, "SEPARATOR": true, - "SET": true, "SHOW": true, "SMALLINT": true, - "SPATIAL": true, "SPECIFIC": true, "SQL": true, - "SQLEXCEPTION": true, "SQLSTATE": true, "SQLWARNING": true, - "SQL_BIG_RESULT": true, "SQL_CALC_FOUND_ROWS": true, "SQL_SMALL_RESULT": true, - "SSL": true, "STARTING": true, "STRAIGHT_JOIN": true, - "TABLE": true, "TERMINATED": true, "THEN": true, - "TINYBLOB": true, "TINYINT": true, "TINYTEXT": true, - "TO": true, "TRAILING": true, "TRIGGER": true, - "TRUE": true, "UNDO": true, "UNION": true, - "UNIQUE": true, "UNLOCK": true, "UNSIGNED": true, - "UPDATE": true, "USAGE": true, "USE": true, - "USING": true, "UTC_DATE": true, "UTC_TIME": true, - 
"UTC_TIMESTAMP": true, "VALUES": true, "VARBINARY": true, - "VARCHAR": true, - "VARCHARACTER": true, - "VARYING": true, - "WHEN": true, - "WHERE": true, - "WHILE": true, - "WITH": true, - "WRITE": true, - "X509": true, - "XOR": true, - "YEAR_MONTH": true, - "ZEROFILL": true, - } - - mysqlQuoter = schemas.Quoter{ - Prefix: '`', - Suffix: '`', - IsReserved: schemas.AlwaysReserve, - } -) - -type mysql struct { - Base - rowFormat string -} - -func (db *mysql) Init(uri *URI) error { - db.quoter = mysqlQuoter - return db.Base.Init(db, uri) -} - -var mysqlColAliases = map[string]string{ - "numeric": "decimal", -} - -// Alias returns a alias of column -func (db *mysql) Alias(col string) string { - v, ok := mysqlColAliases[strings.ToLower(col)] - if ok { - return v - } - return col -} - -func (db *mysql) Version(ctx context.Context, queryer core.Queryer) (*schemas.Version, error) { - rows, err := queryer.QueryContext(ctx, "SELECT @@VERSION") - if err != nil { - return nil, err - } - defer rows.Close() - - var version string - if !rows.Next() { - if rows.Err() != nil { - return nil, rows.Err() - } - return nil, errors.New("unknow version") - } - - if err := rows.Scan(&version); err != nil { - return nil, err - } - - fields := strings.Split(version, "-") - if len(fields) == 3 && fields[1] == "TiDB" { - // 5.7.25-TiDB-v3.0.3 - return &schemas.Version{ - Number: strings.TrimPrefix(fields[2], "v"), - Level: fields[0], - Edition: fields[1], - }, nil - } - - var edition string - if len(fields) == 2 { - edition = fields[1] - } - - return &schemas.Version{ - Number: fields[0], - Edition: edition, - }, nil -} - -func (db *mysql) Features() *DialectFeatures { - return &DialectFeatures{ - AutoincrMode: IncrAutoincrMode, - } -} - -func (db *mysql) SetParams(params map[string]string) { - rowFormat, ok := params["rowFormat"] - if ok { - t := strings.ToUpper(rowFormat) - switch t { - case "COMPACT": - fallthrough - case "REDUNDANT": - fallthrough - case "DYNAMIC": - fallthrough - case 
"COMPRESSED": - db.rowFormat = t - } - } -} - -func (db *mysql) SQLType(c *schemas.Column) string { - var res string - var isUnsigned bool - switch t := c.SQLType.Name; t { - case schemas.Bool: - res = schemas.TinyInt - c.Length = 1 - case schemas.Serial: - c.IsAutoIncrement = true - c.IsPrimaryKey = true - c.Nullable = false - res = schemas.Int - case schemas.BigSerial: - c.IsAutoIncrement = true - c.IsPrimaryKey = true - c.Nullable = false - res = schemas.BigInt - case schemas.Bytea: - res = schemas.Blob - case schemas.TimeStampz: - res = schemas.Char - c.Length = 64 - case schemas.Enum: // mysql enum - res = schemas.Enum - res += "(" - opts := "" - for v := range c.EnumOptions { - opts += fmt.Sprintf(",'%v'", v) - } - res += strings.TrimLeft(opts, ",") - res += ")" - case schemas.Set: // mysql set - res = schemas.Set - res += "(" - opts := "" - for v := range c.SetOptions { - opts += fmt.Sprintf(",'%v'", v) - } - res += strings.TrimLeft(opts, ",") - res += ")" - case schemas.NVarchar: - res = schemas.Varchar - case schemas.Uuid: - res = schemas.Varchar - c.Length = 40 - case schemas.Json: - res = schemas.Text - case schemas.UnsignedInt: - res = schemas.Int - isUnsigned = true - case schemas.UnsignedBigInt: - res = schemas.BigInt - isUnsigned = true - case schemas.UnsignedMediumInt: - res = schemas.MediumInt - isUnsigned = true - case schemas.UnsignedSmallInt: - res = schemas.SmallInt - isUnsigned = true - case schemas.UnsignedTinyInt: - res = schemas.TinyInt - isUnsigned = true - default: - res = t - } - - hasLen1 := (c.Length > 0) - hasLen2 := (c.Length2 > 0) - - if res == schemas.BigInt && !hasLen1 && !hasLen2 { - c.Length = 20 - hasLen1 = true - } - - if hasLen2 { - res += "(" + strconv.FormatInt(c.Length, 10) + "," + strconv.FormatInt(c.Length2, 10) + ")" - } else if hasLen1 { - res += "(" + strconv.FormatInt(c.Length, 10) + ")" - } - - if isUnsigned { - res += " UNSIGNED" - } - - return res -} - -func (db *mysql) ColumnTypeKind(t string) int { - switch 
strings.ToUpper(t) { - case "DATETIME": - return schemas.TIME_TYPE - case "CHAR", "VARCHAR", "TINYTEXT", "TEXT", "MEDIUMTEXT", "LONGTEXT", "ENUM", "SET": - return schemas.TEXT_TYPE - case "BIGINT", "TINYINT", "SMALLINT", "MEDIUMINT", "INT", "FLOAT", "REAL", "DOUBLE PRECISION", "DECIMAL", "NUMERIC", "BIT": - return schemas.NUMERIC_TYPE - case "BINARY", "VARBINARY", "TINYBLOB", "BLOB", "MEDIUMBLOB", "LONGBLOB": - return schemas.BLOB_TYPE - default: - return schemas.UNKNOW_TYPE - } -} - -func (db *mysql) IsReserved(name string) bool { - _, ok := mysqlReservedWords[strings.ToUpper(name)] - return ok -} - -func (db *mysql) AutoIncrStr() string { - return "AUTO_INCREMENT" -} - -func (db *mysql) IndexCheckSQL(tableName, idxName string) (string, []interface{}) { - args := []interface{}{db.uri.DBName, tableName, idxName} - sql := "SELECT `INDEX_NAME` FROM `INFORMATION_SCHEMA`.`STATISTICS`" - sql += " WHERE `TABLE_SCHEMA` = ? AND `TABLE_NAME` = ? AND `INDEX_NAME`=?" - return sql, args -} - -func (db *mysql) IsTableExist(queryer core.Queryer, ctx context.Context, tableName string) (bool, error) { - sql := "SELECT `TABLE_NAME` from `INFORMATION_SCHEMA`.`TABLES` WHERE `TABLE_SCHEMA`=? and `TABLE_NAME`=?" 
- return db.HasRecords(queryer, ctx, sql, db.uri.DBName, tableName) -} - -func (db *mysql) AddColumnSQL(tableName string, col *schemas.Column) string { - quoter := db.dialect.Quoter() - s, _ := ColumnString(db, col, true) - sql := fmt.Sprintf("ALTER TABLE %v ADD %v", quoter.Quote(tableName), s) - if len(col.Comment) > 0 { - sql += " COMMENT '" + col.Comment + "'" - } - return sql -} - -func (db *mysql) GetColumns(queryer core.Queryer, ctx context.Context, tableName string) ([]string, map[string]*schemas.Column, error) { - args := []interface{}{db.uri.DBName, tableName} - alreadyQuoted := "(INSTR(VERSION(), 'maria') > 0 && " + - "(SUBSTRING_INDEX(VERSION(), '.', 1) > 10 || " + - "(SUBSTRING_INDEX(VERSION(), '.', 1) = 10 && " + - "(SUBSTRING_INDEX(SUBSTRING(VERSION(), 4), '.', 1) > 2 || " + - "(SUBSTRING_INDEX(SUBSTRING(VERSION(), 4), '.', 1) = 2 && " + - "SUBSTRING_INDEX(SUBSTRING(VERSION(), 6), '-', 1) >= 7)))))" - s := "SELECT `COLUMN_NAME`, `IS_NULLABLE`, `COLUMN_DEFAULT`, `COLUMN_TYPE`," + - " `COLUMN_KEY`, `EXTRA`, `COLUMN_COMMENT`, `CHARACTER_MAXIMUM_LENGTH`, " + - alreadyQuoted + " AS NEEDS_QUOTE " + - "FROM `INFORMATION_SCHEMA`.`COLUMNS` WHERE `TABLE_SCHEMA` = ? AND `TABLE_NAME` = ?" + - " ORDER BY `COLUMNS`.ORDINAL_POSITION ASC" - - rows, err := queryer.QueryContext(ctx, s, args...) 
- if err != nil { - return nil, nil, err - } - defer rows.Close() - - cols := make(map[string]*schemas.Column) - colSeq := make([]string, 0) - for rows.Next() { - col := new(schemas.Column) - col.Indexes = make(map[string]int) - - var columnName, nullableStr, colType, colKey, extra, comment string - var alreadyQuoted, isUnsigned bool - var colDefault, maxLength *string - err = rows.Scan(&columnName, &nullableStr, &colDefault, &colType, &colKey, &extra, &comment, &maxLength, &alreadyQuoted) - if err != nil { - return nil, nil, err - } - col.Name = strings.Trim(columnName, "` ") - col.Comment = comment - if nullableStr == "YES" { - col.Nullable = true - } - - if colDefault != nil && (!alreadyQuoted || *colDefault != "NULL") { - col.Default = *colDefault - col.DefaultIsEmpty = false - } else { - col.DefaultIsEmpty = true - } - - fields := strings.Fields(colType) - if len(fields) == 2 && fields[1] == "unsigned" { - isUnsigned = true - } - colType = fields[0] - cts := strings.Split(colType, "(") - colName := cts[0] - // Remove the /* mariadb-5.3 */ suffix from coltypes - colName = strings.TrimSuffix(colName, "/* mariadb-5.3 */") - colType = strings.ToUpper(colName) - var len1, len2 int64 - if len(cts) == 2 { - idx := strings.Index(cts[1], ")") - if colType == schemas.Enum && cts[1][0] == '\'' { // enum - options := strings.Split(cts[1][0:idx], ",") - col.EnumOptions = make(map[string]int) - for k, v := range options { - v = strings.TrimSpace(v) - v = strings.Trim(v, "'") - col.EnumOptions[v] = k - } - } else if colType == schemas.Set && cts[1][0] == '\'' { - options := strings.Split(cts[1][0:idx], ",") - col.SetOptions = make(map[string]int) - for k, v := range options { - v = strings.TrimSpace(v) - v = strings.Trim(v, "'") - col.SetOptions[v] = k - } - } else { - lens := strings.Split(cts[1][0:idx], ",") - len1, err = strconv.ParseInt(strings.TrimSpace(lens[0]), 10, 64) - if err != nil { - return nil, nil, err - } - if len(lens) == 2 { - len2, err = 
strconv.ParseInt(lens[1], 10, 64) - if err != nil { - return nil, nil, err - } - } - } - } else { - switch colType { - case "MEDIUMTEXT", "LONGTEXT", "TEXT": - len1, err = strconv.ParseInt(*maxLength, 10, 64) - if err != nil { - return nil, nil, err - } - } - } - if isUnsigned { - colType = "UNSIGNED " + colType - } - col.Length = len1 - col.Length2 = len2 - if _, ok := schemas.SqlTypes[colType]; ok { - col.SQLType = schemas.SQLType{Name: colType, DefaultLength: len1, DefaultLength2: len2} - } else { - return nil, nil, fmt.Errorf("unknown colType %v", colType) - } - - if colKey == "PRI" { - col.IsPrimaryKey = true - } - // if colKey == "UNI" { - // col.is - // } - - if extra == "auto_increment" { - col.IsAutoIncrement = true - } - - if !col.DefaultIsEmpty { - if !alreadyQuoted && col.SQLType.IsText() { - col.Default = "'" + col.Default + "'" - } else if col.SQLType.IsTime() && !alreadyQuoted && col.Default != "CURRENT_TIMESTAMP" { - col.Default = "'" + col.Default + "'" - } - } - cols[col.Name] = col - colSeq = append(colSeq, col.Name) - } - if rows.Err() != nil { - return nil, nil, rows.Err() - } - return colSeq, cols, nil -} - -func (db *mysql) GetTables(queryer core.Queryer, ctx context.Context) ([]*schemas.Table, error) { - args := []interface{}{db.uri.DBName} - s := "SELECT `TABLE_NAME`, `ENGINE`, `AUTO_INCREMENT`, `TABLE_COMMENT` from " + - "`INFORMATION_SCHEMA`.`TABLES` WHERE `TABLE_SCHEMA`=? AND (`ENGINE`='MyISAM' OR `ENGINE` = 'InnoDB' OR `ENGINE` = 'TokuDB')" - - rows, err := queryer.QueryContext(ctx, s, args...) 
- if err != nil { - return nil, err - } - defer rows.Close() - - tables := make([]*schemas.Table, 0) - for rows.Next() { - table := schemas.NewEmptyTable() - var name, engine string - var autoIncr, comment *string - err = rows.Scan(&name, &engine, &autoIncr, &comment) - if err != nil { - return nil, err - } - - table.Name = name - if comment != nil { - table.Comment = *comment - } - table.StoreEngine = engine - tables = append(tables, table) - } - if rows.Err() != nil { - return nil, rows.Err() - } - return tables, nil -} - -func (db *mysql) SetQuotePolicy(quotePolicy QuotePolicy) { - switch quotePolicy { - case QuotePolicyNone: - q := mysqlQuoter - q.IsReserved = schemas.AlwaysNoReserve - db.quoter = q - case QuotePolicyReserved: - q := mysqlQuoter - q.IsReserved = db.IsReserved - db.quoter = q - case QuotePolicyAlways: - fallthrough - default: - db.quoter = mysqlQuoter - } -} - -func (db *mysql) GetIndexes(queryer core.Queryer, ctx context.Context, tableName string) (map[string]*schemas.Index, error) { - args := []interface{}{db.uri.DBName, tableName} - s := "SELECT `INDEX_NAME`, `NON_UNIQUE`, `COLUMN_NAME` FROM `INFORMATION_SCHEMA`.`STATISTICS` WHERE `TABLE_SCHEMA` = ? AND `TABLE_NAME` = ? ORDER BY `SEQ_IN_INDEX`" - - rows, err := queryer.QueryContext(ctx, s, args...) 
- if err != nil { - return nil, err - } - defer rows.Close() - - indexes := make(map[string]*schemas.Index) - for rows.Next() { - var indexType int - var indexName, colName, nonUnique string - err = rows.Scan(&indexName, &nonUnique, &colName) - if err != nil { - return nil, err - } - - if indexName == "PRIMARY" { - continue - } - - if nonUnique == "YES" || nonUnique == "1" { - indexType = schemas.IndexType - } else { - indexType = schemas.UniqueType - } - - colName = strings.Trim(colName, "` ") - var isRegular bool - if strings.HasPrefix(indexName, "IDX_"+tableName) || strings.HasPrefix(indexName, "UQE_"+tableName) { - indexName = indexName[5+len(tableName):] - isRegular = true - } - - var index *schemas.Index - var ok bool - if index, ok = indexes[indexName]; !ok { - index = new(schemas.Index) - index.IsRegular = isRegular - index.Type = indexType - index.Name = indexName - indexes[indexName] = index - } - index.AddColumn(colName) - } - if rows.Err() != nil { - return nil, rows.Err() - } - return indexes, nil -} - -func (db *mysql) CreateTableSQL(ctx context.Context, queryer core.Queryer, table *schemas.Table, tableName string) (string, bool, error) { - if tableName == "" { - tableName = table.Name - } - - quoter := db.dialect.Quoter() - var b strings.Builder - b.WriteString("CREATE TABLE IF NOT EXISTS ") - quoter.QuoteTo(&b, tableName) - b.WriteString(" (") - - for i, colName := range table.ColumnsSeq() { - col := table.GetColumn(colName) - s, _ := ColumnString(db.dialect, col, col.IsPrimaryKey && len(table.PrimaryKeys) == 1) - b.WriteString(s) - - if len(col.Comment) > 0 { - b.WriteString(" COMMENT '") - b.WriteString(col.Comment) - b.WriteString("'") - } - - if i != len(table.ColumnsSeq())-1 { - b.WriteString(", ") - } - } - - if len(table.PrimaryKeys) > 1 { - b.WriteString(", PRIMARY KEY (") - b.WriteString(quoter.Join(table.PrimaryKeys, ",")) - b.WriteString(")") - } - - b.WriteString(")") - - if table.StoreEngine != "" { - b.WriteString(" ENGINE=") - 
b.WriteString(table.StoreEngine) - } - - charset := table.Charset - if len(charset) == 0 { - charset = db.URI().Charset - } - if len(charset) != 0 { - b.WriteString(" DEFAULT CHARSET ") - b.WriteString(charset) - } - - if db.rowFormat != "" { - b.WriteString(" ROW_FORMAT=") - b.WriteString(db.rowFormat) - } - - if table.Comment != "" { - b.WriteString(" COMMENT='") - b.WriteString(table.Comment) - b.WriteString("'") - } - - return b.String(), true, nil -} - -func (db *mysql) Filters() []Filter { - return []Filter{} -} - -type mysqlDriver struct { - baseDriver -} - -func (p *mysqlDriver) Features() *DriverFeatures { - return &DriverFeatures{ - SupportReturnInsertedID: true, - } -} - -func (p *mysqlDriver) Parse(driverName, dataSourceName string) (*URI, error) { - dsnPattern := regexp.MustCompile( - `^(?:(?P.*?)(?::(?P.*))?@)?` + // [user[:password]@] - `(?:(?P[^\(]*)(?:\((?P[^\)]*)\))?)?` + // [net[(addr)]] - `\/(?P.*?)` + // /dbname - `(?:\?(?P[^\?]*))?$`) // [?param1=value1¶mN=valueN] - matches := dsnPattern.FindStringSubmatch(dataSourceName) - // tlsConfigRegister := make(map[string]*tls.Config) - names := dsnPattern.SubexpNames() - - uri := &URI{DBType: schemas.MYSQL} - - for i, match := range matches { - switch names[i] { - case "dbname": - uri.DBName = match - case "params": - if len(match) > 0 { - kvs := strings.Split(match, "&") - for _, kv := range kvs { - splits := strings.Split(kv, "=") - if len(splits) == 2 { - if splits[0] == "charset" { - uri.Charset = splits[1] - } - } - } - } - } - } - return uri, nil -} - -func (p *mysqlDriver) GenScanResult(colType string) (interface{}, error) { - switch colType { - case "CHAR", "VARCHAR", "TINYTEXT", "TEXT", "MEDIUMTEXT", "LONGTEXT", "ENUM", "SET": - var s sql.NullString - return &s, nil - case "BIGINT": - var s sql.NullInt64 - return &s, nil - case "TINYINT", "SMALLINT", "MEDIUMINT", "INT": - var s sql.NullInt32 - return &s, nil - case "FLOAT", "REAL", "DOUBLE PRECISION", "DOUBLE": - var s sql.NullFloat64 - 
return &s, nil - case "DECIMAL", "NUMERIC": - var s sql.NullString - return &s, nil - case "DATETIME", "TIMESTAMP": - var s sql.NullTime - return &s, nil - case "BIT": - var s sql.RawBytes - return &s, nil - case "BINARY", "VARBINARY", "TINYBLOB", "BLOB", "MEDIUMBLOB", "LONGBLOB": - var r sql.RawBytes - return &r, nil - default: - var r sql.RawBytes - return &r, nil - } -} - -type mymysqlDriver struct { - mysqlDriver -} - -func (p *mymysqlDriver) Parse(driverName, dataSourceName string) (*URI, error) { - uri := &URI{DBType: schemas.MYSQL} - - pd := strings.SplitN(dataSourceName, "*", 2) - if len(pd) == 2 { - // Parse protocol part of URI - p := strings.SplitN(pd[0], ":", 2) - if len(p) != 2 { - return nil, errors.New("wrong protocol part of URI") - } - uri.Proto = p[0] - options := strings.Split(p[1], ",") - uri.Raddr = options[0] - for _, o := range options[1:] { - kv := strings.SplitN(o, "=", 2) - var k, v string - if len(kv) == 2 { - k, v = kv[0], kv[1] - } else { - k, v = o, "true" - } - switch k { - case "laddr": - uri.Laddr = v - case "timeout": - to, err := time.ParseDuration(v) - if err != nil { - return nil, err - } - uri.Timeout = to - default: - return nil, errors.New("unknown option: " + k) - } - } - // Remove protocol part - pd = pd[1:] - } - // Parse database part of URI - dup := strings.SplitN(pd[0], "/", 3) - if len(dup) != 3 { - return nil, errors.New("Wrong database part of URI") - } - uri.DBName = dup[0] - uri.User = dup[1] - uri.Passwd = dup[2] - - return uri, nil -} diff --git a/vendor/xorm.io/xorm/dialects/oracle.go b/vendor/xorm.io/xorm/dialects/oracle.go deleted file mode 100644 index 8328ff15..00000000 --- a/vendor/xorm.io/xorm/dialects/oracle.go +++ /dev/null @@ -1,934 +0,0 @@ -// Copyright 2015 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package dialects - -import ( - "context" - "database/sql" - "errors" - "fmt" - "regexp" - "strconv" - "strings" - - "xorm.io/xorm/core" - "xorm.io/xorm/schemas" -) - -var ( - oracleReservedWords = map[string]bool{ - "ACCESS": true, - "ACCOUNT": true, - "ACTIVATE": true, - "ADD": true, - "ADMIN": true, - "ADVISE": true, - "AFTER": true, - "ALL": true, - "ALL_ROWS": true, - "ALLOCATE": true, - "ALTER": true, - "ANALYZE": true, - "AND": true, - "ANY": true, - "ARCHIVE": true, - "ARCHIVELOG": true, - "ARRAY": true, - "AS": true, - "ASC": true, - "AT": true, - "AUDIT": true, - "AUTHENTICATED": true, - "AUTHORIZATION": true, - "AUTOEXTEND": true, - "AUTOMATIC": true, - "BACKUP": true, - "BECOME": true, - "BEFORE": true, - "BEGIN": true, - "BETWEEN": true, - "BFILE": true, - "BITMAP": true, - "BLOB": true, - "BLOCK": true, - "BODY": true, - "BY": true, - "CACHE": true, - "CACHE_INSTANCES": true, - "CANCEL": true, - "CASCADE": true, - "CAST": true, - "CFILE": true, - "CHAINED": true, - "CHANGE": true, - "CHAR": true, - "CHAR_CS": true, - "CHARACTER": true, - "CHECK": true, - "CHECKPOINT": true, - "CHOOSE": true, - "CHUNK": true, - "CLEAR": true, - "CLOB": true, - "CLONE": true, - "CLOSE": true, - "CLOSE_CACHED_OPEN_CURSORS": true, - "CLUSTER": true, - "COALESCE": true, - "COLUMN": true, - "COLUMNS": true, - "COMMENT": true, - "COMMIT": true, - "COMMITTED": true, - "COMPATIBILITY": true, - "COMPILE": true, - "COMPLETE": true, - "COMPOSITE_LIMIT": true, - "COMPRESS": true, - "COMPUTE": true, - "CONNECT": true, - "CONNECT_TIME": true, - "CONSTRAINT": true, - "CONSTRAINTS": true, - "CONTENTS": true, - "CONTINUE": true, - "CONTROLFILE": true, - "CONVERT": true, - "COST": true, - "CPU_PER_CALL": true, - "CPU_PER_SESSION": true, - "CREATE": true, - "CURRENT": true, - "CURRENT_SCHEMA": true, - "CURREN_USER": true, - "CURSOR": true, - "CYCLE": true, - "DANGLING": true, - "DATABASE": true, - "DATAFILE": true, - "DATAFILES": true, - "DATAOBJNO": true, - "DATE": true, - "DBA": 
true, - "DBHIGH": true, - "DBLOW": true, - "DBMAC": true, - "DEALLOCATE": true, - "DEBUG": true, - "DEC": true, - "DECIMAL": true, - "DECLARE": true, - "DEFAULT": true, - "DEFERRABLE": true, - "DEFERRED": true, - "DEGREE": true, - "DELETE": true, - "DEREF": true, - "DESC": true, - "DIRECTORY": true, - "DISABLE": true, - "DISCONNECT": true, - "DISMOUNT": true, - "DISTINCT": true, - "DISTRIBUTED": true, - "DML": true, - "DOUBLE": true, - "DROP": true, - "DUMP": true, - "EACH": true, - "ELSE": true, - "ENABLE": true, - "END": true, - "ENFORCE": true, - "ENTRY": true, - "ESCAPE": true, - "EXCEPT": true, - "EXCEPTIONS": true, - "EXCHANGE": true, - "EXCLUDING": true, - "EXCLUSIVE": true, - "EXECUTE": true, - "EXISTS": true, - "EXPIRE": true, - "EXPLAIN": true, - "EXTENT": true, - "EXTENTS": true, - "EXTERNALLY": true, - "FAILED_LOGIN_ATTEMPTS": true, - "FALSE": true, - "FAST": true, - "FILE": true, - "FIRST_ROWS": true, - "FLAGGER": true, - "FLOAT": true, - "FLOB": true, - "FLUSH": true, - "FOR": true, - "FORCE": true, - "FOREIGN": true, - "FREELIST": true, - "FREELISTS": true, - "FROM": true, - "FULL": true, - "FUNCTION": true, - "GLOBAL": true, - "GLOBALLY": true, - "GLOBAL_NAME": true, - "GRANT": true, - "GROUP": true, - "GROUPS": true, - "HASH": true, - "HASHKEYS": true, - "HAVING": true, - "HEADER": true, - "HEAP": true, - "IDENTIFIED": true, - "IDGENERATORS": true, - "IDLE_TIME": true, - "IF": true, - "IMMEDIATE": true, - "IN": true, - "INCLUDING": true, - "INCREMENT": true, - "INDEX": true, - "INDEXED": true, - "INDEXES": true, - "INDICATOR": true, - "IND_PARTITION": true, - "INITIAL": true, - "INITIALLY": true, - "INITRANS": true, - "INSERT": true, - "INSTANCE": true, - "INSTANCES": true, - "INSTEAD": true, - "INT": true, - "INTEGER": true, - "INTERMEDIATE": true, - "INTERSECT": true, - "INTO": true, - "IS": true, - "ISOLATION": true, - "ISOLATION_LEVEL": true, - "KEEP": true, - "KEY": true, - "KILL": true, - "LABEL": true, - "LAYER": true, - "LESS": true, - 
"LEVEL": true, - "LIBRARY": true, - "LIKE": true, - "LIMIT": true, - "LINK": true, - "LIST": true, - "LOB": true, - "LOCAL": true, - "LOCK": true, - "LOCKED": true, - "LOG": true, - "LOGFILE": true, - "LOGGING": true, - "LOGICAL_READS_PER_CALL": true, - "LOGICAL_READS_PER_SESSION": true, - "LONG": true, - "MANAGE": true, - "MASTER": true, - "MAX": true, - "MAXARCHLOGS": true, - "MAXDATAFILES": true, - "MAXEXTENTS": true, - "MAXINSTANCES": true, - "MAXLOGFILES": true, - "MAXLOGHISTORY": true, - "MAXLOGMEMBERS": true, - "MAXSIZE": true, - "MAXTRANS": true, - "MAXVALUE": true, - "MIN": true, - "MEMBER": true, - "MINIMUM": true, - "MINEXTENTS": true, - "MINUS": true, - "MINVALUE": true, - "MLSLABEL": true, - "MLS_LABEL_FORMAT": true, - "MODE": true, - "MODIFY": true, - "MOUNT": true, - "MOVE": true, - "MTS_DISPATCHERS": true, - "MULTISET": true, - "NATIONAL": true, - "NCHAR": true, - "NCHAR_CS": true, - "NCLOB": true, - "NEEDED": true, - "NESTED": true, - "NETWORK": true, - "NEW": true, - "NEXT": true, - "NOARCHIVELOG": true, - "NOAUDIT": true, - "NOCACHE": true, - "NOCOMPRESS": true, - "NOCYCLE": true, - "NOFORCE": true, - "NOLOGGING": true, - "NOMAXVALUE": true, - "NOMINVALUE": true, - "NONE": true, - "NOORDER": true, - "NOOVERRIDE": true, - "NOPARALLEL": true, - "NOREVERSE": true, - "NORMAL": true, - "NOSORT": true, - "NOT": true, - "NOTHING": true, - "NOWAIT": true, - "NULL": true, - "NUMBER": true, - "NUMERIC": true, - "NVARCHAR2": true, - "OBJECT": true, - "OBJNO": true, - "OBJNO_REUSE": true, - "OF": true, - "OFF": true, - "OFFLINE": true, - "OID": true, - "OIDINDEX": true, - "OLD": true, - "ON": true, - "ONLINE": true, - "ONLY": true, - "OPCODE": true, - "OPEN": true, - "OPTIMAL": true, - "OPTIMIZER_GOAL": true, - "OPTION": true, - "OR": true, - "ORDER": true, - "ORGANIZATION": true, - "OSLABEL": true, - "OVERFLOW": true, - "OWN": true, - "PACKAGE": true, - "PARALLEL": true, - "PARTITION": true, - "PASSWORD": true, - "PASSWORD_GRACE_TIME": true, - 
"PASSWORD_LIFE_TIME": true, - "PASSWORD_LOCK_TIME": true, - "PASSWORD_REUSE_MAX": true, - "PASSWORD_REUSE_TIME": true, - "PASSWORD_VERIFY_FUNCTION": true, - "PCTFREE": true, - "PCTINCREASE": true, - "PCTTHRESHOLD": true, - "PCTUSED": true, - "PCTVERSION": true, - "PERCENT": true, - "PERMANENT": true, - "PLAN": true, - "PLSQL_DEBUG": true, - "POST_TRANSACTION": true, - "PRECISION": true, - "PRESERVE": true, - "PRIMARY": true, - "PRIOR": true, - "PRIVATE": true, - "PRIVATE_SGA": true, - "PRIVILEGE": true, - "PRIVILEGES": true, - "PROCEDURE": true, - "PROFILE": true, - "PUBLIC": true, - "PURGE": true, - "QUEUE": true, - "QUOTA": true, - "RANGE": true, - "RAW": true, - "RBA": true, - "READ": true, - "READUP": true, - "REAL": true, - "REBUILD": true, - "RECOVER": true, - "RECOVERABLE": true, - "RECOVERY": true, - "REF": true, - "REFERENCES": true, - "REFERENCING": true, - "REFRESH": true, - "RENAME": true, - "REPLACE": true, - "RESET": true, - "RESETLOGS": true, - "RESIZE": true, - "RESOURCE": true, - "RESTRICTED": true, - "RETURN": true, - "RETURNING": true, - "REUSE": true, - "REVERSE": true, - "REVOKE": true, - "ROLE": true, - "ROLES": true, - "ROLLBACK": true, - "ROW": true, - "ROWID": true, - "ROWNUM": true, - "ROWS": true, - "RULE": true, - "SAMPLE": true, - "SAVEPOINT": true, - "SB4": true, - "SCAN_INSTANCES": true, - "SCHEMA": true, - "SCN": true, - "SCOPE": true, - "SD_ALL": true, - "SD_INHIBIT": true, - "SD_SHOW": true, - "SEGMENT": true, - "SEG_BLOCK": true, - "SEG_FILE": true, - "SELECT": true, - "SEQUENCE": true, - "SERIALIZABLE": true, - "SESSION": true, - "SESSION_CACHED_CURSORS": true, - "SESSIONS_PER_USER": true, - "SET": true, - "SHARE": true, - "SHARED": true, - "SHARED_POOL": true, - "SHRINK": true, - "SIZE": true, - "SKIP": true, - "SKIP_UNUSABLE_INDEXES": true, - "SMALLINT": true, - "SNAPSHOT": true, - "SOME": true, - "SORT": true, - "SPECIFICATION": true, - "SPLIT": true, - "SQL_TRACE": true, - "STANDBY": true, - "START": true, - "STATEMENT_ID": 
true, - "STATISTICS": true, - "STOP": true, - "STORAGE": true, - "STORE": true, - "STRUCTURE": true, - "SUCCESSFUL": true, - "SWITCH": true, - "SYS_OP_ENFORCE_NOT_NULL$": true, - "SYS_OP_NTCIMG$": true, - "SYNONYM": true, - "SYSDATE": true, - "SYSDBA": true, - "SYSOPER": true, - "SYSTEM": true, - "TABLE": true, - "TABLES": true, - "TABLESPACE": true, - "TABLESPACE_NO": true, - "TABNO": true, - "TEMPORARY": true, - "THAN": true, - "THE": true, - "THEN": true, - "THREAD": true, - "TIMESTAMP": true, - "TIME": true, - "TO": true, - "TOPLEVEL": true, - "TRACE": true, - "TRACING": true, - "TRANSACTION": true, - "TRANSITIONAL": true, - "TRIGGER": true, - "TRIGGERS": true, - "TRUE": true, - "TRUNCATE": true, - "TX": true, - "TYPE": true, - "UB2": true, - "UBA": true, - "UID": true, - "UNARCHIVED": true, - "UNDO": true, - "UNION": true, - "UNIQUE": true, - "UNLIMITED": true, - "UNLOCK": true, - "UNRECOVERABLE": true, - "UNTIL": true, - "UNUSABLE": true, - "UNUSED": true, - "UPDATABLE": true, - "UPDATE": true, - "USAGE": true, - "USE": true, - "USER": true, - "USING": true, - "VALIDATE": true, - "VALIDATION": true, - "VALUE": true, - "VALUES": true, - "VARCHAR": true, - "VARCHAR2": true, - "VARYING": true, - "VIEW": true, - "WHEN": true, - "WHENEVER": true, - "WHERE": true, - "WITH": true, - "WITHOUT": true, - "WORK": true, - "WRITE": true, - "WRITEDOWN": true, - "WRITEUP": true, - "XID": true, - "YEAR": true, - "ZONE": true, - } - - oracleQuoter = schemas.Quoter{ - Prefix: '"', - Suffix: '"', - IsReserved: schemas.AlwaysReserve, - } -) - -type oracle struct { - Base -} - -func (db *oracle) Init(uri *URI) error { - db.quoter = oracleQuoter - return db.Base.Init(db, uri) -} - -func (db *oracle) Version(ctx context.Context, queryer core.Queryer) (*schemas.Version, error) { - rows, err := queryer.QueryContext(ctx, "select * from v$version where banner like 'Oracle%'") - if err != nil { - return nil, err - } - defer rows.Close() - - var version string - if !rows.Next() { - if 
rows.Err() != nil { - return nil, rows.Err() - } - return nil, errors.New("unknow version") - } - - if err := rows.Scan(&version); err != nil { - return nil, err - } - return &schemas.Version{ - Number: version, - }, nil -} - -func (db *oracle) Features() *DialectFeatures { - return &DialectFeatures{ - AutoincrMode: SequenceAutoincrMode, - } -} - -func (db *oracle) SQLType(c *schemas.Column) string { - var res string - switch t := c.SQLType.Name; t { - case schemas.Bit, schemas.TinyInt, schemas.SmallInt, schemas.MediumInt, schemas.Int, schemas.Integer, schemas.BigInt, schemas.Bool, schemas.Serial, schemas.BigSerial: - res = "NUMBER" - case schemas.Binary, schemas.VarBinary, schemas.Blob, schemas.TinyBlob, schemas.MediumBlob, schemas.LongBlob, schemas.Bytea: - return schemas.Blob - case schemas.Time, schemas.DateTime, schemas.TimeStamp: - res = schemas.TimeStamp - case schemas.TimeStampz: - res = "TIMESTAMP WITH TIME ZONE" - case schemas.Float, schemas.Double, schemas.Numeric, schemas.Decimal: - res = "NUMBER" - case schemas.Text, schemas.MediumText, schemas.LongText, schemas.Json: - res = "CLOB" - case schemas.Char, schemas.Varchar, schemas.TinyText: - res = "VARCHAR2" - default: - res = t - } - - hasLen1 := (c.Length > 0) - hasLen2 := (c.Length2 > 0) - - if hasLen2 { - res += "(" + strconv.FormatInt(c.Length, 10) + "," + strconv.FormatInt(c.Length2, 10) + ")" - } else if hasLen1 { - res += "(" + strconv.FormatInt(c.Length, 10) + ")" - } - return res -} - -func (db *oracle) ColumnTypeKind(t string) int { - switch strings.ToUpper(t) { - case "DATE": - return schemas.TIME_TYPE - case "CHAR", "NCHAR", "VARCHAR", "VARCHAR2", "NVARCHAR2", "LONG", "CLOB", "NCLOB": - return schemas.TEXT_TYPE - case "NUMBER": - return schemas.NUMERIC_TYPE - case "BLOB": - return schemas.BLOB_TYPE - default: - return schemas.UNKNOW_TYPE - } -} - -func (db *oracle) AutoIncrStr() string { - return "AUTO_INCREMENT" -} - -func (db *oracle) IsReserved(name string) bool { - _, ok := 
oracleReservedWords[strings.ToUpper(name)] - return ok -} - -func (db *oracle) DropTableSQL(tableName string) (string, bool) { - return fmt.Sprintf("DROP TABLE `%s`", tableName), false -} - -func (db *oracle) CreateTableSQL(ctx context.Context, queryer core.Queryer, table *schemas.Table, tableName string) (string, bool, error) { - sql := "CREATE TABLE " - if tableName == "" { - tableName = table.Name - } - - quoter := db.Quoter() - sql += quoter.Quote(tableName) + " (" - - pkList := table.PrimaryKeys - - for _, colName := range table.ColumnsSeq() { - col := table.GetColumn(colName) - /*if col.IsPrimaryKey && len(pkList) == 1 { - sql += col.String(b.dialect) - } else {*/ - s, _ := ColumnString(db, col, false) - sql += s - // } - sql = strings.TrimSpace(sql) - sql += ", " - } - - if len(pkList) > 0 { - sql += "PRIMARY KEY ( " - sql += quoter.Join(pkList, ",") - sql += " ), " - } - - sql = sql[:len(sql)-2] + ")" - return sql, false, nil -} - -func (db *oracle) SetQuotePolicy(quotePolicy QuotePolicy) { - switch quotePolicy { - case QuotePolicyNone: - q := oracleQuoter - q.IsReserved = schemas.AlwaysNoReserve - db.quoter = q - case QuotePolicyReserved: - q := oracleQuoter - q.IsReserved = db.IsReserved - db.quoter = q - case QuotePolicyAlways: - fallthrough - default: - db.quoter = oracleQuoter - } -} - -func (db *oracle) IndexCheckSQL(tableName, idxName string) (string, []interface{}) { - args := []interface{}{tableName, idxName} - return `SELECT INDEX_NAME FROM USER_INDEXES ` + - `WHERE TABLE_NAME = :1 AND INDEX_NAME = :2`, args -} - -func (db *oracle) IsTableExist(queryer core.Queryer, ctx context.Context, tableName string) (bool, error) { - return db.HasRecords(queryer, ctx, `SELECT table_name FROM user_tables WHERE table_name = :1`, tableName) -} - -func (db *oracle) IsColumnExist(queryer core.Queryer, ctx context.Context, tableName, colName string) (bool, error) { - args := []interface{}{tableName, colName} - query := "SELECT column_name FROM USER_TAB_COLUMNS 
WHERE table_name = :1" + - " AND column_name = :2" - return db.HasRecords(queryer, ctx, query, args...) -} - -func (db *oracle) GetColumns(queryer core.Queryer, ctx context.Context, tableName string) ([]string, map[string]*schemas.Column, error) { - args := []interface{}{tableName} - s := "SELECT column_name,data_default,data_type,data_length,data_precision,data_scale," + - "nullable FROM USER_TAB_COLUMNS WHERE table_name = :1" - - rows, err := queryer.QueryContext(ctx, s, args...) - if err != nil { - return nil, nil, err - } - defer rows.Close() - - cols := make(map[string]*schemas.Column) - colSeq := make([]string, 0) - for rows.Next() { - col := new(schemas.Column) - col.Indexes = make(map[string]int) - - var colName, colDefault, nullable, dataType, dataPrecision, dataScale *string - var dataLen int64 - - err = rows.Scan(&colName, &colDefault, &dataType, &dataLen, &dataPrecision, - &dataScale, &nullable) - if err != nil { - return nil, nil, err - } - - col.Name = strings.Trim(*colName, `" `) - if colDefault != nil { - col.Default = *colDefault - col.DefaultIsEmpty = false - } - - if *nullable == "Y" { - col.Nullable = true - } else { - col.Nullable = false - } - - var ignore bool - - var dt string - var len1, len2 int64 - dts := strings.Split(*dataType, "(") - dt = dts[0] - if len(dts) > 1 { - lens := strings.Split(dts[1][:len(dts[1])-1], ",") - if len(lens) > 1 { - len1, _ = strconv.ParseInt(lens[0], 10, 64) - len2, _ = strconv.ParseInt(lens[1], 10, 64) - } else { - len1, _ = strconv.ParseInt(lens[0], 10, 64) - } - } - - switch dt { - case "VARCHAR2": - col.SQLType = schemas.SQLType{Name: schemas.Varchar, DefaultLength: len1, DefaultLength2: len2} - case "NVARCHAR2": - col.SQLType = schemas.SQLType{Name: schemas.NVarchar, DefaultLength: len1, DefaultLength2: len2} - case "TIMESTAMP WITH TIME ZONE": - col.SQLType = schemas.SQLType{Name: schemas.TimeStampz, DefaultLength: 0, DefaultLength2: 0} - case "NUMBER": - col.SQLType = schemas.SQLType{Name: schemas.Double, 
DefaultLength: len1, DefaultLength2: len2} - case "LONG", "LONG RAW": - col.SQLType = schemas.SQLType{Name: schemas.Text, DefaultLength: 0, DefaultLength2: 0} - case "RAW": - col.SQLType = schemas.SQLType{Name: schemas.Binary, DefaultLength: 0, DefaultLength2: 0} - case "ROWID": - col.SQLType = schemas.SQLType{Name: schemas.Varchar, DefaultLength: 18, DefaultLength2: 0} - case "AQ$_SUBSCRIBERS": - ignore = true - default: - col.SQLType = schemas.SQLType{Name: strings.ToUpper(dt), DefaultLength: len1, DefaultLength2: len2} - } - - if ignore { - continue - } - - if _, ok := schemas.SqlTypes[col.SQLType.Name]; !ok { - return nil, nil, fmt.Errorf("Unknown colType %v %v", *dataType, col.SQLType) - } - - col.Length = dataLen - - if col.SQLType.IsText() || col.SQLType.IsTime() { - if !col.DefaultIsEmpty { - col.Default = "'" + col.Default + "'" - } - } - cols[col.Name] = col - colSeq = append(colSeq, col.Name) - } - if rows.Err() != nil { - return nil, nil, rows.Err() - } - - return colSeq, cols, nil -} - -func (db *oracle) GetTables(queryer core.Queryer, ctx context.Context) ([]*schemas.Table, error) { - args := []interface{}{} - s := "SELECT table_name FROM user_tables" - - rows, err := queryer.QueryContext(ctx, s, args...) - if err != nil { - return nil, err - } - defer rows.Close() - - tables := make([]*schemas.Table, 0) - for rows.Next() { - table := schemas.NewEmptyTable() - err = rows.Scan(&table.Name) - if err != nil { - return nil, err - } - - tables = append(tables, table) - } - if rows.Err() != nil { - return nil, rows.Err() - } - return tables, nil -} - -func (db *oracle) GetIndexes(queryer core.Queryer, ctx context.Context, tableName string) (map[string]*schemas.Index, error) { - args := []interface{}{tableName} - s := "SELECT t.column_name,i.uniqueness,i.index_name FROM user_ind_columns t,user_indexes i " + - "WHERE t.index_name = i.index_name and t.table_name = i.table_name and t.table_name =:1" - - rows, err := queryer.QueryContext(ctx, s, args...) 
- if err != nil { - return nil, err - } - defer rows.Close() - - indexes := make(map[string]*schemas.Index) - for rows.Next() { - var indexType int - var indexName, colName, uniqueness string - - err = rows.Scan(&colName, &uniqueness, &indexName) - if err != nil { - return nil, err - } - - indexName = strings.Trim(indexName, `" `) - - var isRegular bool - if strings.HasPrefix(indexName, "IDX_"+tableName) || strings.HasPrefix(indexName, "UQE_"+tableName) { - indexName = indexName[5+len(tableName):] - isRegular = true - } - - if uniqueness == "UNIQUE" { - indexType = schemas.UniqueType - } else { - indexType = schemas.IndexType - } - - var index *schemas.Index - var ok bool - if index, ok = indexes[indexName]; !ok { - index = new(schemas.Index) - index.Type = indexType - index.Name = indexName - index.IsRegular = isRegular - indexes[indexName] = index - } - index.AddColumn(colName) - } - if rows.Err() != nil { - return nil, rows.Err() - } - return indexes, nil -} - -func (db *oracle) Filters() []Filter { - return []Filter{ - &SeqFilter{Prefix: ":", Start: 1}, - } -} - -type godrorDriver struct { - baseDriver -} - -func (g *godrorDriver) Features() *DriverFeatures { - return &DriverFeatures{ - SupportReturnInsertedID: false, - } -} - -func (g *godrorDriver) Parse(driverName, dataSourceName string) (*URI, error) { - db := &URI{DBType: schemas.ORACLE} - dsnPattern := regexp.MustCompile( - `^(?:(?P.*?)(?::(?P.*))?@)?` + // [user[:password]@] - `(?:(?P[^\(]*)(?:\((?P[^\)]*)\))?)?` + // [net[(addr)]] - `\/(?P.*?)` + // /dbname - `(?:\?(?P[^\?]*))?$`) // [?param1=value1¶mN=valueN] - matches := dsnPattern.FindStringSubmatch(dataSourceName) - // tlsConfigRegister := make(map[string]*tls.Config) - names := dsnPattern.SubexpNames() - - for i, match := range matches { - if names[i] == "dbname" { - db.DBName = match - } - } - if db.DBName == "" { - return nil, errors.New("dbname is empty") - } - return db, nil -} - -func (g *godrorDriver) GenScanResult(colType string) 
(interface{}, error) { - switch colType { - case "CHAR", "NCHAR", "VARCHAR", "VARCHAR2", "NVARCHAR2", "LONG", "CLOB", "NCLOB": - var s sql.NullString - return &s, nil - case "NUMBER": - var s sql.NullString - return &s, nil - case "DATE": - var s sql.NullTime - return &s, nil - case "BLOB": - var r sql.RawBytes - return &r, nil - default: - var r sql.RawBytes - return &r, nil - } -} - -type oci8Driver struct { - godrorDriver -} - -// dataSourceName=user/password@ipv4:port/dbname -// dataSourceName=user/password@[ipv6]:port/dbname -func (o *oci8Driver) Parse(driverName, dataSourceName string) (*URI, error) { - db := &URI{DBType: schemas.ORACLE} - dsnPattern := regexp.MustCompile( - `^(?P.*)\/(?P.*)@` + // user:password@ - `(?P.*)` + // ip:port - `\/(?P.*)`) // dbname - matches := dsnPattern.FindStringSubmatch(dataSourceName) - names := dsnPattern.SubexpNames() - for i, match := range matches { - if names[i] == "dbname" { - db.DBName = match - } - } - if db.DBName == "" && len(matches) != 0 { - return nil, errors.New("dbname is empty") - } - return db, nil -} diff --git a/vendor/xorm.io/xorm/dialects/pg_reserved.txt b/vendor/xorm.io/xorm/dialects/pg_reserved.txt deleted file mode 100644 index 720ed377..00000000 --- a/vendor/xorm.io/xorm/dialects/pg_reserved.txt +++ /dev/null @@ -1,746 +0,0 @@ -A non-reserved non-reserved -ABORT non-reserved -ABS reserved reserved -ABSENT non-reserved non-reserved -ABSOLUTE non-reserved non-reserved non-reserved reserved -ACCESS non-reserved -ACCORDING non-reserved non-reserved -ACTION non-reserved non-reserved non-reserved reserved -ADA non-reserved non-reserved non-reserved -ADD non-reserved non-reserved non-reserved reserved -ADMIN non-reserved non-reserved non-reserved -AFTER non-reserved non-reserved non-reserved -AGGREGATE non-reserved -ALL reserved reserved reserved reserved -ALLOCATE reserved reserved reserved -ALSO non-reserved -ALTER non-reserved reserved reserved reserved -ALWAYS non-reserved non-reserved non-reserved 
-ANALYSE reserved -ANALYZE reserved -AND reserved reserved reserved reserved -ANY reserved reserved reserved reserved -ARE reserved reserved reserved -ARRAY reserved reserved reserved -ARRAY_AGG reserved reserved -ARRAY_MAX_CARDINALITY reserved -AS reserved reserved reserved reserved -ASC reserved non-reserved non-reserved reserved -ASENSITIVE reserved reserved -ASSERTION non-reserved non-reserved non-reserved reserved -ASSIGNMENT non-reserved non-reserved non-reserved -ASYMMETRIC reserved reserved reserved -AT non-reserved reserved reserved reserved -ATOMIC reserved reserved -ATTRIBUTE non-reserved non-reserved non-reserved -ATTRIBUTES non-reserved non-reserved -AUTHORIZATION reserved (can be function or type) reserved reserved reserved -AVG reserved reserved reserved -BACKWARD non-reserved -BASE64 non-reserved non-reserved -BEFORE non-reserved non-reserved non-reserved -BEGIN non-reserved reserved reserved reserved -BEGIN_FRAME reserved -BEGIN_PARTITION reserved -BERNOULLI non-reserved non-reserved -BETWEEN non-reserved (cannot be function or type) reserved reserved reserved -BIGINT non-reserved (cannot be function or type) reserved reserved -BINARY reserved (can be function or type) reserved reserved -BIT non-reserved (cannot be function or type) reserved -BIT_LENGTH reserved -BLOB reserved reserved -BLOCKED non-reserved non-reserved -BOM non-reserved non-reserved -BOOLEAN non-reserved (cannot be function or type) reserved reserved -BOTH reserved reserved reserved reserved -BREADTH non-reserved non-reserved -BY non-reserved reserved reserved reserved -C non-reserved non-reserved non-reserved -CACHE non-reserved -CALL reserved reserved -CALLED non-reserved reserved reserved -CARDINALITY reserved reserved -CASCADE non-reserved non-reserved non-reserved reserved -CASCADED non-reserved reserved reserved reserved -CASE reserved reserved reserved reserved -CAST reserved reserved reserved reserved -CATALOG non-reserved non-reserved non-reserved reserved -CATALOG_NAME 
non-reserved non-reserved non-reserved -CEIL reserved reserved -CEILING reserved reserved -CHAIN non-reserved non-reserved non-reserved -CHAR non-reserved (cannot be function or type) reserved reserved reserved -CHARACTER non-reserved (cannot be function or type) reserved reserved reserved -CHARACTERISTICS non-reserved non-reserved non-reserved -CHARACTERS non-reserved non-reserved -CHARACTER_LENGTH reserved reserved reserved -CHARACTER_SET_CATALOG non-reserved non-reserved non-reserved -CHARACTER_SET_NAME non-reserved non-reserved non-reserved -CHARACTER_SET_SCHEMA non-reserved non-reserved non-reserved -CHAR_LENGTH reserved reserved reserved -CHECK reserved reserved reserved reserved -CHECKPOINT non-reserved -CLASS non-reserved -CLASS_ORIGIN non-reserved non-reserved non-reserved -CLOB reserved reserved -CLOSE non-reserved reserved reserved reserved -CLUSTER non-reserved -COALESCE non-reserved (cannot be function or type) reserved reserved reserved -COBOL non-reserved non-reserved non-reserved -COLLATE reserved reserved reserved reserved -COLLATION reserved (can be function or type) non-reserved non-reserved reserved -COLLATION_CATALOG non-reserved non-reserved non-reserved -COLLATION_NAME non-reserved non-reserved non-reserved -COLLATION_SCHEMA non-reserved non-reserved non-reserved -COLLECT reserved reserved -COLUMN reserved reserved reserved reserved -COLUMNS non-reserved non-reserved -COLUMN_NAME non-reserved non-reserved non-reserved -COMMAND_FUNCTION non-reserved non-reserved non-reserved -COMMAND_FUNCTION_CODE non-reserved non-reserved -COMMENT non-reserved -COMMENTS non-reserved -COMMIT non-reserved reserved reserved reserved -COMMITTED non-reserved non-reserved non-reserved non-reserved -CONCURRENTLY reserved (can be function or type) -CONDITION reserved reserved -CONDITION_NUMBER non-reserved non-reserved non-reserved -CONFIGURATION non-reserved -CONNECT reserved reserved reserved -CONNECTION non-reserved non-reserved non-reserved reserved 
-CONNECTION_NAME non-reserved non-reserved non-reserved -CONSTRAINT reserved reserved reserved reserved -CONSTRAINTS non-reserved non-reserved non-reserved reserved -CONSTRAINT_CATALOG non-reserved non-reserved non-reserved -CONSTRAINT_NAME non-reserved non-reserved non-reserved -CONSTRAINT_SCHEMA non-reserved non-reserved non-reserved -CONSTRUCTOR non-reserved non-reserved -CONTAINS reserved non-reserved -CONTENT non-reserved non-reserved non-reserved -CONTINUE non-reserved non-reserved non-reserved reserved -CONTROL non-reserved non-reserved -CONVERSION non-reserved -CONVERT reserved reserved reserved -COPY non-reserved -CORR reserved reserved -CORRESPONDING reserved reserved reserved -COST non-reserved -COUNT reserved reserved reserved -COVAR_POP reserved reserved -COVAR_SAMP reserved reserved -CREATE reserved reserved reserved reserved -CROSS reserved (can be function or type) reserved reserved reserved -CSV non-reserved -CUBE reserved reserved -CUME_DIST reserved reserved -CURRENT non-reserved reserved reserved reserved -CURRENT_CATALOG reserved reserved reserved -CURRENT_DATE reserved reserved reserved reserved -CURRENT_DEFAULT_TRANSFORM_GROUP reserved reserved -CURRENT_PATH reserved reserved -CURRENT_ROLE reserved reserved reserved -CURRENT_ROW reserved -CURRENT_SCHEMA reserved (can be function or type) reserved reserved -CURRENT_TIME reserved reserved reserved reserved -CURRENT_TIMESTAMP reserved reserved reserved reserved -CURRENT_TRANSFORM_GROUP_FOR_TYPE reserved reserved -CURRENT_USER reserved reserved reserved reserved -CURSOR non-reserved reserved reserved reserved -CURSOR_NAME non-reserved non-reserved non-reserved -CYCLE non-reserved reserved reserved -DATA non-reserved non-reserved non-reserved non-reserved -DATABASE non-reserved -DATALINK reserved reserved -DATE reserved reserved reserved -DATETIME_INTERVAL_CODE non-reserved non-reserved non-reserved -DATETIME_INTERVAL_PRECISION non-reserved non-reserved non-reserved -DAY non-reserved reserved 
reserved reserved -DB non-reserved non-reserved -DEALLOCATE non-reserved reserved reserved reserved -DEC non-reserved (cannot be function or type) reserved reserved reserved -DECIMAL non-reserved (cannot be function or type) reserved reserved reserved -DECLARE non-reserved reserved reserved reserved -DEFAULT reserved reserved reserved reserved -DEFAULTS non-reserved non-reserved non-reserved -DEFERRABLE reserved non-reserved non-reserved reserved -DEFERRED non-reserved non-reserved non-reserved reserved -DEFINED non-reserved non-reserved -DEFINER non-reserved non-reserved non-reserved -DEGREE non-reserved non-reserved -DELETE non-reserved reserved reserved reserved -DELIMITER non-reserved -DELIMITERS non-reserved -DENSE_RANK reserved reserved -DEPTH non-reserved non-reserved -DEREF reserved reserved -DERIVED non-reserved non-reserved -DESC reserved non-reserved non-reserved reserved -DESCRIBE reserved reserved reserved -DESCRIPTOR non-reserved non-reserved reserved -DETERMINISTIC reserved reserved -DIAGNOSTICS non-reserved non-reserved reserved -DICTIONARY non-reserved -DISABLE non-reserved -DISCARD non-reserved -DISCONNECT reserved reserved reserved -DISPATCH non-reserved non-reserved -DISTINCT reserved reserved reserved reserved -DLNEWCOPY reserved reserved -DLPREVIOUSCOPY reserved reserved -DLURLCOMPLETE reserved reserved -DLURLCOMPLETEONLY reserved reserved -DLURLCOMPLETEWRITE reserved reserved -DLURLPATH reserved reserved -DLURLPATHONLY reserved reserved -DLURLPATHWRITE reserved reserved -DLURLSCHEME reserved reserved -DLURLSERVER reserved reserved -DLVALUE reserved reserved -DO reserved -DOCUMENT non-reserved non-reserved non-reserved -DOMAIN non-reserved non-reserved non-reserved reserved -DOUBLE non-reserved reserved reserved reserved -DROP non-reserved reserved reserved reserved -DYNAMIC reserved reserved -DYNAMIC_FUNCTION non-reserved non-reserved non-reserved -DYNAMIC_FUNCTION_CODE non-reserved non-reserved -EACH non-reserved reserved reserved -ELEMENT 
reserved reserved -ELSE reserved reserved reserved reserved -EMPTY non-reserved non-reserved -ENABLE non-reserved -ENCODING non-reserved non-reserved non-reserved -ENCRYPTED non-reserved -END reserved reserved reserved reserved -END-EXEC reserved reserved reserved -END_FRAME reserved -END_PARTITION reserved -ENFORCED non-reserved -ENUM non-reserved -EQUALS reserved non-reserved -ESCAPE non-reserved reserved reserved reserved -EVENT non-reserved -EVERY reserved reserved -EXCEPT reserved reserved reserved reserved -EXCEPTION reserved -EXCLUDE non-reserved non-reserved non-reserved -EXCLUDING non-reserved non-reserved non-reserved -EXCLUSIVE non-reserved -EXEC reserved reserved reserved -EXECUTE non-reserved reserved reserved reserved -EXISTS non-reserved (cannot be function or type) reserved reserved reserved -EXP reserved reserved -EXPLAIN non-reserved -EXPRESSION non-reserved -EXTENSION non-reserved -EXTERNAL non-reserved reserved reserved reserved -EXTRACT non-reserved (cannot be function or type) reserved reserved reserved -FALSE reserved reserved reserved reserved -FAMILY non-reserved -FETCH reserved reserved reserved reserved -FILE non-reserved non-reserved -FILTER reserved reserved -FINAL non-reserved non-reserved -FIRST non-reserved non-reserved non-reserved reserved -FIRST_VALUE reserved reserved -FLAG non-reserved non-reserved -FLOAT non-reserved (cannot be function or type) reserved reserved reserved -FLOOR reserved reserved -FOLLOWING non-reserved non-reserved non-reserved -FOR reserved reserved reserved reserved -FORCE non-reserved -FOREIGN reserved reserved reserved reserved -FORTRAN non-reserved non-reserved non-reserved -FORWARD non-reserved -FOUND non-reserved non-reserved reserved -FRAME_ROW reserved -FREE reserved reserved -FREEZE reserved (can be function or type) -FROM reserved reserved reserved reserved -FS non-reserved non-reserved -FULL reserved (can be function or type) reserved reserved reserved -FUNCTION non-reserved reserved reserved 
-FUNCTIONS non-reserved -FUSION reserved reserved -G non-reserved non-reserved -GENERAL non-reserved non-reserved -GENERATED non-reserved non-reserved -GET reserved reserved reserved -GLOBAL non-reserved reserved reserved reserved -GO non-reserved non-reserved reserved -GOTO non-reserved non-reserved reserved -GRANT reserved reserved reserved reserved -GRANTED non-reserved non-reserved non-reserved -GREATEST non-reserved (cannot be function or type) -GROUP reserved reserved reserved reserved -GROUPING reserved reserved -GROUPS reserved -HANDLER non-reserved -HAVING reserved reserved reserved reserved -HEADER non-reserved -HEX non-reserved non-reserved -HIERARCHY non-reserved non-reserved -HOLD non-reserved reserved reserved -HOUR non-reserved reserved reserved reserved -ID non-reserved non-reserved -IDENTITY non-reserved reserved reserved reserved -IF non-reserved -IGNORE non-reserved non-reserved -ILIKE reserved (can be function or type) -IMMEDIATE non-reserved non-reserved non-reserved reserved -IMMEDIATELY non-reserved -IMMUTABLE non-reserved -IMPLEMENTATION non-reserved non-reserved -IMPLICIT non-reserved -IMPORT reserved reserved -IN reserved reserved reserved reserved -INCLUDING non-reserved non-reserved non-reserved -INCREMENT non-reserved non-reserved non-reserved -INDENT non-reserved non-reserved -INDEX non-reserved -INDEXES non-reserved -INDICATOR reserved reserved reserved -INHERIT non-reserved -INHERITS non-reserved -INITIALLY reserved non-reserved non-reserved reserved -INLINE non-reserved -INNER reserved (can be function or type) reserved reserved reserved -INOUT non-reserved (cannot be function or type) reserved reserved -INPUT non-reserved non-reserved non-reserved reserved -INSENSITIVE non-reserved reserved reserved reserved -INSERT non-reserved reserved reserved reserved -INSTANCE non-reserved non-reserved -INSTANTIABLE non-reserved non-reserved -INSTEAD non-reserved non-reserved non-reserved -INT non-reserved (cannot be function or type) reserved 
reserved reserved -INTEGER non-reserved (cannot be function or type) reserved reserved reserved -INTEGRITY non-reserved non-reserved -INTERSECT reserved reserved reserved reserved -INTERSECTION reserved reserved -INTERVAL non-reserved (cannot be function or type) reserved reserved reserved -INTO reserved reserved reserved reserved -INVOKER non-reserved non-reserved non-reserved -IS reserved (can be function or type) reserved reserved reserved -ISNULL reserved (can be function or type) -ISOLATION non-reserved non-reserved non-reserved reserved -JOIN reserved (can be function or type) reserved reserved reserved -K non-reserved non-reserved -KEY non-reserved non-reserved non-reserved reserved -KEY_MEMBER non-reserved non-reserved -KEY_TYPE non-reserved non-reserved -LABEL non-reserved -LAG reserved reserved -LANGUAGE non-reserved reserved reserved reserved -LARGE non-reserved reserved reserved -LAST non-reserved non-reserved non-reserved reserved -LAST_VALUE reserved reserved -LATERAL reserved reserved reserved -LC_COLLATE non-reserved -LC_CTYPE non-reserved -LEAD reserved reserved -LEADING reserved reserved reserved reserved -LEAKPROOF non-reserved -LEAST non-reserved (cannot be function or type) -LEFT reserved (can be function or type) reserved reserved reserved -LENGTH non-reserved non-reserved non-reserved -LEVEL non-reserved non-reserved non-reserved reserved -LIBRARY non-reserved non-reserved -LIKE reserved (can be function or type) reserved reserved reserved -LIKE_REGEX reserved reserved -LIMIT reserved non-reserved non-reserved -LINK non-reserved non-reserved -LISTEN non-reserved -LN reserved reserved -LOAD non-reserved -LOCAL non-reserved reserved reserved reserved -LOCALTIME reserved reserved reserved -LOCALTIMESTAMP reserved reserved reserved -LOCATION non-reserved non-reserved non-reserved -LOCATOR non-reserved non-reserved -LOCK non-reserved -LOWER reserved reserved reserved -M non-reserved non-reserved -MAP non-reserved non-reserved -MAPPING non-reserved 
non-reserved non-reserved -MATCH non-reserved reserved reserved reserved -MATCHED non-reserved non-reserved -MATERIALIZED non-reserved -MAX reserved reserved reserved -MAXVALUE non-reserved non-reserved non-reserved -MAX_CARDINALITY reserved -MEMBER reserved reserved -MERGE reserved reserved -MESSAGE_LENGTH non-reserved non-reserved non-reserved -MESSAGE_OCTET_LENGTH non-reserved non-reserved non-reserved -MESSAGE_TEXT non-reserved non-reserved non-reserved -METHOD reserved reserved -MIN reserved reserved reserved -MINUTE non-reserved reserved reserved reserved -MINVALUE non-reserved non-reserved non-reserved -MOD reserved reserved -MODE non-reserved -MODIFIES reserved reserved -MODULE reserved reserved reserved -MONTH non-reserved reserved reserved reserved -MORE non-reserved non-reserved non-reserved -MOVE non-reserved -MULTISET reserved reserved -MUMPS non-reserved non-reserved non-reserved -NAME non-reserved non-reserved non-reserved non-reserved -NAMES non-reserved non-reserved non-reserved reserved -NAMESPACE non-reserved non-reserved -NATIONAL non-reserved (cannot be function or type) reserved reserved reserved -NATURAL reserved (can be function or type) reserved reserved reserved -NCHAR non-reserved (cannot be function or type) reserved reserved reserved -NCLOB reserved reserved -NESTING non-reserved non-reserved -NEW reserved reserved -NEXT non-reserved non-reserved non-reserved reserved -NFC non-reserved non-reserved -NFD non-reserved non-reserved -NFKC non-reserved non-reserved -NFKD non-reserved non-reserved -NIL non-reserved non-reserved -NO non-reserved reserved reserved reserved -NONE non-reserved (cannot be function or type) reserved reserved -NORMALIZE reserved reserved -NORMALIZED non-reserved non-reserved -NOT reserved reserved reserved reserved -NOTHING non-reserved -NOTIFY non-reserved -NOTNULL reserved (can be function or type) -NOWAIT non-reserved -NTH_VALUE reserved reserved -NTILE reserved reserved -NULL reserved reserved reserved reserved 
-NULLABLE non-reserved non-reserved non-reserved -NULLIF non-reserved (cannot be function or type) reserved reserved reserved -NULLS non-reserved non-reserved non-reserved -NUMBER non-reserved non-reserved non-reserved -NUMERIC non-reserved (cannot be function or type) reserved reserved reserved -OBJECT non-reserved non-reserved non-reserved -OCCURRENCES_REGEX reserved reserved -OCTETS non-reserved non-reserved -OCTET_LENGTH reserved reserved reserved -OF non-reserved reserved reserved reserved -OFF non-reserved non-reserved non-reserved -OFFSET reserved reserved reserved -OIDS non-reserved -OLD reserved reserved -ON reserved reserved reserved reserved -ONLY reserved reserved reserved reserved -OPEN reserved reserved reserved -OPERATOR non-reserved -OPTION non-reserved non-reserved non-reserved reserved -OPTIONS non-reserved non-reserved non-reserved -OR reserved reserved reserved reserved -ORDER reserved reserved reserved reserved -ORDERING non-reserved non-reserved -ORDINALITY non-reserved non-reserved -OTHERS non-reserved non-reserved -OUT non-reserved (cannot be function or type) reserved reserved -OUTER reserved (can be function or type) reserved reserved reserved -OUTPUT non-reserved non-reserved reserved -OVER reserved (can be function or type) reserved reserved -OVERLAPS reserved (can be function or type) reserved reserved reserved -OVERLAY non-reserved (cannot be function or type) reserved reserved -OVERRIDING non-reserved non-reserved -OWNED non-reserved -OWNER non-reserved -P non-reserved non-reserved -PAD non-reserved non-reserved reserved -PARAMETER reserved reserved -PARAMETER_MODE non-reserved non-reserved -PARAMETER_NAME non-reserved non-reserved -PARAMETER_ORDINAL_POSITION non-reserved non-reserved -PARAMETER_SPECIFIC_CATALOG non-reserved non-reserved -PARAMETER_SPECIFIC_NAME non-reserved non-reserved -PARAMETER_SPECIFIC_SCHEMA non-reserved non-reserved -PARSER non-reserved -PARTIAL non-reserved non-reserved non-reserved reserved -PARTITION 
non-reserved reserved reserved -PASCAL non-reserved non-reserved non-reserved -PASSING non-reserved non-reserved non-reserved -PASSTHROUGH non-reserved non-reserved -PASSWORD non-reserved -PATH non-reserved non-reserved -PERCENT reserved -PERCENTILE_CONT reserved reserved -PERCENTILE_DISC reserved reserved -PERCENT_RANK reserved reserved -PERIOD reserved -PERMISSION non-reserved non-reserved -PLACING reserved non-reserved non-reserved -PLANS non-reserved -PLI non-reserved non-reserved non-reserved -PORTION reserved -POSITION non-reserved (cannot be function or type) reserved reserved reserved -POSITION_REGEX reserved reserved -POWER reserved reserved -PRECEDES reserved -PRECEDING non-reserved non-reserved non-reserved -PRECISION non-reserved (cannot be function or type) reserved reserved reserved -PREPARE non-reserved reserved reserved reserved -PREPARED non-reserved -PRESERVE non-reserved non-reserved non-reserved reserved -PRIMARY reserved reserved reserved reserved -PRIOR non-reserved non-reserved non-reserved reserved -PRIVILEGES non-reserved non-reserved non-reserved reserved -PROCEDURAL non-reserved -PROCEDURE non-reserved reserved reserved reserved -PROGRAM non-reserved -PUBLIC non-reserved non-reserved reserved -QUOTE non-reserved -RANGE non-reserved reserved reserved -RANK reserved reserved -READ non-reserved non-reserved non-reserved reserved -READS reserved reserved -REAL non-reserved (cannot be function or type) reserved reserved reserved -REASSIGN non-reserved -RECHECK non-reserved -RECOVERY non-reserved non-reserved -RECURSIVE non-reserved reserved reserved -REF non-reserved reserved reserved -REFERENCES reserved reserved reserved reserved -REFERENCING reserved reserved -REFRESH non-reserved -REGR_AVGX reserved reserved -REGR_AVGY reserved reserved -REGR_COUNT reserved reserved -REGR_INTERCEPT reserved reserved -REGR_R2 reserved reserved -REGR_SLOPE reserved reserved -REGR_SXX reserved reserved -REGR_SXY reserved reserved -REGR_SYY reserved reserved 
-REINDEX non-reserved -RELATIVE non-reserved non-reserved non-reserved reserved -RELEASE non-reserved reserved reserved -RENAME non-reserved -REPEATABLE non-reserved non-reserved non-reserved non-reserved -REPLACE non-reserved -REPLICA non-reserved -REQUIRING non-reserved non-reserved -RESET non-reserved -RESPECT non-reserved non-reserved -RESTART non-reserved non-reserved non-reserved -RESTORE non-reserved non-reserved -RESTRICT non-reserved non-reserved non-reserved reserved -RESULT reserved reserved -RETURN reserved reserved -RETURNED_CARDINALITY non-reserved non-reserved -RETURNED_LENGTH non-reserved non-reserved non-reserved -RETURNED_OCTET_LENGTH non-reserved non-reserved non-reserved -RETURNED_SQLSTATE non-reserved non-reserved non-reserved -RETURNING reserved non-reserved non-reserved -RETURNS non-reserved reserved reserved -REVOKE non-reserved reserved reserved reserved -RIGHT reserved (can be function or type) reserved reserved reserved -ROLE non-reserved non-reserved non-reserved -ROLLBACK non-reserved reserved reserved reserved -ROLLUP reserved reserved -ROUTINE non-reserved non-reserved -ROUTINE_CATALOG non-reserved non-reserved -ROUTINE_NAME non-reserved non-reserved -ROUTINE_SCHEMA non-reserved non-reserved -ROW non-reserved (cannot be function or type) reserved reserved -ROWS non-reserved reserved reserved reserved -ROW_COUNT non-reserved non-reserved non-reserved -ROW_NUMBER reserved reserved -RULE non-reserved -SAVEPOINT non-reserved reserved reserved -SCALE non-reserved non-reserved non-reserved -SCHEMA non-reserved non-reserved non-reserved reserved -SCHEMA_NAME non-reserved non-reserved non-reserved -SCOPE reserved reserved -SCOPE_CATALOG non-reserved non-reserved -SCOPE_NAME non-reserved non-reserved -SCOPE_SCHEMA non-reserved non-reserved -SCROLL non-reserved reserved reserved reserved -SEARCH non-reserved reserved reserved -SECOND non-reserved reserved reserved reserved -SECTION non-reserved non-reserved reserved -SECURITY non-reserved 
non-reserved non-reserved -SELECT reserved reserved reserved reserved -SELECTIVE non-reserved non-reserved -SELF non-reserved non-reserved -SENSITIVE reserved reserved -SEQUENCE non-reserved non-reserved non-reserved -SEQUENCES non-reserved -SERIALIZABLE non-reserved non-reserved non-reserved non-reserved -SERVER non-reserved non-reserved non-reserved -SERVER_NAME non-reserved non-reserved non-reserved -SESSION non-reserved non-reserved non-reserved reserved -SESSION_USER reserved reserved reserved reserved -SET non-reserved reserved reserved reserved -SETOF non-reserved (cannot be function or type) -SETS non-reserved non-reserved -SHARE non-reserved -SHOW non-reserved -SIMILAR reserved (can be function or type) reserved reserved -SIMPLE non-reserved non-reserved non-reserved -SIZE non-reserved non-reserved reserved -SMALLINT non-reserved (cannot be function or type) reserved reserved reserved -SNAPSHOT non-reserved -SOME reserved reserved reserved reserved -SOURCE non-reserved non-reserved -SPACE non-reserved non-reserved reserved -SPECIFIC reserved reserved -SPECIFICTYPE reserved reserved -SPECIFIC_NAME non-reserved non-reserved -SQL reserved reserved reserved -SQLCODE reserved -SQLERROR reserved -SQLEXCEPTION reserved reserved -SQLSTATE reserved reserved reserved -SQLWARNING reserved reserved -SQRT reserved reserved -STABLE non-reserved -STANDALONE non-reserved non-reserved non-reserved -START non-reserved reserved reserved -STATE non-reserved non-reserved -STATEMENT non-reserved non-reserved non-reserved -STATIC reserved reserved -STATISTICS non-reserved -STDDEV_POP reserved reserved -STDDEV_SAMP reserved reserved -STDIN non-reserved -STDOUT non-reserved -STORAGE non-reserved -STRICT non-reserved -STRIP non-reserved non-reserved non-reserved -STRUCTURE non-reserved non-reserved -STYLE non-reserved non-reserved -SUBCLASS_ORIGIN non-reserved non-reserved non-reserved -SUBMULTISET reserved reserved -SUBSTRING non-reserved (cannot be function or type) reserved 
reserved reserved -SUBSTRING_REGEX reserved reserved -SUCCEEDS reserved -SUM reserved reserved reserved -SYMMETRIC reserved reserved reserved -SYSID non-reserved -SYSTEM non-reserved reserved reserved -SYSTEM_TIME reserved -SYSTEM_USER reserved reserved reserved -T non-reserved non-reserved -TABLE reserved reserved reserved reserved -TABLES non-reserved -TABLESAMPLE reserved reserved -TABLESPACE non-reserved -TABLE_NAME non-reserved non-reserved non-reserved -TEMP non-reserved -TEMPLATE non-reserved -TEMPORARY non-reserved non-reserved non-reserved reserved -TEXT non-reserved -THEN reserved reserved reserved reserved -TIES non-reserved non-reserved -TIME non-reserved (cannot be function or type) reserved reserved reserved -TIMESTAMP non-reserved (cannot be function or type) reserved reserved reserved -TIMEZONE_HOUR reserved reserved reserved -TIMEZONE_MINUTE reserved reserved reserved -TO reserved reserved reserved reserved -TOKEN non-reserved non-reserved -TOP_LEVEL_COUNT non-reserved non-reserved -TRAILING reserved reserved reserved reserved -TRANSACTION non-reserved non-reserved non-reserved reserved -TRANSACTIONS_COMMITTED non-reserved non-reserved -TRANSACTIONS_ROLLED_BACK non-reserved non-reserved -TRANSACTION_ACTIVE non-reserved non-reserved -TRANSFORM non-reserved non-reserved -TRANSFORMS non-reserved non-reserved -TRANSLATE reserved reserved reserved -TRANSLATE_REGEX reserved reserved -TRANSLATION reserved reserved reserved -TREAT non-reserved (cannot be function or type) reserved reserved -TRIGGER non-reserved reserved reserved -TRIGGER_CATALOG non-reserved non-reserved -TRIGGER_NAME non-reserved non-reserved -TRIGGER_SCHEMA non-reserved non-reserved -TRIM non-reserved (cannot be function or type) reserved reserved reserved -TRIM_ARRAY reserved reserved -TRUE reserved reserved reserved reserved -TRUNCATE non-reserved reserved reserved -TRUSTED non-reserved -TYPE non-reserved non-reserved non-reserved non-reserved -TYPES non-reserved -UESCAPE reserved 
reserved -UNBOUNDED non-reserved non-reserved non-reserved -UNCOMMITTED non-reserved non-reserved non-reserved non-reserved -UNDER non-reserved non-reserved -UNENCRYPTED non-reserved -UNION reserved reserved reserved reserved -UNIQUE reserved reserved reserved reserved -UNKNOWN non-reserved reserved reserved reserved -UNLINK non-reserved non-reserved -UNLISTEN non-reserved -UNLOGGED non-reserved -UNNAMED non-reserved non-reserved non-reserved -UNNEST reserved reserved -UNTIL non-reserved -UNTYPED non-reserved non-reserved -UPDATE non-reserved reserved reserved reserved -UPPER reserved reserved reserved -URI non-reserved non-reserved -USAGE non-reserved non-reserved reserved -USER reserved reserved reserved reserved -USER_DEFINED_TYPE_CATALOG non-reserved non-reserved -USER_DEFINED_TYPE_CODE non-reserved non-reserved -USER_DEFINED_TYPE_NAME non-reserved non-reserved -USER_DEFINED_TYPE_SCHEMA non-reserved non-reserved -USING reserved reserved reserved reserved -VACUUM non-reserved -VALID non-reserved non-reserved non-reserved -VALIDATE non-reserved -VALIDATOR non-reserved -VALUE non-reserved reserved reserved reserved -VALUES non-reserved (cannot be function or type) reserved reserved reserved -VALUE_OF reserved -VARBINARY reserved reserved -VARCHAR non-reserved (cannot be function or type) reserved reserved reserved -VARIADIC reserved -VARYING non-reserved reserved reserved reserved -VAR_POP reserved reserved -VAR_SAMP reserved reserved -VERBOSE reserved (can be function or type) -VERSION non-reserved non-reserved non-reserved -VERSIONING reserved -VIEW non-reserved non-reserved non-reserved reserved -VOLATILE non-reserved -WHEN reserved reserved reserved reserved -WHENEVER reserved reserved reserved -WHERE reserved reserved reserved reserved -WHITESPACE non-reserved non-reserved non-reserved -WIDTH_BUCKET reserved reserved -WINDOW reserved reserved reserved -WITH reserved reserved reserved reserved -WITHIN reserved reserved -WITHOUT non-reserved reserved reserved 
-WORK non-reserved non-reserved non-reserved reserved -WRAPPER non-reserved non-reserved non-reserved -WRITE non-reserved non-reserved non-reserved reserved -XML non-reserved reserved reserved -XMLAGG reserved reserved -XMLATTRIBUTES non-reserved (cannot be function or type) reserved reserved -XMLBINARY reserved reserved -XMLCAST reserved reserved -XMLCOMMENT reserved reserved -XMLCONCAT non-reserved (cannot be function or type) reserved reserved -XMLDECLARATION non-reserved non-reserved -XMLDOCUMENT reserved reserved -XMLELEMENT non-reserved (cannot be function or type) reserved reserved -XMLEXISTS non-reserved (cannot be function or type) reserved reserved -XMLFOREST non-reserved (cannot be function or type) reserved reserved -XMLITERATE reserved reserved -XMLNAMESPACES reserved reserved -XMLPARSE non-reserved (cannot be function or type) reserved reserved -XMLPI non-reserved (cannot be function or type) reserved reserved -XMLQUERY reserved reserved -XMLROOT non-reserved (cannot be function or type) -XMLSCHEMA non-reserved non-reserved -XMLSERIALIZE non-reserved (cannot be function or type) reserved reserved -XMLTABLE reserved reserved -XMLTEXT reserved reserved -XMLVALIDATE reserved reserved -YEAR non-reserved reserved reserved reserved -YES non-reserved non-reserved non-reserved -ZONE non-reserved non-reserved non-reserved reserved \ No newline at end of file diff --git a/vendor/xorm.io/xorm/dialects/postgres.go b/vendor/xorm.io/xorm/dialects/postgres.go deleted file mode 100644 index f9de5859..00000000 --- a/vendor/xorm.io/xorm/dialects/postgres.go +++ /dev/null @@ -1,1556 +0,0 @@ -// Copyright 2015 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
package dialects

import (
	"context"
	"database/sql"
	"errors"
	"fmt"
	"net/url"
	"strconv"
	"strings"

	"xorm.io/xorm/core"
	"xorm.io/xorm/schemas"
)

// from http://www.postgresql.org/docs/current/static/sql-keywords-appendix.html
var (
	// postgresReservedWords is the set of identifiers that must be quoted
	// when used as table/column names. Keys are upper-case; IsReserved
	// upper-cases its input before the lookup.
	postgresReservedWords = map[string]bool{
		"A": true, "ABORT": true, "ABS": true, "ABSENT": true, "ABSOLUTE": true, "ACCESS": true,
		"ACCORDING": true, "ACTION": true, "ADA": true, "ADD": true, "ADMIN": true, "AFTER": true,
		"AGGREGATE": true, "ALL": true, "ALLOCATE": true, "ALSO": true, "ALTER": true, "ALWAYS": true,
		"ANALYSE": true, "ANALYZE": true, "AND": true, "ANY": true, "ARE": true, "ARRAY": true,
		"ARRAY_AGG": true, "ARRAY_MAX_CARDINALITY": true, "AS": true, "ASC": true, "ASENSITIVE": true,
		"ASSERTION": true, "ASSIGNMENT": true, "ASYMMETRIC": true, "AT": true, "ATOMIC": true,
		"ATTRIBUTE": true, "ATTRIBUTES": true, "AUTHORIZATION": true, "AVG": true,
		"BACKWARD": true, "BASE64": true, "BEFORE": true, "BEGIN": true, "BEGIN_FRAME": true,
		"BEGIN_PARTITION": true, "BERNOULLI": true, "BETWEEN": true, "BIGINT": true, "BINARY": true,
		"BIT": true, "BIT_LENGTH": true, "BLOB": true, "BLOCKED": true, "BOM": true, "BOOLEAN": true,
		"BOTH": true, "BREADTH": true, "BY": true,
		"C": true, "CACHE": true, "CALL": true, "CALLED": true, "CARDINALITY": true, "CASCADE": true,
		"CASCADED": true, "CASE": true, "CAST": true, "CATALOG": true, "CATALOG_NAME": true,
		"CEIL": true, "CEILING": true, "CHAIN": true, "CHAR": true, "CHARACTER": true,
		"CHARACTERISTICS": true, "CHARACTERS": true, "CHARACTER_LENGTH": true,
		"CHARACTER_SET_CATALOG": true, "CHARACTER_SET_NAME": true, "CHARACTER_SET_SCHEMA": true,
		"CHAR_LENGTH": true, "CHECK": true, "CHECKPOINT": true, "CLASS": true, "CLASS_ORIGIN": true,
		"CLOB": true, "CLOSE": true, "CLUSTER": true, "COALESCE": true, "COBOL": true,
		"COLLATE": true, "COLLATION": true, "COLLATION_CATALOG": true, "COLLATION_NAME": true,
		"COLLATION_SCHEMA": true, "COLLECT": true, "COLUMN": true, "COLUMNS": true,
		"COLUMN_NAME": true, "COMMAND_FUNCTION": true, "COMMAND_FUNCTION_CODE": true,
		"COMMENT": true, "COMMENTS": true, "COMMIT": true, "COMMITTED": true, "CONCURRENTLY": true,
		"CONDITION": true, "CONDITION_NUMBER": true, "CONFIGURATION": true, "CONNECT": true,
		"CONNECTION": true, "CONNECTION_NAME": true, "CONSTRAINT": true, "CONSTRAINTS": true,
		"CONSTRAINT_CATALOG": true, "CONSTRAINT_NAME": true, "CONSTRAINT_SCHEMA": true,
		"CONSTRUCTOR": true, "CONTAINS": true, "CONTENT": true, "CONTINUE": true, "CONTROL": true,
		"CONVERSION": true, "CONVERT": true, "COPY": true, "CORR": true, "CORRESPONDING": true,
		"COST": true, "COUNT": true, "COVAR_POP": true, "COVAR_SAMP": true, "CREATE": true,
		"CROSS": true, "CSV": true, "CUBE": true, "CUME_DIST": true, "CURRENT": true,
		"CURRENT_CATALOG": true, "CURRENT_DATE": true, "CURRENT_DEFAULT_TRANSFORM_GROUP": true,
		"CURRENT_PATH": true, "CURRENT_ROLE": true, "CURRENT_ROW": true, "CURRENT_SCHEMA": true,
		"CURRENT_TIME": true, "CURRENT_TIMESTAMP": true, "CURRENT_TRANSFORM_GROUP_FOR_TYPE": true,
		"CURRENT_USER": true, "CURSOR": true, "CURSOR_NAME": true, "CYCLE": true,
		"DATA": true, "DATABASE": true, "DATALINK": true, "DATE": true,
		"DATETIME_INTERVAL_CODE": true, "DATETIME_INTERVAL_PRECISION": true, "DAY": true,
		"DB": true, "DEALLOCATE": true, "DEC": true, "DECIMAL": true, "DECLARE": true,
		"DEFAULT": true, "DEFAULTS": true, "DEFERRABLE": true, "DEFERRED": true, "DEFINED": true,
		"DEFINER": true, "DEGREE": true, "DELETE": true, "DELIMITER": true, "DELIMITERS": true,
		"DENSE_RANK": true, "DEPTH": true, "DEREF": true, "DERIVED": true, "DESC": true,
		"DESCRIBE": true, "DESCRIPTOR": true, "DETERMINISTIC": true, "DIAGNOSTICS": true,
		"DICTIONARY": true, "DISABLE": true, "DISCARD": true, "DISCONNECT": true, "DISPATCH": true,
		"DISTINCT": true, "DLNEWCOPY": true, "DLPREVIOUSCOPY": true, "DLURLCOMPLETE": true,
		"DLURLCOMPLETEONLY": true, "DLURLCOMPLETEWRITE": true, "DLURLPATH": true,
		"DLURLPATHONLY": true, "DLURLPATHWRITE": true, "DLURLSCHEME": true, "DLURLSERVER": true,
		"DLVALUE": true, "DO": true, "DOCUMENT": true, "DOMAIN": true, "DOUBLE": true,
		"DROP": true, "DYNAMIC": true, "DYNAMIC_FUNCTION": true, "DYNAMIC_FUNCTION_CODE": true,
		"EACH": true, "ELEMENT": true, "ELSE": true, "EMPTY": true, "ENABLE": true,
		"ENCODING": true, "ENCRYPTED": true, "END": true, "END-EXEC": true, "END_FRAME": true,
		"END_PARTITION": true, "ENFORCED": true, "ENUM": true, "EQUALS": true, "ESCAPE": true,
		"EVENT": true, "EVERY": true, "EXCEPT": true, "EXCEPTION": true, "EXCLUDE": true,
		"EXCLUDING": true, "EXCLUSIVE": true, "EXEC": true, "EXECUTE": true, "EXISTS": true,
		"EXP": true, "EXPLAIN": true, "EXPRESSION": true, "EXTENSION": true, "EXTERNAL": true,
		"EXTRACT": true,
		"FALSE": true, "FAMILY": true, "FETCH": true, "FILE": true, "FILTER": true, "FINAL": true,
		"FIRST": true, "FIRST_VALUE": true, "FLAG": true, "FLOAT": true, "FLOOR": true,
		"FOLLOWING": true, "FOR": true, "FORCE": true, "FOREIGN": true, "FORTRAN": true,
		"FORWARD": true, "FOUND": true, "FRAME_ROW": true, "FREE": true, "FREEZE": true,
		"FROM": true, "FS": true, "FULL": true, "FUNCTION": true, "FUNCTIONS": true, "FUSION": true,
		"G": true, "GENERAL": true, "GENERATED": true, "GET": true, "GLOBAL": true, "GO": true,
		"GOTO": true, "GRANT": true, "GRANTED": true, "GREATEST": true, "GROUP": true,
		"GROUPING": true, "GROUPS": true,
		"HANDLER": true, "HAVING": true, "HEADER": true, "HEX": true, "HIERARCHY": true,
		"HOLD": true, "HOUR": true,
		"ID": true, "IDENTITY": true, "IF": true, "IGNORE": true, "ILIKE": true,
		"IMMEDIATE": true, "IMMEDIATELY": true, "IMMUTABLE": true, "IMPLEMENTATION": true,
		"IMPLICIT": true, "IMPORT": true, "IN": true, "INCLUDING": true, "INCREMENT": true,
		"INDENT": true, "INDEX": true, "INDEXES": true, "INDICATOR": true, "INHERIT": true,
		"INHERITS": true, "INITIALLY": true, "INLINE": true, "INNER": true, "INOUT": true,
		"INPUT": true, "INSENSITIVE": true, "INSERT": true, "INSTANCE": true, "INSTANTIABLE": true,
		"INSTEAD": true, "INT": true, "INTEGER": true, "INTEGRITY": true, "INTERSECT": true,
		"INTERSECTION": true, "INTERVAL": true, "INTO": true, "INVOKER": true, "IS": true,
		"ISNULL": true, "ISOLATION": true,
		"JOIN": true,
		"K": true, "KEY": true, "KEY_MEMBER": true, "KEY_TYPE": true,
		"LABEL": true, "LAG": true, "LANGUAGE": true, "LARGE": true, "LAST": true,
		"LAST_VALUE": true, "LATERAL": true, "LC_COLLATE": true, "LC_CTYPE": true, "LEAD": true,
		"LEADING": true, "LEAKPROOF": true, "LEAST": true, "LEFT": true, "LENGTH": true,
		"LEVEL": true, "LIBRARY": true, "LIKE": true, "LIKE_REGEX": true, "LIMIT": true,
		"LINK": true, "LISTEN": true, "LN": true, "LOAD": true, "LOCAL": true, "LOCALTIME": true,
		"LOCALTIMESTAMP": true, "LOCATION": true, "LOCATOR": true, "LOCK": true, "LOWER": true,
		"M": true, "MAP": true, "MAPPING": true, "MATCH": true, "MATCHED": true,
		"MATERIALIZED": true, "MAX": true, "MAXVALUE": true, "MAX_CARDINALITY": true,
		"MEMBER": true, "MERGE": true, "MESSAGE_LENGTH": true, "MESSAGE_OCTET_LENGTH": true,
		"MESSAGE_TEXT": true, "METHOD": true, "MIN": true, "MINUTE": true, "MINVALUE": true,
		"MOD": true, "MODE": true, "MODIFIES": true, "MODULE": true, "MONTH": true, "MORE": true,
		"MOVE": true, "MULTISET": true, "MUMPS": true,
		"NAME": true, "NAMES": true, "NAMESPACE": true, "NATIONAL": true, "NATURAL": true,
		"NCHAR": true, "NCLOB": true, "NESTING": true, "NEW": true, "NEXT": true, "NFC": true,
		"NFD": true, "NFKC": true, "NFKD": true, "NIL": true, "NO": true, "NONE": true,
		"NORMALIZE": true, "NORMALIZED": true, "NOT": true, "NOTHING": true, "NOTIFY": true,
		"NOTNULL": true, "NOWAIT": true, "NTH_VALUE": true, "NTILE": true, "NULL": true,
		"NULLABLE": true, "NULLIF": true, "NULLS": true, "NUMBER": true, "NUMERIC": true,
		"OBJECT": true, "OCCURRENCES_REGEX": true, "OCTETS": true, "OCTET_LENGTH": true,
		"OF": true, "OFF": true, "OFFSET": true, "OIDS": true, "OLD": true, "ON": true,
		"ONLY": true, "OPEN": true, "OPERATOR": true, "OPTION": true, "OPTIONS": true,
		"OR": true, "ORDER": true, "ORDERING": true, "ORDINALITY": true, "OTHERS": true,
		"OUT": true, "OUTER": true, "OUTPUT": true, "OVER": true, "OVERLAPS": true,
		"OVERLAY": true, "OVERRIDING": true, "OWNED": true, "OWNER": true,
		"P": true, "PAD": true, "PARAMETER": true, "PARAMETER_MODE": true, "PARAMETER_NAME": true,
		"PARAMETER_ORDINAL_POSITION": true, "PARAMETER_SPECIFIC_CATALOG": true,
		"PARAMETER_SPECIFIC_NAME": true, "PARAMETER_SPECIFIC_SCHEMA": true, "PARSER": true,
		"PARTIAL": true, "PARTITION": true, "PASCAL": true, "PASSING": true, "PASSTHROUGH": true,
		"PASSWORD": true, "PATH": true, "PERCENT": true, "PERCENTILE_CONT": true,
		"PERCENTILE_DISC": true, "PERCENT_RANK": true, "PERIOD": true, "PERMISSION": true,
		"PLACING": true, "PLANS": true, "PLI": true, "PORTION": true, "POSITION": true,
		"POSITION_REGEX": true, "POWER": true, "PRECEDES": true, "PRECEDING": true,
		"PRECISION": true, "PREPARE": true, "PREPARED": true, "PRESERVE": true, "PRIMARY": true,
		"PRIOR": true, "PRIVILEGES": true, "PROCEDURAL": true, "PROCEDURE": true, "PROGRAM": true,
		"PUBLIC": true,
		"QUOTE": true,
		"RANGE": true, "RANK": true, "READ": true, "READS": true, "REAL": true, "REASSIGN": true,
		"RECHECK": true, "RECOVERY": true, "RECURSIVE": true, "REF": true, "REFERENCES": true,
		"REFERENCING": true, "REFRESH": true, "REGR_AVGX": true, "REGR_AVGY": true,
		"REGR_COUNT": true, "REGR_INTERCEPT": true, "REGR_R2": true, "REGR_SLOPE": true,
		"REGR_SXX": true, "REGR_SXY": true, "REGR_SYY": true, "REINDEX": true, "RELATIVE": true,
		"RELEASE": true, "RENAME": true, "REPEATABLE": true, "REPLACE": true, "REPLICA": true,
		"REQUIRING": true, "RESET": true, "RESPECT": true, "RESTART": true, "RESTORE": true,
		"RESTRICT": true, "RESULT": true, "RETURN": true, "RETURNED_CARDINALITY": true,
		"RETURNED_LENGTH": true, "RETURNED_OCTET_LENGTH": true, "RETURNED_SQLSTATE": true,
		"RETURNING": true, "RETURNS": true, "REVOKE": true, "RIGHT": true, "ROLE": true,
		"ROLLBACK": true, "ROLLUP": true, "ROUTINE": true, "ROUTINE_CATALOG": true,
		"ROUTINE_NAME": true, "ROUTINE_SCHEMA": true, "ROW": true, "ROWS": true,
		"ROW_COUNT": true, "ROW_NUMBER": true, "RULE": true,
		"SAVEPOINT": true, "SCALE": true, "SCHEMA": true, "SCHEMA_NAME": true, "SCOPE": true,
		"SCOPE_CATALOG": true, "SCOPE_NAME": true, "SCOPE_SCHEMA": true, "SCROLL": true,
		"SEARCH": true, "SECOND": true, "SECTION": true, "SECURITY": true, "SELECT": true,
		"SELECTIVE": true, "SELF": true, "SENSITIVE": true, "SEQUENCE": true, "SEQUENCES": true,
		"SERIALIZABLE": true, "SERVER": true, "SERVER_NAME": true, "SESSION": true,
		"SESSION_USER": true, "SET": true, "SETOF": true, "SETS": true, "SHARE": true,
		"SHOW": true, "SIMILAR": true, "SIMPLE": true, "SIZE": true, "SMALLINT": true,
		"SNAPSHOT": true, "SOME": true, "SOURCE": true, "SPACE": true, "SPECIFIC": true,
		"SPECIFICTYPE": true, "SPECIFIC_NAME": true, "SQL": true, "SQLCODE": true,
		"SQLERROR": true, "SQLEXCEPTION": true, "SQLSTATE": true, "SQLWARNING": true,
		"SQRT": true, "STABLE": true, "STANDALONE": true, "START": true, "STATE": true,
		"STATEMENT": true, "STATIC": true, "STATISTICS": true, "STDDEV_POP": true,
		"STDDEV_SAMP": true, "STDIN": true, "STDOUT": true, "STORAGE": true, "STRICT": true,
		"STRIP": true, "STRUCTURE": true, "STYLE": true, "SUBCLASS_ORIGIN": true,
		"SUBMULTISET": true, "SUBSTRING": true, "SUBSTRING_REGEX": true, "SUCCEEDS": true,
		"SUM": true, "SYMMETRIC": true, "SYSID": true, "SYSTEM": true, "SYSTEM_TIME": true,
		"SYSTEM_USER": true,
		"T": true, "TABLE": true, "TABLES": true, "TABLESAMPLE": true, "TABLESPACE": true,
		"TABLE_NAME": true, "TEMP": true, "TEMPLATE": true, "TEMPORARY": true, "TEXT": true,
		"THEN": true, "TIES": true, "TIME": true, "TIMESTAMP": true, "TIMEZONE_HOUR": true,
		"TIMEZONE_MINUTE": true, "TO": true, "TOKEN": true, "TOP_LEVEL_COUNT": true,
		"TRAILING": true, "TRANSACTION": true, "TRANSACTIONS_COMMITTED": true,
		"TRANSACTIONS_ROLLED_BACK": true, "TRANSACTION_ACTIVE": true, "TRANSFORM": true,
		"TRANSFORMS": true, "TRANSLATE": true, "TRANSLATE_REGEX": true, "TRANSLATION": true,
		"TREAT": true, "TRIGGER": true, "TRIGGER_CATALOG": true, "TRIGGER_NAME": true,
		"TRIGGER_SCHEMA": true, "TRIM": true, "TRIM_ARRAY": true, "TRUE": true,
		"TRUNCATE": true, "TRUSTED": true, "TYPE": true, "TYPES": true,
		"UESCAPE": true, "UNBOUNDED": true, "UNCOMMITTED": true, "UNDER": true,
		"UNENCRYPTED": true, "UNION": true, "UNIQUE": true, "UNKNOWN": true, "UNLINK": true,
		"UNLISTEN": true, "UNLOGGED": true, "UNNAMED": true, "UNNEST": true, "UNTIL": true,
		"UNTYPED": true, "UPDATE": true, "UPPER": true, "URI": true, "USAGE": true,
		"USER": true, "USER_DEFINED_TYPE_CATALOG": true, "USER_DEFINED_TYPE_CODE": true,
		"USER_DEFINED_TYPE_NAME": true, "USER_DEFINED_TYPE_SCHEMA": true, "USING": true,
		"VACUUM": true, "VALID": true, "VALIDATE": true, "VALIDATOR": true, "VALUE": true,
		"VALUES": true, "VALUE_OF": true, "VARBINARY": true, "VARCHAR": true, "VARIADIC": true,
		"VARYING": true, "VAR_POP": true, "VAR_SAMP": true, "VERBOSE": true, "VERSION": true,
		"VERSIONING": true, "VIEW": true, "VOLATILE": true,
		"WHEN": true, "WHENEVER": true, "WHERE": true, "WHITESPACE": true, "WIDTH_BUCKET": true,
		"WINDOW": true, "WITH": true, "WITHIN": true, "WITHOUT": true, "WORK": true,
		"WRAPPER": true, "WRITE": true,
		"XML": true, "XMLAGG": true, "XMLATTRIBUTES": true, "XMLBINARY": true, "XMLCAST": true,
		"XMLCOMMENT": true, "XMLCONCAT": true, "XMLDECLARATION": true, "XMLDOCUMENT": true,
		"XMLELEMENT": true, "XMLEXISTS": true, "XMLFOREST": true, "XMLITERATE": true,
		"XMLNAMESPACES": true, "XMLPARSE": true, "XMLPI": true, "XMLQUERY": true,
		"XMLROOT": true, "XMLSCHEMA": true, "XMLSERIALIZE": true, "XMLTABLE": true,
		"XMLTEXT": true, "XMLVALIDATE": true,
		"YEAR": true, "YES": true,
		"ZONE": true,
	}

	// postgresQuoter quotes every identifier with double quotes by default
	// (AlwaysReserve); SetQuotePolicy installs variants of this value.
	postgresQuoter = schemas.Quoter{
		Prefix:     '"',
		Suffix:     '"',
		IsReserved: schemas.AlwaysReserve,
	}
)

var (
	// DefaultPostgresSchema default postgres schema
	DefaultPostgresSchema = "public"
	// postgresColAliases maps lower-cased column type names to the
	// canonical name used by this dialect (see Alias).
	postgresColAliases = map[string]string{
		"numeric": "decimal",
	}
)

// postgres implements the Dialect interface for PostgreSQL and
// PostgreSQL-compatible servers (CockroachDB is handled explicitly in
// Version and GetColumns).
type postgres struct {
	Base
}

// Alias returns a alias of column
func (db *postgres) Alias(col string) string {
	v, ok := postgresColAliases[strings.ToLower(col)]
	if ok {
		return v
	}
	return col
}

// Init installs the default (always-quoting) quoter and delegates the
// rest of the setup to the embedded Base dialect.
func (db *postgres) Init(uri *URI) error {
	db.quoter = postgresQuoter
	return db.Base.Init(db, uri)
}

// Version runs `SELECT version()` and parses the banner into a
// schemas.Version, distinguishing PostgreSQL from CockroachDB.
func (db *postgres) Version(ctx context.Context, queryer core.Queryer) (*schemas.Version, error) {
	rows, err := queryer.QueryContext(ctx, "SELECT version()")
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var version string
	if !rows.Next() {
		if rows.Err() != nil {
			return nil, rows.Err()
		}
		// NOTE(review): "unknow" is a long-standing typo in this error
		// string; left unchanged because callers may match on it.
		return nil, errors.New("unknow version")
	}

	if err := rows.Scan(&version); err != nil {
		return nil, err
	}

	// Postgres: 9.5.22 on x86_64-pc-linux-gnu (Debian 9.5.22-1.pgdg90+1), compiled by gcc (Debian 6.3.0-18+deb9u1) 6.3.0 20170516, 64-bit
	// CockroachDB CCL v19.2.4 (x86_64-unknown-linux-gnu, built
	if strings.HasPrefix(version, "CockroachDB") {
		versions := strings.Split(strings.TrimPrefix(version, "CockroachDB CCL "), " ")
		return &schemas.Version{
			Number:  strings.TrimPrefix(versions[0], "v"),
			Edition: "CockroachDB",
		}, nil
	} else if strings.HasPrefix(version, "PostgreSQL") {
		versions := strings.Split(strings.TrimPrefix(version, "PostgreSQL "), " on ")
		return &schemas.Version{
			Number:  versions[0],
			Level:   versions[1],
			Edition: "PostgreSQL",
		}, nil
	}

	return nil, errors.New("unknow database version")
}

// getSchema returns the schema from the connection URI, falling back to
// DefaultPostgresSchema ("public") when none was configured.
func (db *postgres) getSchema() string {
	if db.uri.Schema != "" {
		return db.uri.Schema
	}
	return DefaultPostgresSchema
}

// needQuote reports whether name must be quoted: reserved words and any
// identifier containing an upper-case letter (Postgres folds unquoted
// identifiers to lower case).
func (db *postgres) needQuote(name string) bool {
	if db.IsReserved(name) {
		return true
	}
	for _, c := range name {
		if c >= 'A' && c <= 'Z' {
			return true
		}
	}
	return false
}

// SetQuotePolicy swaps the quoter's reservation predicate; quoting
// characters themselves never change.
func (db *postgres) SetQuotePolicy(quotePolicy QuotePolicy) {
	switch quotePolicy {
	case QuotePolicyNone:
		q := postgresQuoter
		q.IsReserved = schemas.AlwaysNoReserve
		db.quoter = q
	case QuotePolicyReserved:
		q := postgresQuoter
		q.IsReserved = db.needQuote
		db.quoter = q
	case QuotePolicyAlways:
		fallthrough
	default:
		db.quoter = postgresQuoter
	}
}

// SQLType maps a generic xorm column type to its PostgreSQL type name,
// appending (length[,length2]) where applicable. Cases that `return`
// directly deliberately skip the length suffix (e.g. bytea, serial).
func (db *postgres) SQLType(c *schemas.Column) string {
	var res string
	switch t := c.SQLType.Name; t {
	case schemas.TinyInt, schemas.UnsignedTinyInt:
		res = schemas.SmallInt
		return res
	case schemas.Bit:
		res = schemas.Boolean
		return res
	case schemas.MediumInt, schemas.Int, schemas.Integer, schemas.UnsignedMediumInt, schemas.UnsignedSmallInt:
		if c.IsAutoIncrement {
			return schemas.Serial
		}
		return schemas.Integer
	case schemas.BigInt, schemas.UnsignedBigInt, schemas.UnsignedInt:
		if c.IsAutoIncrement {
			return schemas.BigSerial
		}
		return schemas.BigInt
	case schemas.Serial, schemas.BigSerial:
		// serial columns imply auto-increment and NOT NULL; mutates the
		// column as a side effect, matching the other dialects.
		c.IsAutoIncrement = true
		c.Nullable = false
		res = t
	case schemas.Binary, schemas.VarBinary:
		return schemas.Bytea
	case schemas.DateTime:
		res = schemas.TimeStamp
	case schemas.TimeStampz:
		return "timestamp with time zone"
	case schemas.Float:
		res = schemas.Real
	case schemas.TinyText, schemas.MediumText, schemas.LongText:
		res = schemas.Text
	case schemas.NChar:
		res = schemas.Char
	case schemas.NVarchar:
		res = schemas.Varchar
	case schemas.Uuid:
		return schemas.Uuid
	case schemas.Blob, schemas.TinyBlob, schemas.MediumBlob, schemas.LongBlob:
		return schemas.Bytea
	case schemas.Double:
		return "DOUBLE PRECISION"
	default:
		if c.IsAutoIncrement {
			return schemas.Serial
		}
		res = t
	}

	if strings.EqualFold(res, "bool") {
		// for bool, we don't need length information
		return res
	}
	hasLen1 := (c.Length > 0)
	hasLen2 := (c.Length2 > 0)

	if hasLen2 {
		res += "(" + strconv.FormatInt(c.Length, 10) + "," + strconv.FormatInt(c.Length2, 10) + ")"
	} else if hasLen1 {
		res += "(" + strconv.FormatInt(c.Length, 10) + ")"
	}
	return res
}

// Features describes dialect capabilities; Postgres uses incremental
// auto-increment (serial) columns.
func (db *postgres) Features() *DialectFeatures {
	return &DialectFeatures{
		AutoincrMode: IncrAutoincrMode,
	}
}

// ColumnTypeKind buckets a column type name into xorm's coarse kind
// constants (time/text/numeric/bool), defaulting to unknown.
func (db *postgres) ColumnTypeKind(t string) int {
	switch strings.ToUpper(t) {
	case "DATETIME", "TIMESTAMP":
		return schemas.TIME_TYPE
	case "VARCHAR", "TEXT":
		return schemas.TEXT_TYPE
	case "BIGINT", "BIGSERIAL", "SMALLINT", "INT", "INT8", "INT4", "INTEGER", "SERIAL", "FLOAT", "FLOAT4", "REAL", "DOUBLE PRECISION":
		return schemas.NUMERIC_TYPE
	case "BOOL":
		return schemas.BOOL_TYPE
	default:
		return schemas.UNKNOW_TYPE
	}
}

// IsReserved reports whether name (case-insensitively) is a reserved word.
func (db *postgres) IsReserved(name string) bool {
	_, ok := postgresReservedWords[strings.ToUpper(name)]
	return ok
}

// AutoIncrStr is empty because Postgres expresses auto-increment via the
// serial/bigserial types rather than a column keyword.
func (db *postgres) AutoIncrStr() string {
	return ""
}

// IndexCheckSQL builds a query against pg_indexes checking that idxName
// exists on tableName, scoped to the configured schema when there is one.
// Note: uses `?` placeholders; the SeqFilter from Filters rewrites them.
func (db *postgres) IndexCheckSQL(tableName, idxName string) (string, []interface{}) {
	if len(db.getSchema()) == 0 {
		args := []interface{}{tableName, idxName}
		return `SELECT indexname FROM pg_indexes WHERE tablename = ? AND indexname = ?`, args
	}

	args := []interface{}{db.getSchema(), tableName, idxName}
	return `SELECT indexname FROM pg_indexes ` +
		`WHERE schemaname = ? AND tablename = ? AND indexname = ?`, args
}

// IsTableExist checks pg_tables for tableName, scoped to the schema when set.
func (db *postgres) IsTableExist(queryer core.Queryer, ctx context.Context, tableName string) (bool, error) {
	if len(db.getSchema()) == 0 {
		return db.HasRecords(queryer, ctx, `SELECT tablename FROM pg_tables WHERE tablename = $1`, tableName)
	}

	return db.HasRecords(queryer, ctx, `SELECT tablename FROM pg_tables WHERE schemaname = $1 AND tablename = $2`,
		db.getSchema(), tableName)
}

// AddColumnSQL generates ALTER TABLE ... ADD plus a trailing COMMENT ON
// COLUMN statement. The schema prefix is skipped when tableName already
// contains a dot (caller supplied schema.table).
// NOTE(review): col.Comment is interpolated into the SQL unescaped; a
// comment containing a single quote would break the statement.
func (db *postgres) AddColumnSQL(tableName string, col *schemas.Column) string {
	s, _ := ColumnString(db.dialect, col, true)

	quoter := db.dialect.Quoter()
	addColumnSQL := ""
	commentSQL := "; "
	if len(db.getSchema()) == 0 || strings.Contains(tableName, ".") {
		addColumnSQL = fmt.Sprintf("ALTER TABLE %s ADD %s", quoter.Quote(tableName), s)
		commentSQL += fmt.Sprintf("COMMENT ON COLUMN %s.%s IS '%s'", quoter.Quote(tableName), quoter.Quote(col.Name), col.Comment)
		return addColumnSQL + commentSQL
	}

	addColumnSQL = fmt.Sprintf("ALTER TABLE %s.%s ADD %s", quoter.Quote(db.getSchema()), quoter.Quote(tableName), s)
	commentSQL += fmt.Sprintf("COMMENT ON COLUMN %s.%s.%s IS '%s'", quoter.Quote(db.getSchema()), quoter.Quote(tableName), quoter.Quote(col.Name), col.Comment)
	return addColumnSQL + commentSQL
}

// ModifyColumnSQL generates ALTER TABLE ... ALTER COLUMN ... TYPE plus a
// COMMENT ON COLUMN statement, mirroring AddColumnSQL's schema handling.
func (db *postgres) ModifyColumnSQL(tableName string, col *schemas.Column) string {
	quoter := db.dialect.Quoter()
	modifyColumnSQL := ""
	commentSQL := "; "

	if len(db.getSchema()) == 0 || strings.Contains(tableName, ".") {
		modifyColumnSQL = fmt.Sprintf("ALTER TABLE %s ALTER COLUMN %s TYPE %s", quoter.Quote(tableName), quoter.Quote(col.Name), db.SQLType(col))
		commentSQL += fmt.Sprintf("COMMENT ON COLUMN %s.%s IS '%s'", quoter.Quote(tableName), quoter.Quote(col.Name), col.Comment)
		return modifyColumnSQL + commentSQL
	}

	modifyColumnSQL = fmt.Sprintf("ALTER TABLE %s.%s ALTER COLUMN %s TYPE %s", quoter.Quote(db.getSchema()), quoter.Quote(tableName), quoter.Quote(col.Name), db.SQLType(col))
	commentSQL += fmt.Sprintf("COMMENT ON COLUMN %s.%s.%s IS '%s'", quoter.Quote(db.getSchema()), quoter.Quote(tableName), quoter.Quote(col.Name), col.Comment)
	return modifyColumnSQL + commentSQL
}

// DropIndexSQL builds a DROP INDEX statement. For "regular" (xorm-managed)
// indexes the UQE_/IDX_ table prefix is re-applied if missing, and the
// schema is prepended because Postgres drops indexes by qualified name,
// not per table.
func (db *postgres) DropIndexSQL(tableName string, index *schemas.Index) string {
	idxName := index.Name

	tableParts := strings.Split(strings.Replace(tableName, `"`, "", -1), ".")
	tableName = tableParts[len(tableParts)-1]

	if index.IsRegular {
		if index.Type == schemas.UniqueType && !strings.HasPrefix(idxName, "UQE_") {
			idxName = fmt.Sprintf("UQE_%v_%v", tableName, index.Name)
		} else if index.Type == schemas.IndexType && !strings.HasPrefix(idxName, "IDX_") {
			idxName = fmt.Sprintf("IDX_%v_%v", tableName, index.Name)
		}
	}
	if db.getSchema() != "" {
		idxName = db.getSchema() + "." + idxName
	}
	return fmt.Sprintf("DROP INDEX %v", db.Quoter().Quote(idxName))
}

// IsColumnExist checks information_schema.columns for the column, scoped
// to the schema when one is configured.
func (db *postgres) IsColumnExist(queryer core.Queryer, ctx context.Context, tableName, colName string) (bool, error) {
	args := []interface{}{db.getSchema(), tableName, colName}
	query := "SELECT column_name FROM INFORMATION_SCHEMA.COLUMNS WHERE table_schema = $1 AND table_name = $2" +
		" AND column_name = $3"
	if len(db.getSchema()) == 0 {
		args = []interface{}{tableName, colName}
		query = "SELECT column_name FROM INFORMATION_SCHEMA.COLUMNS WHERE table_name = $1" +
			" AND column_name = $2"
	}

	rows, err := queryer.QueryContext(ctx, query, args...)
	if err != nil {
		return false, err
	}
	defer rows.Close()

	if rows.Next() {
		return true, nil
	}
	return false, rows.Err()
}

// GetColumns introspects a table via the pg_catalog/information_schema
// join below and returns the column order plus a name->Column map. It
// also normalizes defaults emitted by both Postgres (`::type` casts) and
// CockroachDB (`:::type` casts, unique_rowid(), STRING(n) types).
func (db *postgres) GetColumns(queryer core.Queryer, ctx context.Context, tableName string) ([]string, map[string]*schemas.Column, error) {
	args := []interface{}{tableName}
	s := `SELECT column_name, column_default, is_nullable, data_type, character_maximum_length, description,
    CASE WHEN p.contype = 'p' THEN true ELSE false END AS primarykey,
    CASE WHEN p.contype = 'u' THEN true ELSE false END AS uniquekey
FROM pg_attribute f
    JOIN pg_class c ON c.oid = f.attrelid JOIN pg_type t ON t.oid = f.atttypid
    LEFT JOIN pg_attrdef d ON d.adrelid = c.oid AND d.adnum = f.attnum
    LEFT JOIN pg_description de ON f.attrelid=de.objoid AND f.attnum=de.objsubid
    LEFT JOIN pg_namespace n ON n.oid = c.relnamespace
    LEFT JOIN pg_constraint p ON p.conrelid = c.oid AND f.attnum = ANY (p.conkey)
    LEFT JOIN pg_class AS g ON p.confrelid = g.oid
    LEFT JOIN INFORMATION_SCHEMA.COLUMNS s ON s.column_name=f.attname AND c.relname=s.table_name
WHERE n.nspname= s.table_schema AND c.relkind = 'r'::char AND c.relname = $1%s AND f.attnum > 0 ORDER BY f.attnum;`

	schema := db.getSchema()
	if schema != "" {
		s = fmt.Sprintf(s, " AND s.table_schema = $2")
		args = append(args, schema)
	} else {
		s = fmt.Sprintf(s, "")
	}

	rows, err := queryer.QueryContext(ctx, s, args...)
	if err != nil {
		return nil, nil, err
	}
	defer rows.Close()

	cols := make(map[string]*schemas.Column)
	colSeq := make([]string, 0)

	for rows.Next() {
		col := new(schemas.Column)
		col.Indexes = make(map[string]int)

		var colName, isNullable, dataType string
		var maxLenStr, colDefault, description *string
		var isPK, isUnique bool
		err = rows.Scan(&colName, &colDefault, &isNullable, &dataType, &maxLenStr, &description, &isPK, &isUnique)
		if err != nil {
			return nil, nil, err
		}

		var maxLen int64
		if maxLenStr != nil {
			maxLen, err = strconv.ParseInt(*maxLenStr, 10, 64)
			if err != nil {
				return nil, nil, err
			}
		}

		if colDefault != nil && *colDefault == "unique_rowid()" { // ignore the system column added by cockroach
			continue
		}

		col.Name = strings.Trim(colName, `" `)

		if colDefault != nil {
			theDefault := *colDefault
			// cockroach has type with the default value with :::
			// and postgres with ::, we should remove them before store them
			idx := strings.Index(theDefault, ":::")
			if idx == -1 {
				idx = strings.Index(theDefault, "::")
			}
			if idx > -1 {
				theDefault = theDefault[:idx]
			}

			// strip a trailing UTC offset inside a quoted timestamp default
			if strings.HasSuffix(theDefault, "+00:00'") {
				theDefault = theDefault[:len(theDefault)-7] + "'"
			}

			col.Default = theDefault
			col.DefaultIsEmpty = false
			if strings.HasPrefix(col.Default, "nextval(") {
				// sequence-backed default means an auto-increment column
				col.IsAutoIncrement = true
				col.Default = ""
				col.DefaultIsEmpty = true
			}
		} else {
			col.DefaultIsEmpty = true
		}

		if description != nil {
			col.Comment = *description
		}

		if isPK {
			col.IsPrimaryKey = true
		}

		col.Nullable = (isNullable == "YES")

		switch strings.ToLower(dataType) {
		case "character varying", "string":
			col.SQLType = schemas.SQLType{Name: schemas.Varchar, DefaultLength: 0, DefaultLength2: 0}
		case "character":
			col.SQLType = schemas.SQLType{Name: schemas.Char, DefaultLength: 0, DefaultLength2: 0}
		case "timestamp without time zone":
			col.SQLType = schemas.SQLType{Name: schemas.DateTime, DefaultLength: 0, DefaultLength2: 0}
		case "timestamp with time zone":
			col.SQLType = schemas.SQLType{Name: schemas.TimeStampz, DefaultLength: 0, DefaultLength2: 0}
		case "double precision":
			col.SQLType = schemas.SQLType{Name: schemas.Double, DefaultLength: 0, DefaultLength2: 0}
		case "boolean":
			col.SQLType = schemas.SQLType{Name: schemas.Bool, DefaultLength: 0, DefaultLength2: 0}
		case "time without time zone":
			col.SQLType = schemas.SQLType{Name: schemas.Time, DefaultLength: 0, DefaultLength2: 0}
		case "bytes":
			col.SQLType = schemas.SQLType{Name: schemas.Binary, DefaultLength: 0, DefaultLength2: 0}
		case "oid":
			col.SQLType = schemas.SQLType{Name: schemas.BigInt, DefaultLength: 0, DefaultLength2: 0}
		case "array":
			col.SQLType = schemas.SQLType{Name: schemas.Array, DefaultLength: 0, DefaultLength2: 0}
		default:
			// CockroachDB reports e.g. STRING(255); extract the length.
			// NOTE(review): `"string("` is 7 characters, so startIdx+8
			// looks like it skips the first digit — verify against the
			// actual data_type values CockroachDB returns before changing.
			startIdx := strings.Index(strings.ToLower(dataType), "string(")
			if startIdx != -1 && strings.HasSuffix(dataType, ")") {
				length := dataType[startIdx+8 : len(dataType)-1]
				l, _ := strconv.ParseInt(length, 10, 64)
				col.SQLType = schemas.SQLType{Name: "STRING", DefaultLength: l, DefaultLength2: 0}
			} else {
				col.SQLType = schemas.SQLType{Name: strings.ToUpper(dataType), DefaultLength: 0, DefaultLength2: 0}
			}
		}
		if _, ok := schemas.SqlTypes[col.SQLType.Name]; !ok {
			return nil, nil, fmt.Errorf("unknown colType: %s - %s", dataType, col.SQLType.Name)
		}

		col.Length = maxLen

		if !col.DefaultIsEmpty {
			if col.SQLType.IsText() {
				if strings.HasSuffix(col.Default, "::character varying") {
					col.Default = strings.TrimSuffix(col.Default, "::character varying")
				} else if !strings.HasPrefix(col.Default, "'") {
					col.Default = "'" + col.Default + "'"
				}
			} else if col.SQLType.IsTime() {
				col.Default = strings.TrimSuffix(col.Default, "::timestamp without time zone")
			}
		}
		cols[col.Name] = col
		colSeq = append(colSeq, col.Name)
	}
	if rows.Err() != nil {
		return nil, nil, rows.Err()
	}

	return colSeq, cols, nil
}

// GetTables lists tables from pg_tables, filtered by schema when set.
func (db *postgres) GetTables(queryer core.Queryer, ctx context.Context) ([]*schemas.Table, error) {
	args := []interface{}{}
	s := "SELECT tablename FROM pg_tables"
	schema := db.getSchema()
	if schema != "" {
		args = append(args, schema)
		s = s + " WHERE schemaname = $1"
	}

	rows, err := queryer.QueryContext(ctx, s, args...)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	tables := make([]*schemas.Table, 0)
	for rows.Next() {
		table := schemas.NewEmptyTable()
		var name string
		err = rows.Scan(&name)
		if err != nil {
			return nil, err
		}
		table.Name = name
		tables = append(tables, table)
	}
	if rows.Err() != nil {
		return nil, rows.Err()
	}
	return tables, nil
}

// getIndexColName extracts the column names from a pg_indexes indexdef
// string, i.e. the comma-separated list inside the first parentheses,
// dropping any per-column ordering suffix (ASC/DESC).
func getIndexColName(indexdef string) []string {
	var colNames []string

	cs := strings.Split(indexdef, "(")
	for _, v := range strings.Split(strings.Split(cs[1], ")")[0], ",") {
		colNames = append(colNames, strings.Split(strings.TrimLeft(v, " "), " ")[0])
	}

	return colNames
}

// GetIndexes reads pg_indexes for tableName and reconstructs xorm index
// metadata, skipping primary keys and CockroachDB's implicit oid index,
// and stripping the IDX_/UQE_ table prefix from xorm-managed ("regular")
// index names.
func (db *postgres) GetIndexes(queryer core.Queryer, ctx context.Context, tableName string) (map[string]*schemas.Index, error) {
	args := []interface{}{tableName}
	s := "SELECT indexname, indexdef FROM pg_indexes WHERE tablename=$1"
	if len(db.getSchema()) != 0 {
		args = append(args, db.getSchema())
		s += " AND schemaname=$2"
	}

	rows, err := queryer.QueryContext(ctx, s, args...)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	indexes := make(map[string]*schemas.Index)
	for rows.Next() {
		var indexType int
		var indexName, indexdef string
		var colNames []string
		err = rows.Scan(&indexName, &indexdef)
		if err != nil {
			return nil, err
		}

		if indexName == "primary" {
			continue
		}
		indexName = strings.Trim(indexName, `" `)
		// ignore primary index
		if strings.HasSuffix(indexName, "_pkey") || strings.EqualFold(indexName, "primary") {
			continue
		}
		if strings.HasPrefix(indexdef, "CREATE UNIQUE INDEX") {
			indexType = schemas.UniqueType
		} else {
			indexType = schemas.IndexType
		}
		colNames = getIndexColName(indexdef)

		// Oid It's a special index. You can't put it in. TODO: This is not perfect.
		if indexName == tableName+"_oid_index" && len(colNames) == 1 && colNames[0] == "oid" {
			continue
		}

		var isRegular bool
		if strings.HasPrefix(indexName, "IDX_"+tableName) || strings.HasPrefix(indexName, "UQE_"+tableName) {
			// 5 = len("IDX_") or len("UQE_") plus the underscore after the table name
			newIdxName := indexName[5+len(tableName):]
			isRegular = true
			if newIdxName != "" {
				indexName = newIdxName
			}
		}

		index := &schemas.Index{Name: indexName, Type: indexType, Cols: make([]string, 0)}
		for _, colName := range colNames {
			col := strings.TrimSpace(strings.Replace(colName, `"`, "", -1))
			fields := strings.Split(col, " ")
			index.Cols = append(index.Cols, fields[0])
		}
		index.IsRegular = isRegular
		indexes[index.Name] = index
	}
	if rows.Err() != nil {
		return nil, rows.Err()
	}
	return indexes, nil
}

// CreateTableSQL delegates to Base and appends COMMENT ON TABLE/COLUMN
// statements, since Postgres has no inline comment syntax in CREATE TABLE.
// NOTE(review): comments are interpolated unescaped (see AddColumnSQL).
func (db *postgres) CreateTableSQL(ctx context.Context, queryer core.Queryer, table *schemas.Table, tableName string) (string, bool, error) {
	quoter := db.dialect.Quoter()
	if len(db.getSchema()) != 0 && !strings.Contains(tableName, ".") {
		tableName = fmt.Sprintf("%s.%s", db.getSchema(), tableName)
	}

	createTableSQL, ok, err := db.Base.CreateTableSQL(ctx, queryer, table, tableName)
	if err != nil {
		return "", ok, err
	}

	commentSQL := "; "
	if table.Comment != "" {
		// support schema.table -> "schema"."table"
		commentSQL += fmt.Sprintf("COMMENT ON TABLE %s IS '%s'", quoter.Quote(tableName), table.Comment)
	}

	for _, colName := range table.ColumnsSeq() {
		col := table.GetColumn(colName)

		if len(col.Comment) > 0 {
			commentSQL += fmt.Sprintf("COMMENT ON COLUMN %s.%s IS '%s'", quoter.Quote(tableName), quoter.Quote(col.Name), col.Comment)
		}
	}

	return createTableSQL + commentSQL, true, nil
}

// Filters rewrites `?` placeholders to Postgres-style $1, $2, ...
func (db *postgres) Filters() []Filter {
	return []Filter{&SeqFilter{Prefix: "$", Start: 1}}
}

// pqDriver implements the Driver interface for github.com/lib/pq.
type pqDriver struct {
	baseDriver
}

// values holds keyword/value pairs parsed from a libpq-style DSN.
type values map[string]string

// parseURL extracts the database name (the URL path, minus the leading
// slash) from a postgres:// or postgresql:// connection URL, escaping
// spaces, quotes and backslashes as libpq expects.
func parseURL(connstr string) (string, error) {
	u, err := url.Parse(connstr)
	if err != nil {
		return "", err
	}

	if u.Scheme != "postgresql" && u.Scheme != "postgres" {
		return "", fmt.Errorf("invalid connection protocol: %s", u.Scheme)
	}

	escaper := strings.NewReplacer(` `, `\ `, `'`, `\'`, `\`, `\\`)

	if u.Path != "" {
		return escaper.Replace(u.Path[1:]), nil
	}

	return "", nil
}

// parseOpts parses a libpq keyword/value DSN ("k=v k2='v 2' ...") into o.
// It is a small hand-rolled state machine; see the state legend below.
// Single-quoted values may contain spaces; a dangling quote is an error.
func parseOpts(urlStr string, o values) error {
	if len(urlStr) == 0 {
		return fmt.Errorf("invalid options: %s", urlStr)
	}

	urlStr = strings.TrimSpace(urlStr)

	var (
		inQuote bool
		state   int // 0 key, 1 space, 2 value, 3 equal
		start   int
		key     string
	)
	for i, c := range urlStr {
		switch c {
		case ' ':
			if !inQuote {
				if state == 2 {
					// end of an unquoted (or just-closed quoted) value
					state = 1
					v := urlStr[start:i]
					if strings.HasPrefix(v, "'") && strings.HasSuffix(v, "'") {
						v = v[1 : len(v)-1]
					} else if strings.HasPrefix(v, "'") || strings.HasSuffix(v, "'") {
						return fmt.Errorf("wrong single quote in %d of %s", i, urlStr)
					}
					o[key] = v
				} else if state != 1 {
					return fmt.Errorf("wrong format: %v", urlStr)
				}
			}
		case '\'':
			if state == 3 {
				state = 2
				start = i
			} else if state != 2 {
				return fmt.Errorf("wrong format: %v", urlStr)
			}
			inQuote = !inQuote
		case '=':
			if !inQuote {
				if state != 0 {
					return fmt.Errorf("wrong format: %v", urlStr)
				}
				key = urlStr[start:i]
				state = 3
			}
		default:
			if state == 3 {
				state = 2
				start = i
			} else if state == 1 {
				state = 0
				start = i
			}
		}

		// flush the trailing value when the input ends
		if i == len(urlStr)-1 {
			if state != 2 {
				return errors.New("no value matched key")
			}
			v := urlStr[start : i+1]
			if strings.HasPrefix(v, "'") && strings.HasSuffix(v, "'") {
				v = v[1 : len(v)-1]
			} else if strings.HasPrefix(v, "'") || strings.HasSuffix(v, "'") {
				return fmt.Errorf("wrong single quote in %d of %s", i, urlStr)
			}
			o[key] = v
		}
	}

	return nil
}

// Features: lib/pq does not support LastInsertId.
func (p *pqDriver) Features() *DriverFeatures {
	return &DriverFeatures{
		SupportReturnInsertedID: false,
	}
}

// Parse extracts the database name from either a postgres URL or a
// keyword/value DSN; the name is required.
func (p *pqDriver) Parse(driverName, dataSourceName string) (*URI, error) {
	db := &URI{DBType: schemas.POSTGRES}

	var err error
	if strings.Contains(dataSourceName, "://") {
		if !strings.HasPrefix(dataSourceName, "postgresql://") && !strings.HasPrefix(dataSourceName, "postgres://") {
			return nil, fmt.Errorf("unsupported protocol %v", dataSourceName)
		}

		db.DBName, err = parseURL(dataSourceName)
		if err != nil {
			return nil, err
		}
	} else {
		o := make(values)
		err = parseOpts(dataSourceName, o)
		if err != nil {
			return nil, err
		}

		db.DBName = o["dbname"]
	}

	if db.DBName == "" {
		return nil, errors.New("dbname is empty")
	}

	return db, nil
}

// GenScanResult returns a pointer to a sql.Null* scan target suitable for
// the given column type, falling back to sql.RawBytes for unknown types.
func (p *pqDriver) GenScanResult(colType string) (interface{}, error) {
	switch colType {
	case "VARCHAR", "TEXT":
		var s sql.NullString
		return &s, nil
	case "BIGINT", "BIGSERIAL":
		var s sql.NullInt64
		return &s, nil
	case "SMALLINT", "INT", "INT8", "INT4", "INTEGER", "SERIAL":
		var s sql.NullInt32
		return &s, nil
	case "FLOAT", "FLOAT4", "REAL", "DOUBLE PRECISION":
		var s sql.NullFloat64
		return &s, nil
	case "DATETIME", "TIMESTAMP":
		var s sql.NullTime
		return &s, nil
	case "BOOL":
		var s sql.NullBool
		return &s, nil
	default:
		var r sql.RawBytes
		return &r, nil
	}
}

// pqDriverPgx adapts the pq driver behaviour for the pgx driver.
// (declaration continues below)
type pqDriverPgx struct {
pqDriver -} - -func (pgx *pqDriverPgx) Parse(driverName, dataSourceName string) (*URI, error) { - // Remove the leading characters for driver to work - if len(dataSourceName) >= 9 && dataSourceName[0] == 0 { - dataSourceName = dataSourceName[9:] - } - return pgx.pqDriver.Parse(driverName, dataSourceName) -} - -// QueryDefaultPostgresSchema returns the default postgres schema -func QueryDefaultPostgresSchema(ctx context.Context, queryer core.Queryer) (string, error) { - rows, err := queryer.QueryContext(ctx, "SHOW SEARCH_PATH") - if err != nil { - return "", err - } - defer rows.Close() - if rows.Next() { - var defaultSchema string - if err = rows.Scan(&defaultSchema); err != nil { - return "", err - } - parts := strings.Split(defaultSchema, ",") - return strings.TrimSpace(parts[len(parts)-1]), nil - } - if rows.Err() != nil { - return "", rows.Err() - } - - return "", errors.New("no default schema") -} diff --git a/vendor/xorm.io/xorm/dialects/quote.go b/vendor/xorm.io/xorm/dialects/quote.go deleted file mode 100644 index da4e0dd6..00000000 --- a/vendor/xorm.io/xorm/dialects/quote.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2020 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package dialects - -// QuotePolicy describes quote handle policy -type QuotePolicy int - -// All QuotePolicies -const ( - QuotePolicyAlways QuotePolicy = iota - QuotePolicyNone - QuotePolicyReserved -) diff --git a/vendor/xorm.io/xorm/dialects/sqlite3.go b/vendor/xorm.io/xorm/dialects/sqlite3.go deleted file mode 100644 index 4ff9a39e..00000000 --- a/vendor/xorm.io/xorm/dialects/sqlite3.go +++ /dev/null @@ -1,578 +0,0 @@ -// Copyright 2015 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package dialects - -import ( - "context" - "database/sql" - "errors" - "fmt" - "regexp" - "strings" - - "xorm.io/xorm/core" - "xorm.io/xorm/schemas" -) - -var ( - sqlite3ReservedWords = map[string]bool{ - "ABORT": true, - "ACTION": true, - "ADD": true, - "AFTER": true, - "ALL": true, - "ALTER": true, - "ANALYZE": true, - "AND": true, - "AS": true, - "ASC": true, - "ATTACH": true, - "AUTOINCREMENT": true, - "BEFORE": true, - "BEGIN": true, - "BETWEEN": true, - "BY": true, - "CASCADE": true, - "CASE": true, - "CAST": true, - "CHECK": true, - "COLLATE": true, - "COLUMN": true, - "COMMIT": true, - "CONFLICT": true, - "CONSTRAINT": true, - "CREATE": true, - "CROSS": true, - "CURRENT_DATE": true, - "CURRENT_TIME": true, - "CURRENT_TIMESTAMP": true, - "DATABASE": true, - "DEFAULT": true, - "DEFERRABLE": true, - "DEFERRED": true, - "DELETE": true, - "DESC": true, - "DETACH": true, - "DISTINCT": true, - "DROP": true, - "EACH": true, - "ELSE": true, - "END": true, - "ESCAPE": true, - "EXCEPT": true, - "EXCLUSIVE": true, - "EXISTS": true, - "EXPLAIN": true, - "FAIL": true, - "FOR": true, - "FOREIGN": true, - "FROM": true, - "FULL": true, - "GLOB": true, - "GROUP": true, - "HAVING": true, - "IF": true, - "IGNORE": true, - "IMMEDIATE": true, - "IN": true, - "INDEX": true, - "INDEXED": true, - "INITIALLY": true, - "INNER": true, - "INSERT": true, - "INSTEAD": true, - "INTERSECT": true, - "INTO": true, - "IS": true, - "ISNULL": true, - "JOIN": true, - "KEY": true, - "LEFT": true, - "LIKE": true, - "LIMIT": true, - "MATCH": true, - "NATURAL": true, - "NO": true, - "NOT": true, - "NOTNULL": true, - "NULL": true, - "OF": true, - "OFFSET": true, - "ON": true, - "OR": true, - "ORDER": true, - "OUTER": true, - "PLAN": true, - "PRAGMA": true, - "PRIMARY": true, - "QUERY": true, - "RAISE": true, - "RECURSIVE": true, - "REFERENCES": true, - "REGEXP": true, - "REINDEX": true, - "RELEASE": true, - "RENAME": true, - "REPLACE": true, - "RESTRICT": true, - "RIGHT": true, - "ROLLBACK": true, 
- "ROW": true, - "SAVEPOINT": true, - "SELECT": true, - "SET": true, - "TABLE": true, - "TEMP": true, - "TEMPORARY": true, - "THEN": true, - "TO": true, - "TRANSACTI": true, - "TRIGGER": true, - "UNION": true, - "UNIQUE": true, - "UPDATE": true, - "USING": true, - "VACUUM": true, - "VALUES": true, - "VIEW": true, - "VIRTUAL": true, - "WHEN": true, - "WHERE": true, - "WITH": true, - "WITHOUT": true, - } - - sqlite3Quoter = schemas.Quoter{ - Prefix: '`', - Suffix: '`', - IsReserved: schemas.AlwaysReserve, - } -) - -type sqlite3 struct { - Base -} - -func (db *sqlite3) Init(uri *URI) error { - db.quoter = sqlite3Quoter - return db.Base.Init(db, uri) -} - -func (db *sqlite3) Version(ctx context.Context, queryer core.Queryer) (*schemas.Version, error) { - rows, err := queryer.QueryContext(ctx, "SELECT sqlite_version()") - if err != nil { - return nil, err - } - defer rows.Close() - - var version string - if !rows.Next() { - if rows.Err() != nil { - return nil, rows.Err() - } - return nil, errors.New("unknow version") - } - - if err := rows.Scan(&version); err != nil { - return nil, err - } - return &schemas.Version{ - Number: version, - Edition: "sqlite", - }, nil -} - -func (db *sqlite3) Features() *DialectFeatures { - return &DialectFeatures{ - AutoincrMode: IncrAutoincrMode, - } -} - -func (db *sqlite3) SetQuotePolicy(quotePolicy QuotePolicy) { - switch quotePolicy { - case QuotePolicyNone: - var q = sqlite3Quoter - q.IsReserved = schemas.AlwaysNoReserve - db.quoter = q - case QuotePolicyReserved: - var q = sqlite3Quoter - q.IsReserved = db.IsReserved - db.quoter = q - case QuotePolicyAlways: - fallthrough - default: - db.quoter = sqlite3Quoter - } -} - -func (db *sqlite3) SQLType(c *schemas.Column) string { - switch t := c.SQLType.Name; t { - case schemas.Bool: - if c.Default == "true" { - c.Default = "1" - } else if c.Default == "false" { - c.Default = "0" - } - return schemas.Integer - case schemas.Date, schemas.DateTime, schemas.TimeStamp, schemas.Time: - return 
schemas.DateTime - case schemas.TimeStampz: - return schemas.Text - case schemas.Char, schemas.Varchar, schemas.NVarchar, schemas.TinyText, - schemas.Text, schemas.MediumText, schemas.LongText, schemas.Json: - return schemas.Text - case schemas.Bit, schemas.TinyInt, schemas.UnsignedTinyInt, schemas.SmallInt, - schemas.UnsignedSmallInt, schemas.MediumInt, schemas.Int, schemas.UnsignedInt, - schemas.BigInt, schemas.UnsignedBigInt, schemas.Integer: - return schemas.Integer - case schemas.Float, schemas.Double, schemas.Real: - return schemas.Real - case schemas.Decimal, schemas.Numeric: - return schemas.Numeric - case schemas.TinyBlob, schemas.Blob, schemas.MediumBlob, schemas.LongBlob, schemas.Bytea, schemas.Binary, schemas.VarBinary: - return schemas.Blob - case schemas.Serial, schemas.BigSerial: - c.IsPrimaryKey = true - c.IsAutoIncrement = true - c.Nullable = false - return schemas.Integer - default: - return t - } -} - -func (db *sqlite3) ColumnTypeKind(t string) int { - switch strings.ToUpper(t) { - case "DATETIME": - return schemas.TIME_TYPE - case "TEXT": - return schemas.TEXT_TYPE - case "INTEGER", "REAL", "NUMERIC", "DECIMAL": - return schemas.NUMERIC_TYPE - case "BLOB": - return schemas.BLOB_TYPE - default: - return schemas.UNKNOW_TYPE - } -} - -func (db *sqlite3) IsReserved(name string) bool { - _, ok := sqlite3ReservedWords[strings.ToUpper(name)] - return ok -} - -func (db *sqlite3) AutoIncrStr() string { - return "AUTOINCREMENT" -} - -func (db *sqlite3) IndexCheckSQL(tableName, idxName string) (string, []interface{}) { - args := []interface{}{idxName} - return "SELECT name FROM sqlite_master WHERE type='index' and name = ?", args -} - -func (db *sqlite3) IsTableExist(queryer core.Queryer, ctx context.Context, tableName string) (bool, error) { - return db.HasRecords(queryer, ctx, "SELECT name FROM sqlite_master WHERE type='table' and name = ?", tableName) -} - -func (db *sqlite3) DropIndexSQL(tableName string, index *schemas.Index) string { - // var unique 
string - idxName := index.Name - - if !strings.HasPrefix(idxName, "UQE_") && - !strings.HasPrefix(idxName, "IDX_") { - if index.Type == schemas.UniqueType { - idxName = fmt.Sprintf("UQE_%v_%v", tableName, index.Name) - } else { - idxName = fmt.Sprintf("IDX_%v_%v", tableName, index.Name) - } - } - return fmt.Sprintf("DROP INDEX %v", db.Quoter().Quote(idxName)) -} - -func (db *sqlite3) ForUpdateSQL(query string) string { - return query -} - -func (db *sqlite3) IsColumnExist(queryer core.Queryer, ctx context.Context, tableName, colName string) (bool, error) { - query := "SELECT * FROM " + tableName + " LIMIT 0" - rows, err := queryer.QueryContext(ctx, query) - if err != nil { - return false, err - } - defer rows.Close() - - cols, err := rows.Columns() - if err != nil { - return false, err - } - - for _, col := range cols { - if strings.EqualFold(col, colName) { - return true, nil - } - } - - return false, nil -} - -// splitColStr splits a sqlite col strings as fields -func splitColStr(colStr string) []string { - colStr = strings.TrimSpace(colStr) - var results = make([]string, 0, 10) - var lastIdx int - var hasC, hasQuote bool - for i, c := range colStr { - if c == ' ' && !hasQuote { - if hasC { - results = append(results, colStr[lastIdx:i]) - hasC = false - } - } else { - if c == '\'' { - hasQuote = !hasQuote - } - if !hasC { - lastIdx = i - } - hasC = true - if i == len(colStr)-1 { - results = append(results, colStr[lastIdx:i+1]) - } - } - } - return results -} - -func parseString(colStr string) (*schemas.Column, error) { - fields := splitColStr(colStr) - col := new(schemas.Column) - col.Indexes = make(map[string]int) - col.Nullable = true - col.DefaultIsEmpty = true - - for idx, field := range fields { - if idx == 0 { - col.Name = strings.Trim(strings.Trim(field, "`[] "), `"`) - continue - } else if idx == 1 { - col.SQLType = schemas.SQLType{Name: field, DefaultLength: 0, DefaultLength2: 0} - continue - } - switch field { - case "PRIMARY": - col.IsPrimaryKey = true 
- case "AUTOINCREMENT": - col.IsAutoIncrement = true - case "NULL": - if fields[idx-1] == "NOT" { - col.Nullable = false - } else { - col.Nullable = true - } - case "DEFAULT": - col.Default = fields[idx+1] - col.DefaultIsEmpty = false - } - } - return col, nil -} - -func (db *sqlite3) GetColumns(queryer core.Queryer, ctx context.Context, tableName string) ([]string, map[string]*schemas.Column, error) { - args := []interface{}{tableName} - s := "SELECT sql FROM sqlite_master WHERE type='table' and name = ?" - - rows, err := queryer.QueryContext(ctx, s, args...) - if err != nil { - return nil, nil, err - } - defer rows.Close() - - var name string - if rows.Next() { - err = rows.Scan(&name) - if err != nil { - return nil, nil, err - } - } - if rows.Err() != nil { - return nil, nil, rows.Err() - } - - if name == "" { - return nil, nil, errors.New("no table named " + tableName) - } - - nStart := strings.Index(name, "(") - nEnd := strings.LastIndex(name, ")") - reg := regexp.MustCompile(`[^\(,\)]*(\([^\(]*\))?`) - colCreates := reg.FindAllString(name[nStart+1:nEnd], -1) - cols := make(map[string]*schemas.Column) - colSeq := make([]string, 0) - - for _, colStr := range colCreates { - reg = regexp.MustCompile(`,\s`) - colStr = reg.ReplaceAllString(colStr, ",") - if strings.HasPrefix(strings.TrimSpace(colStr), "PRIMARY KEY") { - parts := strings.Split(strings.TrimSpace(colStr), "(") - if len(parts) == 2 { - pkCols := strings.Split(strings.TrimRight(strings.TrimSpace(parts[1]), ")"), ",") - for _, pk := range pkCols { - if col, ok := cols[strings.Trim(strings.TrimSpace(pk), "`")]; ok { - col.IsPrimaryKey = true - } - } - } - continue - } - - col, err := parseString(colStr) - if err != nil { - return colSeq, cols, err - } - - cols[col.Name] = col - colSeq = append(colSeq, col.Name) - } - return colSeq, cols, nil -} - -func (db *sqlite3) GetTables(queryer core.Queryer, ctx context.Context) ([]*schemas.Table, error) { - args := []interface{}{} - s := "SELECT name FROM 
sqlite_master WHERE type='table'" - - rows, err := queryer.QueryContext(ctx, s, args...) - if err != nil { - return nil, err - } - defer rows.Close() - - tables := make([]*schemas.Table, 0) - for rows.Next() { - table := schemas.NewEmptyTable() - err = rows.Scan(&table.Name) - if err != nil { - return nil, err - } - if table.Name == "sqlite_sequence" { - continue - } - tables = append(tables, table) - } - if rows.Err() != nil { - return nil, rows.Err() - } - return tables, nil -} - -func (db *sqlite3) GetIndexes(queryer core.Queryer, ctx context.Context, tableName string) (map[string]*schemas.Index, error) { - args := []interface{}{tableName} - s := "SELECT sql FROM sqlite_master WHERE type='index' and tbl_name = ?" - - rows, err := queryer.QueryContext(ctx, s, args...) - if err != nil { - return nil, err - } - defer rows.Close() - - indexes := make(map[string]*schemas.Index) - for rows.Next() { - var tmpSQL sql.NullString - err = rows.Scan(&tmpSQL) - if err != nil { - return nil, err - } - - if !tmpSQL.Valid { - continue - } - sql := tmpSQL.String - - index := new(schemas.Index) - nNStart := strings.Index(sql, "INDEX") - nNEnd := strings.Index(sql, "ON") - if nNStart == -1 || nNEnd == -1 { - continue - } - - indexName := strings.Trim(strings.TrimSpace(sql[nNStart+6:nNEnd]), "`[]'\"") - var isRegular bool - if strings.HasPrefix(indexName, "IDX_"+tableName) || strings.HasPrefix(indexName, "UQE_"+tableName) { - index.Name = indexName[5+len(tableName):] - isRegular = true - } else { - index.Name = indexName - } - - if strings.HasPrefix(sql, "CREATE UNIQUE INDEX") { - index.Type = schemas.UniqueType - } else { - index.Type = schemas.IndexType - } - - nStart := strings.Index(sql, "(") - nEnd := strings.Index(sql, ")") - colIndexes := strings.Split(sql[nStart+1:nEnd], ",") - - index.Cols = make([]string, 0) - for _, col := range colIndexes { - index.Cols = append(index.Cols, strings.Trim(col, "` []")) - } - index.IsRegular = isRegular - indexes[index.Name] = index - } - 
if rows.Err() != nil { - return nil, rows.Err() - } - - return indexes, nil -} - -func (db *sqlite3) Filters() []Filter { - return []Filter{} -} - -type sqlite3Driver struct { - baseDriver -} - -func (p *sqlite3Driver) Features() *DriverFeatures { - return &DriverFeatures{ - SupportReturnInsertedID: true, - } -} - -func (p *sqlite3Driver) Parse(driverName, dataSourceName string) (*URI, error) { - if strings.Contains(dataSourceName, "?") { - dataSourceName = dataSourceName[:strings.Index(dataSourceName, "?")] - } - - return &URI{DBType: schemas.SQLITE, DBName: dataSourceName}, nil -} - -func (p *sqlite3Driver) GenScanResult(colType string) (interface{}, error) { - switch colType { - case "TEXT": - var s sql.NullString - return &s, nil - case "INTEGER": - var s sql.NullInt64 - return &s, nil - case "DATETIME": - var s sql.NullTime - return &s, nil - case "REAL": - var s sql.NullFloat64 - return &s, nil - case "NUMERIC", "DECIMAL": - var s sql.NullString - return &s, nil - case "BLOB": - var s sql.RawBytes - return &s, nil - default: - var r sql.NullString - return &r, nil - } -} diff --git a/vendor/xorm.io/xorm/dialects/table_name.go b/vendor/xorm.io/xorm/dialects/table_name.go deleted file mode 100644 index 8a0baeac..00000000 --- a/vendor/xorm.io/xorm/dialects/table_name.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2015 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package dialects - -import ( - "fmt" - "reflect" - "strings" - - "xorm.io/xorm/internal/utils" - "xorm.io/xorm/names" - "xorm.io/xorm/schemas" -) - -// TableNameWithSchema will add schema prefix on table name if possible -func TableNameWithSchema(dialect Dialect, tableName string) string { - // Add schema name as prefix of table name. - // Only for postgres database. 
- if dialect.URI().Schema != "" && !strings.Contains(tableName, ".") { - return fmt.Sprintf("%s.%s", dialect.URI().Schema, tableName) - } - return tableName -} - -// TableNameNoSchema returns table name with given tableName -func TableNameNoSchema(dialect Dialect, mapper names.Mapper, tableName interface{}) string { - quote := dialect.Quoter().Quote - switch tt := tableName.(type) { - case []string: - if len(tt) > 1 { - if dialect.URI().DBType == schemas.ORACLE { - return fmt.Sprintf("%v %v", quote(tt[0]), quote(tt[1])) - } - return fmt.Sprintf("%v AS %v", quote(tt[0]), quote(tt[1])) - } else if len(tt) == 1 { - return quote(tt[0]) - } - case []interface{}: - l := len(tt) - var table string - if l > 0 { - f := tt[0] - switch f.(type) { - case string: - table = f.(string) - case names.TableName: - table = f.(names.TableName).TableName() - default: - v := utils.ReflectValue(f) - t := v.Type() - if t.Kind() == reflect.Struct { - table = names.GetTableName(mapper, v) - } else { - table = quote(fmt.Sprintf("%v", f)) - } - } - } - if l > 1 { - if dialect.URI().DBType == schemas.ORACLE { - return fmt.Sprintf("%v %v", quote(table), quote(fmt.Sprintf("%v", tt[1]))) - } - return fmt.Sprintf("%v AS %v", quote(table), quote(fmt.Sprintf("%v", tt[1]))) - } else if l == 1 { - return quote(table) - } - case names.TableName: - return tableName.(names.TableName).TableName() - case string: - return tableName.(string) - case reflect.Value: - v := tableName.(reflect.Value) - return names.GetTableName(mapper, v) - default: - v := utils.ReflectValue(tableName) - t := v.Type() - if t.Kind() == reflect.Struct { - return names.GetTableName(mapper, v) - } - return quote(fmt.Sprintf("%v", tableName)) - } - return "" -} - -// FullTableName returns table name with quote and schema according parameter -func FullTableName(dialect Dialect, mapper names.Mapper, bean interface{}, includeSchema ...bool) string { - tbName := TableNameNoSchema(dialect, mapper, bean) - if len(includeSchema) > 0 && 
includeSchema[0] && !utils.IsSubQuery(tbName) { - tbName = TableNameWithSchema(dialect, tbName) - } - return tbName -} diff --git a/vendor/xorm.io/xorm/dialects/time.go b/vendor/xorm.io/xorm/dialects/time.go deleted file mode 100644 index cdc896be..00000000 --- a/vendor/xorm.io/xorm/dialects/time.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2015 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package dialects - -import ( - "strings" - "time" - - "xorm.io/xorm/schemas" -) - -// FormatColumnTime format column time -func FormatColumnTime(dialect Dialect, dbLocation *time.Location, col *schemas.Column, t time.Time) (interface{}, error) { - if t.IsZero() { - if col.Nullable { - return nil, nil - } - - if col.SQLType.IsNumeric() { - return 0, nil - } - } - - tmZone := dbLocation - if col.TimeZone != nil { - tmZone = col.TimeZone - } - - t = t.In(tmZone) - - switch col.SQLType.Name { - case schemas.Date: - return t.Format("2006-01-02"), nil - case schemas.Time: - layout := "15:04:05" - if col.Length > 0 { - // we can use int(...) casting here as it's very unlikely to a huge sized field - layout += "." + strings.Repeat("0", int(col.Length)) - } - return t.Format(layout), nil - case schemas.DateTime, schemas.TimeStamp: - layout := "2006-01-02 15:04:05" - if col.Length > 0 { - // we can use int(...) casting here as it's very unlikely to a huge sized field - layout += "." 
+ strings.Repeat("0", int(col.Length)) - } - return t.Format(layout), nil - case schemas.Varchar: - return t.Format("2006-01-02 15:04:05"), nil - case schemas.TimeStampz: - if dialect.URI().DBType == schemas.MSSQL { - return t.Format("2006-01-02T15:04:05.9999999Z07:00"), nil - } else { - return t.Format(time.RFC3339Nano), nil - } - case schemas.BigInt, schemas.Int: - return t.Unix(), nil - default: - return t, nil - } -} diff --git a/vendor/xorm.io/xorm/doc.go b/vendor/xorm.io/xorm/doc.go deleted file mode 100644 index a1565806..00000000 --- a/vendor/xorm.io/xorm/doc.go +++ /dev/null @@ -1,250 +0,0 @@ -// Copyright 2013 - 2016 The XORM Authors. All rights reserved. -// Use of this source code is governed by a BSD -// license that can be found in the LICENSE file. - -/* - -Package xorm is a simple and powerful ORM for Go. - -Installation - -Make sure you have installed Go 1.11+ and then: - - go get xorm.io/xorm - -Create Engine - -Firstly, we should create an engine for a database - - engine, err := xorm.NewEngine(driverName, dataSourceName) - -Method NewEngine's parameters are the same as sql.Open which depend drivers' implementation. -Generally, one engine for an application is enough. You can define it as a package variable. - -Raw Methods - -XORM supports raw SQL execution: - -1. query with a SQL string, the returned results is []map[string][]byte - - results, err := engine.Query("select * from user") - -2. query with a SQL string, the returned results is []map[string]string - - results, err := engine.QueryString("select * from user") - -3. query with a SQL string, the returned results is []map[string]interface{} - - results, err := engine.QueryInterface("select * from user") - -4. execute with a SQL string, the returned results - - affected, err := engine.Exec("update user set .... where ...") - -ORM Methods - -There are 8 major ORM methods and many helpful methods to use to operate database. - -1. 
Insert one or multiple records to database - - affected, err := engine.Insert(&struct) - // INSERT INTO struct () values () - affected, err := engine.Insert(&struct1, &struct2) - // INSERT INTO struct1 () values () - // INSERT INTO struct2 () values () - affected, err := engine.Insert(&sliceOfStruct) - // INSERT INTO struct () values (),(),() - affected, err := engine.Insert(&struct1, &sliceOfStruct2) - // INSERT INTO struct1 () values () - // INSERT INTO struct2 () values (),(),() - -2. Query one record or one variable from database - - has, err := engine.Get(&user) - // SELECT * FROM user LIMIT 1 - - var id int64 - has, err := engine.Table("user").Where("name = ?", name).Get(&id) - // SELECT id FROM user WHERE name = ? LIMIT 1 - - var id int64 - var name string - has, err := engine.Table(&user).Cols("id", "name").Get(&id, &name) - // SELECT id, name FROM user LIMIT 1 - -3. Query multiple records from database - - var sliceOfStructs []Struct - err := engine.Find(&sliceOfStructs) - // SELECT * FROM user - - var mapOfStructs = make(map[int64]Struct) - err := engine.Find(&mapOfStructs) - // SELECT * FROM user - - var int64s []int64 - err := engine.Table("user").Cols("id").Find(&int64s) - // SELECT id FROM user - -4. Query multiple records and record by record handle, there two methods, one is Iterate, -another is Rows - - err := engine.Iterate(new(User), func(i int, bean interface{}) error { - // do something - }) - // SELECT * FROM user - - rows, err := engine.Rows(...) - // SELECT * FROM user - defer rows.Close() - bean := new(Struct) - for rows.Next() { - err = rows.Scan(bean) - } - -or - - rows, err := engine.Cols("name", "age").Rows(...) - // SELECT * FROM user - defer rows.Close() - for rows.Next() { - var name string - var age int - err = rows.Scan(&name, &age) - } - -5. Update one or more records - - affected, err := engine.ID(...).Update(&user) - // UPDATE user SET ... - -6. 
Delete one or more records, Delete MUST has condition - - affected, err := engine.Where(...).Delete(&user) - // DELETE FROM user Where ... - -7. Count records - - counts, err := engine.Count(&user) - // SELECT count(*) AS total FROM user - - counts, err := engine.SQL("select count(*) FROM user").Count() - // select count(*) FROM user - -8. Sum records - - sumFloat64, err := engine.Sum(&user, "id") - // SELECT sum(id) from user - - sumFloat64s, err := engine.Sums(&user, "id1", "id2") - // SELECT sum(id1), sum(id2) from user - - sumInt64s, err := engine.SumsInt(&user, "id1", "id2") - // SELECT sum(id1), sum(id2) from user - -Conditions - -The above 8 methods could use with condition methods chainable. -Notice: the above 8 methods should be the last chainable method. - -1. ID, In - - engine.ID(1).Get(&user) // for single primary key - // SELECT * FROM user WHERE id = 1 - engine.ID(schemas.PK{1, 2}).Get(&user) // for composite primary keys - // SELECT * FROM user WHERE id1 = 1 AND id2 = 2 - engine.In("id", 1, 2, 3).Find(&users) - // SELECT * FROM user WHERE id IN (1, 2, 3) - engine.In("id", []int{1, 2, 3}).Find(&users) - // SELECT * FROM user WHERE id IN (1, 2, 3) - -2. Where, And, Or - - engine.Where().And().Or().Find() - // SELECT * FROM user WHERE (.. AND ..) OR ... - -3. OrderBy, Asc, Desc - - engine.Asc().Desc().Find() - // SELECT * FROM user ORDER BY .. ASC, .. DESC - engine.OrderBy().Find() - // SELECT * FROM user ORDER BY .. - -4. Limit, Top - - engine.Limit().Find() - // SELECT * FROM user LIMIT .. OFFSET .. - engine.Top(5).Find() - // SELECT TOP 5 * FROM user // for mssql - // SELECT * FROM user LIMIT .. OFFSET 0 //for other databases - -5. SQL, let you custom SQL - - var users []User - engine.SQL("select * from user").Find(&users) - -6. Cols, Omit, Distinct - - var users []*User - engine.Cols("col1, col2").Find(&users) - // SELECT col1, col2 FROM user - engine.Cols("col1", "col2").Where().Update(user) - // UPDATE user set col1 = ?, col2 = ? Where ... 
- engine.Omit("col1").Find(&users) - // SELECT col2, col3 FROM user - engine.Omit("col1").Insert(&user) - // INSERT INTO table (non-col1) VALUES () - engine.Distinct("col1").Find(&users) - // SELECT DISTINCT col1 FROM user - -7. Join, GroupBy, Having - - engine.GroupBy("name").Having("name='xlw'").Find(&users) - //SELECT * FROM user GROUP BY name HAVING name='xlw' - engine.Join("LEFT", "userdetail", "user.id=userdetail.id").Find(&users) - //SELECT * FROM user LEFT JOIN userdetail ON user.id=userdetail.id - -Builder - -xorm could work with xorm.io/builder directly. - -1. With Where - - var cond = builder.Eq{"a":1, "b":2} - engine.Where(cond).Find(&users) - -2. With In - - var subQuery = builder.Select("name").From("group") - engine.In("group_name", subQuery).Find(&users) - -3. With Join - - var subQuery = builder.Select("name").From("group") - engine.Join("INNER", subQuery, "group.id = user.group_id").Find(&users) - -4. With SetExprs - - var subQuery = builder.Select("name").From("group") - engine.ID(1).SetExprs("name", subQuery).Update(new(User)) - -5. With SQL - - var query = builder.Select("name").From("group") - results, err := engine.SQL(query).Find(&groups) - -6. With Query - - var query = builder.Select("name").From("group") - results, err := engine.Query(query) - results, err := engine.QueryString(query) - results, err := engine.QueryInterface(query) - -7. With Exec - - var query = builder.Insert("a, b").Into("table1").Select("b, c").From("table2") - results, err := engine.Exec(query) - -More usage, please visit http://xorm.io/docs -*/ -package xorm diff --git a/vendor/xorm.io/xorm/engine.go b/vendor/xorm.io/xorm/engine.go deleted file mode 100644 index 81cfc7a9..00000000 --- a/vendor/xorm.io/xorm/engine.go +++ /dev/null @@ -1,1433 +0,0 @@ -// Copyright 2015 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package xorm - -import ( - "context" - "database/sql" - "fmt" - "io" - "os" - "reflect" - "regexp" - "runtime" - "strconv" - "strings" - "time" - - "xorm.io/xorm/caches" - "xorm.io/xorm/contexts" - "xorm.io/xorm/core" - "xorm.io/xorm/dialects" - "xorm.io/xorm/internal/utils" - "xorm.io/xorm/log" - "xorm.io/xorm/names" - "xorm.io/xorm/schemas" - "xorm.io/xorm/tags" -) - -// Engine is the major struct of xorm, it means a database manager. -// Commonly, an application only need one engine -type Engine struct { - cacherMgr *caches.Manager - defaultContext context.Context - dialect dialects.Dialect - driver dialects.Driver - engineGroup *EngineGroup - logger log.ContextLogger - tagParser *tags.Parser - db *core.DB - - driverName string - dataSourceName string - - TZLocation *time.Location // The timezone of the application - DatabaseTZ *time.Location // The timezone of the database - - logSessionID bool // create session id -} - -// NewEngine new a db manager according to the parameter. Currently support four -// drivers -func NewEngine(driverName string, dataSourceName string) (*Engine, error) { - dialect, err := dialects.OpenDialect(driverName, dataSourceName) - if err != nil { - return nil, err - } - - db, err := core.Open(driverName, dataSourceName) - if err != nil { - return nil, err - } - - return newEngine(driverName, dataSourceName, dialect, db) -} - -func newEngine(driverName, dataSourceName string, dialect dialects.Dialect, db *core.DB) (*Engine, error) { - cacherMgr := caches.NewManager() - mapper := names.NewCacheMapper(new(names.SnakeMapper)) - tagParser := tags.NewParser("xorm", dialect, mapper, mapper, cacherMgr) - - engine := &Engine{ - dialect: dialect, - driver: dialects.QueryDriver(driverName), - TZLocation: time.Local, - defaultContext: context.Background(), - cacherMgr: cacherMgr, - tagParser: tagParser, - driverName: driverName, - dataSourceName: dataSourceName, - db: db, - logSessionID: false, - } - - if dialect.URI().DBType == schemas.SQLITE { 
- engine.DatabaseTZ = time.UTC - } else { - engine.DatabaseTZ = time.Local - } - - logger := log.NewSimpleLogger(os.Stdout) - logger.SetLevel(log.LOG_INFO) - engine.SetLogger(log.NewLoggerAdapter(logger)) - - runtime.SetFinalizer(engine, func(engine *Engine) { - _ = engine.Close() - }) - - return engine, nil -} - -// NewEngineWithParams new a db manager with params. The params will be passed to dialects. -func NewEngineWithParams(driverName string, dataSourceName string, params map[string]string) (*Engine, error) { - engine, err := NewEngine(driverName, dataSourceName) - engine.dialect.SetParams(params) - return engine, err -} - -// NewEngineWithDB new a db manager with db. The params will be passed to db. -func NewEngineWithDB(driverName string, dataSourceName string, db *core.DB) (*Engine, error) { - dialect, err := dialects.OpenDialect(driverName, dataSourceName) - if err != nil { - return nil, err - } - return newEngine(driverName, dataSourceName, dialect, db) -} - -// NewEngineWithDialectAndDB new a db manager according to the parameter. -// If you do not want to use your own dialect or db, please use NewEngine. -// For creating dialect, you can call dialects.OpenDialect. And, for creating db, -// you can call core.Open or core.FromDB. 
-func NewEngineWithDialectAndDB(driverName, dataSourceName string, dialect dialects.Dialect, db *core.DB) (*Engine, error) { - return newEngine(driverName, dataSourceName, dialect, db) -} - -// EnableSessionID if enable session id -func (engine *Engine) EnableSessionID(enable bool) { - engine.logSessionID = enable -} - -// SetCacher sets cacher for the table -func (engine *Engine) SetCacher(tableName string, cacher caches.Cacher) { - engine.cacherMgr.SetCacher(tableName, cacher) -} - -// GetCacher returns the cachher of the special table -func (engine *Engine) GetCacher(tableName string) caches.Cacher { - return engine.cacherMgr.GetCacher(tableName) -} - -// SetQuotePolicy sets the special quote policy -func (engine *Engine) SetQuotePolicy(quotePolicy dialects.QuotePolicy) { - engine.dialect.SetQuotePolicy(quotePolicy) -} - -// BufferSize sets buffer size for iterate -func (engine *Engine) BufferSize(size int) *Session { - session := engine.NewSession() - session.isAutoClose = true - return session.BufferSize(size) -} - -// ShowSQL show SQL statement or not on logger if log level is great than INFO -func (engine *Engine) ShowSQL(show ...bool) { - engine.logger.ShowSQL(show...) 
- engine.DB().Logger = engine.logger -} - -// Logger return the logger interface -func (engine *Engine) Logger() log.ContextLogger { - return engine.logger -} - -// SetLogger set the new logger -func (engine *Engine) SetLogger(logger interface{}) { - var realLogger log.ContextLogger - switch t := logger.(type) { - case log.ContextLogger: - realLogger = t - case log.Logger: - realLogger = log.NewLoggerAdapter(t) - default: - panic("logger should implement either log.ContextLogger or log.Logger") - } - engine.logger = realLogger - engine.DB().Logger = realLogger -} - -// SetLogLevel sets the logger level -func (engine *Engine) SetLogLevel(level log.LogLevel) { - engine.logger.SetLevel(level) -} - -// SetDisableGlobalCache disable global cache or not -func (engine *Engine) SetDisableGlobalCache(disable bool) { - engine.cacherMgr.SetDisableGlobalCache(disable) -} - -// DriverName return the current sql driver's name -func (engine *Engine) DriverName() string { - return engine.driverName -} - -// DataSourceName return the current connection string -func (engine *Engine) DataSourceName() string { - return engine.dataSourceName -} - -// SetMapper set the name mapping rules -func (engine *Engine) SetMapper(mapper names.Mapper) { - engine.SetTableMapper(mapper) - engine.SetColumnMapper(mapper) -} - -// SetTableMapper set the table name mapping rule -func (engine *Engine) SetTableMapper(mapper names.Mapper) { - engine.tagParser.SetTableMapper(mapper) -} - -// SetColumnMapper set the column name mapping rule -func (engine *Engine) SetColumnMapper(mapper names.Mapper) { - engine.tagParser.SetColumnMapper(mapper) -} - -// SetTagIdentifier set the tag identifier -func (engine *Engine) SetTagIdentifier(tagIdentifier string) { - engine.tagParser.SetIdentifier(tagIdentifier) -} - -// Quote Use QuoteStr quote the string sql -func (engine *Engine) Quote(value string) string { - value = strings.TrimSpace(value) - if len(value) == 0 { - return value - } - - buf := strings.Builder{} - 
engine.QuoteTo(&buf, value) - - return buf.String() -} - -// QuoteTo quotes string and writes into the buffer -func (engine *Engine) QuoteTo(buf *strings.Builder, value string) { - if buf == nil { - return - } - - value = strings.TrimSpace(value) - if value == "" { - return - } - engine.dialect.Quoter().QuoteTo(buf, value) -} - -// SQLType A simple wrapper to dialect's core.SqlType method -func (engine *Engine) SQLType(c *schemas.Column) string { - return engine.dialect.SQLType(c) -} - -// SetConnMaxLifetime sets the maximum amount of time a connection may be reused. -func (engine *Engine) SetConnMaxLifetime(d time.Duration) { - engine.DB().SetConnMaxLifetime(d) -} - -// SetMaxOpenConns is only available for go 1.2+ -func (engine *Engine) SetMaxOpenConns(conns int) { - engine.DB().SetMaxOpenConns(conns) -} - -// SetMaxIdleConns set the max idle connections on pool, default is 2 -func (engine *Engine) SetMaxIdleConns(conns int) { - engine.DB().SetMaxIdleConns(conns) -} - -// SetDefaultCacher set the default cacher. Xorm's default not enable cacher. 
-func (engine *Engine) SetDefaultCacher(cacher caches.Cacher) { - engine.cacherMgr.SetDefaultCacher(cacher) -} - -// GetDefaultCacher returns the default cacher -func (engine *Engine) GetDefaultCacher() caches.Cacher { - return engine.cacherMgr.GetDefaultCacher() -} - -// NoCache If you has set default cacher, and you want temporilly stop use cache, -// you can use NoCache() -func (engine *Engine) NoCache() *Session { - session := engine.NewSession() - session.isAutoClose = true - return session.NoCache() -} - -// NoCascade If you do not want to auto cascade load object -func (engine *Engine) NoCascade() *Session { - session := engine.NewSession() - session.isAutoClose = true - return session.NoCascade() -} - -// MapCacher Set a table use a special cacher -func (engine *Engine) MapCacher(bean interface{}, cacher caches.Cacher) error { - engine.SetCacher(dialects.FullTableName(engine.dialect, engine.GetTableMapper(), bean, true), cacher) - return nil -} - -// NewDB provides an interface to operate database directly -func (engine *Engine) NewDB() (*core.DB, error) { - return core.Open(engine.driverName, engine.dataSourceName) -} - -// DB return the wrapper of sql.DB -func (engine *Engine) DB() *core.DB { - return engine.db -} - -// Dialect return database dialect -func (engine *Engine) Dialect() dialects.Dialect { - return engine.dialect -} - -// NewSession New a session -func (engine *Engine) NewSession() *Session { - return newSession(engine) -} - -// Close the engine -func (engine *Engine) Close() error { - return engine.DB().Close() -} - -// Ping tests if database is alive -func (engine *Engine) Ping() error { - session := engine.NewSession() - defer session.Close() - return session.Ping() -} - -// SQL method let's you manually write raw SQL and operate -// For example: -// -// engine.SQL("select * from user").Find(&users) -// -// This code will execute "select * from user" and set the records to users -func (engine *Engine) SQL(query interface{}, args 
...interface{}) *Session { - session := engine.NewSession() - session.isAutoClose = true - return session.SQL(query, args...) -} - -// NoAutoTime Default if your struct has "created" or "updated" filed tag, the fields -// will automatically be filled with current time when Insert or Update -// invoked. Call NoAutoTime if you dont' want to fill automatically. -func (engine *Engine) NoAutoTime() *Session { - session := engine.NewSession() - session.isAutoClose = true - return session.NoAutoTime() -} - -// NoAutoCondition disable auto generate Where condition from bean or not -func (engine *Engine) NoAutoCondition(no ...bool) *Session { - session := engine.NewSession() - session.isAutoClose = true - return session.NoAutoCondition(no...) -} - -func (engine *Engine) loadTableInfo(table *schemas.Table) error { - colSeq, cols, err := engine.dialect.GetColumns(engine.db, engine.defaultContext, table.Name) - if err != nil { - return err - } - for _, name := range colSeq { - table.AddColumn(cols[name]) - } - indexes, err := engine.dialect.GetIndexes(engine.db, engine.defaultContext, table.Name) - if err != nil { - return err - } - table.Indexes = indexes - - var seq int - for _, index := range indexes { - for _, name := range index.Cols { - parts := strings.Split(strings.TrimSpace(name), " ") - if len(parts) > 1 { - if parts[1] == "DESC" { - seq = 1 - } else if parts[1] == "ASC" { - seq = 0 - } - } - colName := strings.Trim(parts[0], `"`) - if col := table.GetColumn(colName); col != nil { - col.Indexes[index.Name] = index.Type - } else { - return fmt.Errorf("Unknown col %s seq %d, in index %v of table %v, columns %v", name, seq, index.Name, table.Name, table.ColumnsSeq()) - } - } - } - return nil -} - -// DBMetas Retrieve all tables, columns, indexes' informations from database. 
-func (engine *Engine) DBMetas() ([]*schemas.Table, error) { - tables, err := engine.dialect.GetTables(engine.db, engine.defaultContext) - if err != nil { - return nil, err - } - - for _, table := range tables { - if err = engine.loadTableInfo(table); err != nil { - return nil, err - } - } - return tables, nil -} - -// DumpAllToFile dump database all table structs and data to a file -func (engine *Engine) DumpAllToFile(fp string, tp ...schemas.DBType) error { - f, err := os.Create(fp) - if err != nil { - return err - } - defer f.Close() - return engine.DumpAll(f, tp...) -} - -// DumpAll dump database all table structs and data to w -func (engine *Engine) DumpAll(w io.Writer, tp ...schemas.DBType) error { - tables, err := engine.DBMetas() - if err != nil { - return err - } - return engine.DumpTables(tables, w, tp...) -} - -// DumpTablesToFile dump specified tables to SQL file. -func (engine *Engine) DumpTablesToFile(tables []*schemas.Table, fp string, tp ...schemas.DBType) error { - f, err := os.Create(fp) - if err != nil { - return err - } - defer f.Close() - return engine.DumpTables(tables, f, tp...) -} - -// DumpTables dump specify tables to io.Writer -func (engine *Engine) DumpTables(tables []*schemas.Table, w io.Writer, tp ...schemas.DBType) error { - return engine.dumpTables(context.Background(), tables, w, tp...) 
-} - -func formatBool(s bool, dstDialect dialects.Dialect) string { - if dstDialect.URI().DBType != schemas.POSTGRES { - if s { - return "1" - } - return "0" - } - return strconv.FormatBool(s) -} - -var controlCharactersRe = regexp.MustCompile(`[\x00-\x1f\x7f]+`) - -// dumpTables dump database all table structs and data to w with specify db type -func (engine *Engine) dumpTables(ctx context.Context, tables []*schemas.Table, w io.Writer, tp ...schemas.DBType) error { - var dstDialect dialects.Dialect - if len(tp) == 0 { - dstDialect = engine.dialect - } else { - dstDialect = dialects.QueryDialect(tp[0]) - if dstDialect == nil { - return fmt.Errorf("unsupported database type %v", tp[0]) - } - - uri := engine.dialect.URI() - destURI := dialects.URI{ - DBType: tp[0], - DBName: uri.DBName, - // DO NOT SET SCHEMA HERE - } - if tp[0] == schemas.POSTGRES { - destURI.Schema = engine.dialect.URI().Schema - } - if err := dstDialect.Init(&destURI); err != nil { - return err - } - } - cacherMgr := caches.NewManager() - dstTableCache := tags.NewParser("xorm", dstDialect, engine.GetTableMapper(), engine.GetColumnMapper(), cacherMgr) - - _, err := io.WriteString(w, fmt.Sprintf("/*Generated by xorm %s, from %s to %s*/\n\n", - time.Now().In(engine.TZLocation).Format("2006-01-02 15:04:05"), engine.dialect.URI().DBType, dstDialect.URI().DBType)) - if err != nil { - return err - } - - if dstDialect.URI().DBType == schemas.MYSQL { - // For MySQL set NO_BACKLASH_ESCAPES so that strings work properly - if _, err := io.WriteString(w, "SET sql_mode='NO_BACKSLASH_ESCAPES';\n"); err != nil { - return err - } - } - - for i, table := range tables { - dstTable := table - if table.Type != nil { - dstTable, err = dstTableCache.Parse(reflect.New(table.Type).Elem()) - if err != nil { - engine.logger.Errorf("Unable to infer table for %s in new dialect. 
Error: %v", table.Name) - dstTable = table - } - } - - dstTableName := dstTable.Name - quoter := dstDialect.Quoter().Quote - quotedDstTableName := quoter(dstTable.Name) - if dstDialect.URI().Schema != "" { - dstTableName = fmt.Sprintf("%s.%s", dstDialect.URI().Schema, dstTable.Name) - quotedDstTableName = fmt.Sprintf("%s.%s", quoter(dstDialect.URI().Schema), quoter(dstTable.Name)) - } - originalTableName := table.Name - if engine.dialect.URI().Schema != "" { - originalTableName = fmt.Sprintf("%s.%s", engine.dialect.URI().Schema, table.Name) - } - if i > 0 { - _, err = io.WriteString(w, "\n") - if err != nil { - return err - } - } - - if dstTable.AutoIncrement != "" && dstDialect.Features().AutoincrMode == dialects.SequenceAutoincrMode { - sqlstr, err := dstDialect.CreateSequenceSQL(ctx, engine.db, utils.SeqName(dstTableName)) - if err != nil { - return err - } - _, err = io.WriteString(w, sqlstr+";\n") - if err != nil { - return err - } - } - - sqlstr, _, err := dstDialect.CreateTableSQL(ctx, engine.db, dstTable, dstTableName) - if err != nil { - return err - } - _, err = io.WriteString(w, sqlstr+";\n") - if err != nil { - return err - } - - if len(dstTable.PKColumns()) > 0 && dstDialect.URI().DBType == schemas.MSSQL { - fmt.Fprintf(w, "SET IDENTITY_INSERT [%s] ON;\n", dstTable.Name) - } - - for _, index := range dstTable.Indexes { - _, err = io.WriteString(w, dstDialect.CreateIndexSQL(dstTable.Name, index)+";\n") - if err != nil { - return err - } - } - - cols := table.ColumnsSeq() - dstCols := dstTable.ColumnsSeq() - - colNames := engine.dialect.Quoter().Join(cols, ", ") - destColNames := dstDialect.Quoter().Join(dstCols, ", ") - - rows, err := engine.DB().QueryContext(engine.defaultContext, "SELECT "+colNames+" FROM "+engine.Quote(originalTableName)) - if err != nil { - return err - } - defer rows.Close() - - types, err := rows.ColumnTypes() - if err != nil { - return err - } - - fields, err := rows.Columns() - if err != nil { - return err - } - - sess := 
engine.NewSession() - defer sess.Close() - for rows.Next() { - _, err = io.WriteString(w, "INSERT INTO "+quotedDstTableName+" ("+destColNames+") VALUES (") - if err != nil { - return err - } - - scanResults, err := sess.engine.scanStringInterface(rows, fields, types) - if err != nil { - return err - } - for i, scanResult := range scanResults { - stp := schemas.SQLType{Name: types[i].DatabaseTypeName()} - s := scanResult.(*sql.NullString) - if !s.Valid { - if _, err = io.WriteString(w, "NULL"); err != nil { - return err - } - } else { - if table.Columns()[i].SQLType.IsBool() || stp.IsBool() || (dstDialect.URI().DBType == schemas.MSSQL && strings.EqualFold(stp.Name, schemas.Bit)) { - val, err := strconv.ParseBool(s.String) - if err != nil { - return err - } - - if _, err = io.WriteString(w, formatBool(val, dstDialect)); err != nil { - return err - } - } else if stp.IsNumeric() { - if _, err = io.WriteString(w, s.String); err != nil { - return err - } - } else if sess.engine.dialect.URI().DBType == schemas.DAMENG && stp.IsTime() && len(s.String) == 25 { - r := strings.ReplaceAll(s.String[:19], "T", " ") - if _, err = io.WriteString(w, "'"+r+"'"); err != nil { - return err - } - } else if len(s.String) == 0 { - if _, err := io.WriteString(w, "''"); err != nil { - return err - } - } else if dstDialect.URI().DBType == schemas.POSTGRES { - if dstTable.Columns()[i].SQLType.IsBlob() { - // Postgres has the escape format and we should use that for bytea data - if _, err := fmt.Fprintf(w, "'\\x%x'", s.String); err != nil { - return err - } - } else { - // Postgres concatentates strings using || (NOTE: a NUL byte in a text segment will fail) - toCheck := strings.ReplaceAll(s.String, "'", "''") - for len(toCheck) > 0 { - loc := controlCharactersRe.FindStringIndex(toCheck) - if loc == nil { - if _, err := io.WriteString(w, "'"+toCheck+"'"); err != nil { - return err - } - break - } - if loc[0] > 0 { - if _, err := io.WriteString(w, "'"+toCheck[:loc[0]]+"' || "); err != nil { - 
return err - } - } - if _, err := io.WriteString(w, "e'"); err != nil { - return err - } - for i := loc[0]; i < loc[1]; i++ { - if _, err := fmt.Fprintf(w, "\\x%02x", toCheck[i]); err != nil { - return err - } - } - toCheck = toCheck[loc[1]:] - if len(toCheck) > 0 { - if _, err := io.WriteString(w, "' || "); err != nil { - return err - } - } else { - if _, err := io.WriteString(w, "'"); err != nil { - return err - } - } - } - } - } else if dstDialect.URI().DBType == schemas.MYSQL { - loc := controlCharactersRe.FindStringIndex(s.String) - if loc == nil { - if _, err := io.WriteString(w, "'"+strings.ReplaceAll(s.String, "'", "''")+"'"); err != nil { - return err - } - } else { - if _, err := io.WriteString(w, "CONCAT("); err != nil { - return err - } - toCheck := strings.ReplaceAll(s.String, "'", "''") - for len(toCheck) > 0 { - loc := controlCharactersRe.FindStringIndex(toCheck) - if loc == nil { - if _, err := io.WriteString(w, "'"+toCheck+"')"); err != nil { - return err - } - break - } - if loc[0] > 0 { - if _, err := io.WriteString(w, "'"+toCheck[:loc[0]]+"', "); err != nil { - return err - } - } - for i := loc[0]; i < loc[1]-1; i++ { - if _, err := io.WriteString(w, "CHAR("+strconv.Itoa(int(toCheck[i]))+"), "); err != nil { - return err - } - } - char := toCheck[loc[1]-1] - toCheck = toCheck[loc[1]:] - if len(toCheck) > 0 { - if _, err := io.WriteString(w, "CHAR("+strconv.Itoa(int(char))+"), "); err != nil { - return err - } - } else { - if _, err = io.WriteString(w, "CHAR("+strconv.Itoa(int(char))+"))"); err != nil { - return err - } - } - } - } - } else if dstDialect.URI().DBType == schemas.SQLITE { - if dstTable.Columns()[i].SQLType.IsBlob() { - // SQLite has its escape format - if _, err := fmt.Fprintf(w, "X'%x'", s.String); err != nil { - return err - } - } else { - // SQLite concatentates strings using || (NOTE: a NUL byte in a text segment will fail) - toCheck := strings.ReplaceAll(s.String, "'", "''") - for len(toCheck) > 0 { - loc := 
controlCharactersRe.FindStringIndex(toCheck) - if loc == nil { - if _, err := io.WriteString(w, "'"+toCheck+"'"); err != nil { - return err - } - break - } - if loc[0] > 0 { - if _, err := io.WriteString(w, "'"+toCheck[:loc[0]]+"' || "); err != nil { - return err - } - } - if _, err := fmt.Fprintf(w, "X'%x'", toCheck[loc[0]:loc[1]]); err != nil { - return err - } - toCheck = toCheck[loc[1]:] - if len(toCheck) > 0 { - if _, err := io.WriteString(w, " || "); err != nil { - return err - } - } - } - } - } else if dstDialect.URI().DBType == schemas.DAMENG || dstDialect.URI().DBType == schemas.ORACLE { - if dstTable.Columns()[i].SQLType.IsBlob() { - // ORACLE/DAMENG uses HEXTORAW - if _, err := fmt.Fprintf(w, "HEXTORAW('%x')", s.String); err != nil { - return err - } - } else { - // ORACLE/DAMENG concatentates strings in multiple ways but uses CHAR and has CONCAT - // (NOTE: a NUL byte in a text segment will fail) - if _, err := io.WriteString(w, "CONCAT("); err != nil { - return err - } - toCheck := strings.ReplaceAll(s.String, "'", "''") - for len(toCheck) > 0 { - loc := controlCharactersRe.FindStringIndex(toCheck) - if loc == nil { - if _, err := io.WriteString(w, "'"+toCheck+"')"); err != nil { - return err - } - break - } - if loc[0] > 0 { - if _, err := io.WriteString(w, "'"+toCheck[:loc[0]]+"', "); err != nil { - return err - } - } - for i := loc[0]; i < loc[1]-1; i++ { - if _, err := io.WriteString(w, "CHAR("+strconv.Itoa(int(toCheck[i]))+"), "); err != nil { - return err - } - } - char := toCheck[loc[1]-1] - toCheck = toCheck[loc[1]:] - if len(toCheck) > 0 { - if _, err := io.WriteString(w, "CHAR("+strconv.Itoa(int(char))+"), "); err != nil { - return err - } - } else { - if _, err = io.WriteString(w, "CHAR("+strconv.Itoa(int(char))+"))"); err != nil { - return err - } - } - } - } - } else if dstDialect.URI().DBType == schemas.MSSQL { - if dstTable.Columns()[i].SQLType.IsBlob() { - // MSSQL uses CONVERT(VARBINARY(MAX), '0xDEADBEEF', 1) - if _, err := 
fmt.Fprintf(w, "CONVERT(VARBINARY(MAX), '0x%x', 1)", s.String); err != nil { - return err - } - } else { - if _, err = io.WriteString(w, "N'"+strings.ReplaceAll(s.String, "'", "''")+"'"); err != nil { - return err - } - } - } else { - if _, err = io.WriteString(w, "'"+strings.ReplaceAll(s.String, "'", "''")+"'"); err != nil { - return err - } - } - } - if i < len(scanResults)-1 { - if _, err = io.WriteString(w, ","); err != nil { - return err - } - } - } - _, err = io.WriteString(w, ");\n") - if err != nil { - return err - } - } - if rows.Err() != nil { - return rows.Err() - } - - // FIXME: Hack for postgres - if dstDialect.URI().DBType == schemas.POSTGRES && table.AutoIncrColumn() != nil { - _, err = io.WriteString(w, "SELECT setval('"+dstTableName+"_id_seq', COALESCE((SELECT MAX("+table.AutoIncrColumn().Name+") + 1 FROM "+dstDialect.Quoter().Quote(dstTableName)+"), 1), false);\n") - if err != nil { - return err - } - } - } - return nil -} - -// Cascade use cascade or not -func (engine *Engine) Cascade(trueOrFalse ...bool) *Session { - session := engine.NewSession() - session.isAutoClose = true - return session.Cascade(trueOrFalse...) -} - -// Where method provide a condition query -func (engine *Engine) Where(query interface{}, args ...interface{}) *Session { - session := engine.NewSession() - session.isAutoClose = true - return session.Where(query, args...) -} - -// ID method provoide a condition as (id) = ? 
-func (engine *Engine) ID(id interface{}) *Session { - session := engine.NewSession() - session.isAutoClose = true - return session.ID(id) -} - -// Before apply before Processor, affected bean is passed to closure arg -func (engine *Engine) Before(closures func(interface{})) *Session { - session := engine.NewSession() - session.isAutoClose = true - return session.Before(closures) -} - -// After apply after insert Processor, affected bean is passed to closure arg -func (engine *Engine) After(closures func(interface{})) *Session { - session := engine.NewSession() - session.isAutoClose = true - return session.After(closures) -} - -// Charset set charset when create table, only support mysql now -func (engine *Engine) Charset(charset string) *Session { - session := engine.NewSession() - session.isAutoClose = true - return session.Charset(charset) -} - -// StoreEngine set store engine when create table, only support mysql now -func (engine *Engine) StoreEngine(storeEngine string) *Session { - session := engine.NewSession() - session.isAutoClose = true - return session.StoreEngine(storeEngine) -} - -// Distinct use for distinct columns. Caution: when you are using cache, -// distinct will not be cached because cache system need id, -// but distinct will not provide id -func (engine *Engine) Distinct(columns ...string) *Session { - session := engine.NewSession() - session.isAutoClose = true - return session.Distinct(columns...) -} - -// Select customerize your select columns or contents -func (engine *Engine) Select(str string) *Session { - session := engine.NewSession() - session.isAutoClose = true - return session.Select(str) -} - -// Cols only use the parameters as select or update columns -func (engine *Engine) Cols(columns ...string) *Session { - session := engine.NewSession() - session.isAutoClose = true - return session.Cols(columns...) 
-} - -// AllCols indicates that all columns should be use -func (engine *Engine) AllCols() *Session { - session := engine.NewSession() - session.isAutoClose = true - return session.AllCols() -} - -// MustCols specify some columns must use even if they are empty -func (engine *Engine) MustCols(columns ...string) *Session { - session := engine.NewSession() - session.isAutoClose = true - return session.MustCols(columns...) -} - -// UseBool xorm automatically retrieve condition according struct, but -// if struct has bool field, it will ignore them. So use UseBool -// to tell system to do not ignore them. -// If no parameters, it will use all the bool field of struct, or -// it will use parameters's columns -func (engine *Engine) UseBool(columns ...string) *Session { - session := engine.NewSession() - session.isAutoClose = true - return session.UseBool(columns...) -} - -// Omit only not use the parameters as select or update columns -func (engine *Engine) Omit(columns ...string) *Session { - session := engine.NewSession() - session.isAutoClose = true - return session.Omit(columns...) -} - -// Nullable set null when column is zero-value and nullable for update -func (engine *Engine) Nullable(columns ...string) *Session { - session := engine.NewSession() - session.isAutoClose = true - return session.Nullable(columns...) -} - -// In will generate "column IN (?, ?)" -func (engine *Engine) In(column string, args ...interface{}) *Session { - session := engine.NewSession() - session.isAutoClose = true - return session.In(column, args...) -} - -// NotIn will generate "column NOT IN (?, ?)" -func (engine *Engine) NotIn(column string, args ...interface{}) *Session { - session := engine.NewSession() - session.isAutoClose = true - return session.NotIn(column, args...) -} - -// Incr provides a update string like "column = column + ?" 
-func (engine *Engine) Incr(column string, arg ...interface{}) *Session { - session := engine.NewSession() - session.isAutoClose = true - return session.Incr(column, arg...) -} - -// Decr provides a update string like "column = column - ?" -func (engine *Engine) Decr(column string, arg ...interface{}) *Session { - session := engine.NewSession() - session.isAutoClose = true - return session.Decr(column, arg...) -} - -// SetExpr provides a update string like "column = {expression}" -func (engine *Engine) SetExpr(column string, expression interface{}) *Session { - session := engine.NewSession() - session.isAutoClose = true - return session.SetExpr(column, expression) -} - -// Table temporarily change the Get, Find, Update's table -func (engine *Engine) Table(tableNameOrBean interface{}) *Session { - session := engine.NewSession() - session.isAutoClose = true - return session.Table(tableNameOrBean) -} - -// Alias set the table alias -func (engine *Engine) Alias(alias string) *Session { - session := engine.NewSession() - session.isAutoClose = true - return session.Alias(alias) -} - -// Limit will generate "LIMIT start, limit" -func (engine *Engine) Limit(limit int, start ...int) *Session { - session := engine.NewSession() - session.isAutoClose = true - return session.Limit(limit, start...) -} - -// Desc will generate "ORDER BY column1 DESC, column2 DESC" -func (engine *Engine) Desc(colNames ...string) *Session { - session := engine.NewSession() - session.isAutoClose = true - return session.Desc(colNames...) -} - -// Asc will generate "ORDER BY column1,column2 Asc" -// This method can chainable use. -// -// engine.Desc("name").Asc("age").Find(&users) -// // SELECT * FROM user ORDER BY name DESC, age ASC -// -func (engine *Engine) Asc(colNames ...string) *Session { - session := engine.NewSession() - session.isAutoClose = true - return session.Asc(colNames...) 
-} - -// OrderBy will generate "ORDER BY order" -func (engine *Engine) OrderBy(order interface{}, args ...interface{}) *Session { - session := engine.NewSession() - session.isAutoClose = true - return session.OrderBy(order, args...) -} - -// Prepare enables prepare statement -func (engine *Engine) Prepare() *Session { - session := engine.NewSession() - session.isAutoClose = true - return session.Prepare() -} - -// Join the join_operator should be one of INNER, LEFT OUTER, CROSS etc - this will be prepended to JOIN -func (engine *Engine) Join(joinOperator string, tablename interface{}, condition string, args ...interface{}) *Session { - session := engine.NewSession() - session.isAutoClose = true - return session.Join(joinOperator, tablename, condition, args...) -} - -// GroupBy generate group by statement -func (engine *Engine) GroupBy(keys string) *Session { - session := engine.NewSession() - session.isAutoClose = true - return session.GroupBy(keys) -} - -// Having generate having statement -func (engine *Engine) Having(conditions string) *Session { - session := engine.NewSession() - session.isAutoClose = true - return session.Having(conditions) -} - -// DBVersion returns the database version -func (engine *Engine) DBVersion() (*schemas.Version, error) { - return engine.dialect.Version(engine.defaultContext, engine.db) -} - -// TableInfo get table info according to bean's content -func (engine *Engine) TableInfo(bean interface{}) (*schemas.Table, error) { - v := utils.ReflectValue(bean) - return engine.tagParser.ParseWithCache(v) -} - -// IsTableEmpty if a table has any reocrd -func (engine *Engine) IsTableEmpty(bean interface{}) (bool, error) { - session := engine.NewSession() - defer session.Close() - return session.IsTableEmpty(bean) -} - -// IsTableExist if a table is exist -func (engine *Engine) IsTableExist(beanOrTableName interface{}) (bool, error) { - session := engine.NewSession() - defer session.Close() - return session.IsTableExist(beanOrTableName) -} - 
-// TableName returns table name with schema prefix if has -func (engine *Engine) TableName(bean interface{}, includeSchema ...bool) string { - return dialects.FullTableName(engine.dialect, engine.GetTableMapper(), bean, includeSchema...) -} - -// CreateIndexes create indexes -func (engine *Engine) CreateIndexes(bean interface{}) error { - session := engine.NewSession() - defer session.Close() - return session.CreateIndexes(bean) -} - -// CreateUniques create uniques -func (engine *Engine) CreateUniques(bean interface{}) error { - session := engine.NewSession() - defer session.Close() - return session.CreateUniques(bean) -} - -// ClearCacheBean if enabled cache, clear the cache bean -func (engine *Engine) ClearCacheBean(bean interface{}, id string) error { - tableName := dialects.FullTableName(engine.dialect, engine.GetTableMapper(), bean) - cacher := engine.GetCacher(tableName) - if cacher != nil { - cacher.ClearIds(tableName) - cacher.DelBean(tableName, id) - } - return nil -} - -// ClearCache if enabled cache, clear some tables' cache -func (engine *Engine) ClearCache(beans ...interface{}) error { - for _, bean := range beans { - tableName := dialects.FullTableName(engine.dialect, engine.GetTableMapper(), bean) - cacher := engine.GetCacher(tableName) - if cacher != nil { - cacher.ClearIds(tableName) - cacher.ClearBeans(tableName) - } - } - return nil -} - -// UnMapType remove table from tables cache -func (engine *Engine) UnMapType(t reflect.Type) { - engine.tagParser.ClearCacheTable(t) -} - -// Sync the new struct changes to database, this method will automatically add -// table, column, index, unique. but will not delete or change anything. -// If you change some field, you should change the database manually. -func (engine *Engine) Sync(beans ...interface{}) error { - session := engine.NewSession() - defer session.Close() - return session.Sync(beans...) 
-} - -// Sync2 synchronize structs to database tables -// Depricated -func (engine *Engine) Sync2(beans ...interface{}) error { - return engine.Sync(beans...) -} - -// CreateTables create tabls according bean -func (engine *Engine) CreateTables(beans ...interface{}) error { - session := engine.NewSession() - defer session.Close() - - err := session.Begin() - if err != nil { - return err - } - - for _, bean := range beans { - err = session.createTable(bean) - if err != nil { - _ = session.Rollback() - return err - } - } - return session.Commit() -} - -// DropTables drop specify tables -func (engine *Engine) DropTables(beans ...interface{}) error { - session := engine.NewSession() - defer session.Close() - - err := session.Begin() - if err != nil { - return err - } - - for _, bean := range beans { - err = session.dropTable(bean) - if err != nil { - _ = session.Rollback() - return err - } - } - return session.Commit() -} - -// DropIndexes drop indexes of a table -func (engine *Engine) DropIndexes(bean interface{}) error { - session := engine.NewSession() - defer session.Close() - return session.DropIndexes(bean) -} - -// Exec raw sql -func (engine *Engine) Exec(sqlOrArgs ...interface{}) (sql.Result, error) { - session := engine.NewSession() - defer session.Close() - return session.Exec(sqlOrArgs...) -} - -// Query a raw sql and return records as []map[string][]byte -func (engine *Engine) Query(sqlOrArgs ...interface{}) (resultsSlice []map[string][]byte, err error) { - session := engine.NewSession() - defer session.Close() - return session.Query(sqlOrArgs...) -} - -// QueryString runs a raw sql and return records as []map[string]string -func (engine *Engine) QueryString(sqlOrArgs ...interface{}) ([]map[string]string, error) { - session := engine.NewSession() - defer session.Close() - return session.QueryString(sqlOrArgs...) 
-} - -// QueryInterface runs a raw sql and return records as []map[string]interface{} -func (engine *Engine) QueryInterface(sqlOrArgs ...interface{}) ([]map[string]interface{}, error) { - session := engine.NewSession() - defer session.Close() - return session.QueryInterface(sqlOrArgs...) -} - -// Insert one or more records -func (engine *Engine) Insert(beans ...interface{}) (int64, error) { - session := engine.NewSession() - defer session.Close() - return session.Insert(beans...) -} - -// InsertOne insert only one record -func (engine *Engine) InsertOne(bean interface{}) (int64, error) { - session := engine.NewSession() - defer session.Close() - return session.InsertOne(bean) -} - -// Update records, bean's non-empty fields are updated contents, -// condiBean' non-empty filds are conditions -// CAUTION: -// 1.bool will defaultly be updated content nor conditions -// You should call UseBool if you have bool to use. -// 2.float32 & float64 may be not inexact as conditions -func (engine *Engine) Update(bean interface{}, condiBeans ...interface{}) (int64, error) { - session := engine.NewSession() - defer session.Close() - return session.Update(bean, condiBeans...) -} - -// Delete records, bean's non-empty fields are conditions -func (engine *Engine) Delete(beans ...interface{}) (int64, error) { - session := engine.NewSession() - defer session.Close() - return session.Delete(beans...) -} - -// Get retrieve one record from table, bean's non-empty fields -// are conditions -func (engine *Engine) Get(beans ...interface{}) (bool, error) { - session := engine.NewSession() - defer session.Close() - return session.Get(beans...) -} - -// Exist returns true if the record exist otherwise return false -func (engine *Engine) Exist(bean ...interface{}) (bool, error) { - session := engine.NewSession() - defer session.Close() - return session.Exist(bean...) -} - -// Find retrieve records from table, condiBeans's non-empty fields -// are conditions. 
beans could be []Struct, []*Struct, map[int64]Struct -// map[int64]*Struct -func (engine *Engine) Find(beans interface{}, condiBeans ...interface{}) error { - session := engine.NewSession() - defer session.Close() - return session.Find(beans, condiBeans...) -} - -// FindAndCount find the results and also return the counts -func (engine *Engine) FindAndCount(rowsSlicePtr interface{}, condiBean ...interface{}) (int64, error) { - session := engine.NewSession() - defer session.Close() - return session.FindAndCount(rowsSlicePtr, condiBean...) -} - -// Iterate record by record handle records from table, bean's non-empty fields -// are conditions. -func (engine *Engine) Iterate(bean interface{}, fun IterFunc) error { - session := engine.NewSession() - defer session.Close() - return session.Iterate(bean, fun) -} - -// Rows return sql.Rows compatible Rows obj, as a forward Iterator object for iterating record by record, bean's non-empty fields -// are conditions. -func (engine *Engine) Rows(bean interface{}) (*Rows, error) { - session := engine.NewSession() - return session.Rows(bean) -} - -// Count counts the records. bean's non-empty fields are conditions. -func (engine *Engine) Count(bean ...interface{}) (int64, error) { - session := engine.NewSession() - defer session.Close() - return session.Count(bean...) -} - -// Sum sum the records by some column. bean's non-empty fields are conditions. -func (engine *Engine) Sum(bean interface{}, colName string) (float64, error) { - session := engine.NewSession() - defer session.Close() - return session.Sum(bean, colName) -} - -// SumInt sum the records by some column. bean's non-empty fields are conditions. -func (engine *Engine) SumInt(bean interface{}, colName string) (int64, error) { - session := engine.NewSession() - defer session.Close() - return session.SumInt(bean, colName) -} - -// Sums sum the records by some columns. bean's non-empty fields are conditions. 
-func (engine *Engine) Sums(bean interface{}, colNames ...string) ([]float64, error) { - session := engine.NewSession() - defer session.Close() - return session.Sums(bean, colNames...) -} - -// SumsInt like Sums but return slice of int64 instead of float64. -func (engine *Engine) SumsInt(bean interface{}, colNames ...string) ([]int64, error) { - session := engine.NewSession() - defer session.Close() - return session.SumsInt(bean, colNames...) -} - -// ImportFile SQL DDL file -func (engine *Engine) ImportFile(ddlPath string) ([]sql.Result, error) { - session := engine.NewSession() - defer session.Close() - return session.ImportFile(ddlPath) -} - -// Import SQL DDL from io.Reader -func (engine *Engine) Import(r io.Reader) ([]sql.Result, error) { - session := engine.NewSession() - defer session.Close() - return session.Import(r) -} - -// nowTime return current time -func (engine *Engine) nowTime(col *schemas.Column) (interface{}, time.Time, error) { - t := time.Now() - result, err := dialects.FormatColumnTime(engine.dialect, engine.DatabaseTZ, col, t) - if err != nil { - return nil, time.Time{}, err - } - return result, t.In(engine.TZLocation), nil -} - -// GetColumnMapper returns the column name mapper -func (engine *Engine) GetColumnMapper() names.Mapper { - return engine.tagParser.GetColumnMapper() -} - -// GetTableMapper returns the table name mapper -func (engine *Engine) GetTableMapper() names.Mapper { - return engine.tagParser.GetTableMapper() -} - -// GetTZLocation returns time zone of the application -func (engine *Engine) GetTZLocation() *time.Location { - return engine.TZLocation -} - -// SetTZLocation sets time zone of the application -func (engine *Engine) SetTZLocation(tz *time.Location) { - engine.TZLocation = tz -} - -// GetTZDatabase returns time zone of the database -func (engine *Engine) GetTZDatabase() *time.Location { - return engine.DatabaseTZ -} - -// SetTZDatabase sets time zone of the database -func (engine *Engine) SetTZDatabase(tz 
*time.Location) { - engine.DatabaseTZ = tz -} - -// SetSchema sets the schema of database -func (engine *Engine) SetSchema(schema string) { - engine.dialect.URI().SetSchema(schema) -} - -// AddHook adds a context Hook -func (engine *Engine) AddHook(hook contexts.Hook) { - engine.db.AddHook(hook) -} - -// Unscoped always disable struct tag "deleted" -func (engine *Engine) Unscoped() *Session { - session := engine.NewSession() - session.isAutoClose = true - return session.Unscoped() -} - -func (engine *Engine) tbNameWithSchema(v string) string { - return dialects.TableNameWithSchema(engine.dialect, v) -} - -// Context creates a session with the context -func (engine *Engine) Context(ctx context.Context) *Session { - session := engine.NewSession() - session.isAutoClose = true - return session.Context(ctx) -} - -// SetDefaultContext set the default context -func (engine *Engine) SetDefaultContext(ctx context.Context) { - engine.defaultContext = ctx -} - -// PingContext tests if database is alive -func (engine *Engine) PingContext(ctx context.Context) error { - session := engine.NewSession() - defer session.Close() - return session.PingContext(ctx) -} - -// Transaction Execute sql wrapped in a transaction(abbr as tx), tx will automatic commit if no errors occurred -func (engine *Engine) Transaction(f func(*Session) (interface{}, error)) (interface{}, error) { - session := engine.NewSession() - defer session.Close() - - if err := session.Begin(); err != nil { - return nil, err - } - - result, err := f(session) - if err != nil { - return result, err - } - - if err := session.Commit(); err != nil { - return result, err - } - - return result, nil -} diff --git a/vendor/xorm.io/xorm/engine_group.go b/vendor/xorm.io/xorm/engine_group.go deleted file mode 100644 index f2fe913d..00000000 --- a/vendor/xorm.io/xorm/engine_group.go +++ /dev/null @@ -1,267 +0,0 @@ -// Copyright 2017 The Xorm Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xorm - -import ( - "context" - "time" - - "xorm.io/xorm/caches" - "xorm.io/xorm/contexts" - "xorm.io/xorm/dialects" - "xorm.io/xorm/log" - "xorm.io/xorm/names" -) - -// EngineGroup defines an engine group -type EngineGroup struct { - *Engine - slaves []*Engine - policy GroupPolicy -} - -// NewEngineGroup creates a new engine group -func NewEngineGroup(args1 interface{}, args2 interface{}, policies ...GroupPolicy) (*EngineGroup, error) { - var eg EngineGroup - if len(policies) > 0 { - eg.policy = policies[0] - } else { - eg.policy = RoundRobinPolicy() - } - - driverName, ok1 := args1.(string) - conns, ok2 := args2.([]string) - if ok1 && ok2 { - engines := make([]*Engine, len(conns)) - for i, conn := range conns { - engine, err := NewEngine(driverName, conn) - if err != nil { - return nil, err - } - engine.engineGroup = &eg - engines[i] = engine - } - - eg.Engine = engines[0] - eg.slaves = engines[1:] - return &eg, nil - } - - master, ok3 := args1.(*Engine) - slaves, ok4 := args2.([]*Engine) - if ok3 && ok4 { - master.engineGroup = &eg - for i := 0; i < len(slaves); i++ { - slaves[i].engineGroup = &eg - } - eg.Engine = master - eg.slaves = slaves - return &eg, nil - } - return nil, ErrParamsType -} - -// Close the engine -func (eg *EngineGroup) Close() error { - err := eg.Engine.Close() - if err != nil { - return err - } - - for i := 0; i < len(eg.slaves); i++ { - err := eg.slaves[i].Close() - if err != nil { - return err - } - } - return nil -} - -// Context returned a group session -func (eg *EngineGroup) Context(ctx context.Context) *Session { - sess := eg.NewSession() - sess.isAutoClose = true - return sess.Context(ctx) -} - -// NewSession returned a group session -func (eg *EngineGroup) NewSession() *Session { - sess := eg.Engine.NewSession() - sess.sessionType = groupSession - return sess -} - -// Master returns the master engine -func (eg 
*EngineGroup) Master() *Engine { - return eg.Engine -} - -// Ping tests if database is alive -func (eg *EngineGroup) Ping() error { - if err := eg.Engine.Ping(); err != nil { - return err - } - - for _, slave := range eg.slaves { - if err := slave.Ping(); err != nil { - return err - } - } - return nil -} - -// SetColumnMapper set the column name mapping rule -func (eg *EngineGroup) SetColumnMapper(mapper names.Mapper) { - eg.Engine.SetColumnMapper(mapper) - for i := 0; i < len(eg.slaves); i++ { - eg.slaves[i].SetColumnMapper(mapper) - } -} - -// SetConnMaxLifetime sets the maximum amount of time a connection may be reused. -func (eg *EngineGroup) SetConnMaxLifetime(d time.Duration) { - eg.Engine.SetConnMaxLifetime(d) - for i := 0; i < len(eg.slaves); i++ { - eg.slaves[i].SetConnMaxLifetime(d) - } -} - -// SetDefaultCacher set the default cacher -func (eg *EngineGroup) SetDefaultCacher(cacher caches.Cacher) { - eg.Engine.SetDefaultCacher(cacher) - for i := 0; i < len(eg.slaves); i++ { - eg.slaves[i].SetDefaultCacher(cacher) - } -} - -// SetLogger set the new logger -func (eg *EngineGroup) SetLogger(logger interface{}) { - eg.Engine.SetLogger(logger) - for i := 0; i < len(eg.slaves); i++ { - eg.slaves[i].SetLogger(logger) - } -} - -// AddHook adds Hook -func (eg *EngineGroup) AddHook(hook contexts.Hook) { - eg.Engine.AddHook(hook) - for i := 0; i < len(eg.slaves); i++ { - eg.slaves[i].AddHook(hook) - } -} - -// SetLogLevel sets the logger level -func (eg *EngineGroup) SetLogLevel(level log.LogLevel) { - eg.Engine.SetLogLevel(level) - for i := 0; i < len(eg.slaves); i++ { - eg.slaves[i].SetLogLevel(level) - } -} - -// SetMapper set the name mapping rules -func (eg *EngineGroup) SetMapper(mapper names.Mapper) { - eg.Engine.SetMapper(mapper) - for i := 0; i < len(eg.slaves); i++ { - eg.slaves[i].SetMapper(mapper) - } -} - -// SetTagIdentifier set the tag identifier -func (eg *EngineGroup) SetTagIdentifier(tagIdentifier string) { - 
eg.Engine.SetTagIdentifier(tagIdentifier) - for i := 0; i < len(eg.slaves); i++ { - eg.slaves[i].SetTagIdentifier(tagIdentifier) - } -} - -// SetMaxIdleConns set the max idle connections on pool, default is 2 -func (eg *EngineGroup) SetMaxIdleConns(conns int) { - eg.Engine.DB().SetMaxIdleConns(conns) - for i := 0; i < len(eg.slaves); i++ { - eg.slaves[i].DB().SetMaxIdleConns(conns) - } -} - -// SetMaxOpenConns is only available for go 1.2+ -func (eg *EngineGroup) SetMaxOpenConns(conns int) { - eg.Engine.DB().SetMaxOpenConns(conns) - for i := 0; i < len(eg.slaves); i++ { - eg.slaves[i].DB().SetMaxOpenConns(conns) - } -} - -// SetPolicy set the group policy -func (eg *EngineGroup) SetPolicy(policy GroupPolicy) *EngineGroup { - eg.policy = policy - return eg -} - -// SetQuotePolicy sets the special quote policy -func (eg *EngineGroup) SetQuotePolicy(quotePolicy dialects.QuotePolicy) { - eg.Engine.SetQuotePolicy(quotePolicy) - for i := 0; i < len(eg.slaves); i++ { - eg.slaves[i].SetQuotePolicy(quotePolicy) - } -} - -// SetTableMapper set the table name mapping rule -func (eg *EngineGroup) SetTableMapper(mapper names.Mapper) { - eg.Engine.SetTableMapper(mapper) - for i := 0; i < len(eg.slaves); i++ { - eg.slaves[i].SetTableMapper(mapper) - } -} - -// ShowSQL show SQL statement or not on logger if log level is great than INFO -func (eg *EngineGroup) ShowSQL(show ...bool) { - eg.Engine.ShowSQL(show...) - for i := 0; i < len(eg.slaves); i++ { - eg.slaves[i].ShowSQL(show...) 
- } -} - -// Slave returns one of the physical databases which is a slave according the policy -func (eg *EngineGroup) Slave() *Engine { - switch len(eg.slaves) { - case 0: - return eg.Engine - case 1: - return eg.slaves[0] - } - return eg.policy.Slave(eg) -} - -// Slaves returns all the slaves -func (eg *EngineGroup) Slaves() []*Engine { - return eg.slaves -} - -// Query execcute a select SQL and return the result -func (eg *EngineGroup) Query(sqlOrArgs ...interface{}) (resultsSlice []map[string][]byte, err error) { - sess := eg.NewSession() - sess.isAutoClose = true - return sess.Query(sqlOrArgs...) -} - -// QueryInterface execcute a select SQL and return the result -func (eg *EngineGroup) QueryInterface(sqlOrArgs ...interface{}) ([]map[string]interface{}, error) { - sess := eg.NewSession() - sess.isAutoClose = true - return sess.QueryInterface(sqlOrArgs...) -} - -// QueryString execcute a select SQL and return the result -func (eg *EngineGroup) QueryString(sqlOrArgs ...interface{}) ([]map[string]string, error) { - sess := eg.NewSession() - sess.isAutoClose = true - return sess.QueryString(sqlOrArgs...) -} - -// Rows execcute a select SQL and return the result -func (eg *EngineGroup) Rows(bean interface{}) (*Rows, error) { - sess := eg.NewSession() - sess.isAutoClose = true - return sess.Rows(bean) -} diff --git a/vendor/xorm.io/xorm/engine_group_policy.go b/vendor/xorm.io/xorm/engine_group_policy.go deleted file mode 100644 index 1def8ce4..00000000 --- a/vendor/xorm.io/xorm/engine_group_policy.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2017 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package xorm - -import ( - "math/rand" - "sync" - "time" -) - -// GroupPolicy is be used by chosing the current slave from slaves -type GroupPolicy interface { - Slave(*EngineGroup) *Engine -} - -// GroupPolicyHandler should be used when a function is a GroupPolicy -type GroupPolicyHandler func(*EngineGroup) *Engine - -// Slave implements the chosen of slaves -func (h GroupPolicyHandler) Slave(eg *EngineGroup) *Engine { - return h(eg) -} - -// RandomPolicy implmentes randomly chose the slave of slaves -func RandomPolicy() GroupPolicyHandler { - var r = rand.New(rand.NewSource(time.Now().UnixNano())) - return func(g *EngineGroup) *Engine { - return g.Slaves()[r.Intn(len(g.Slaves()))] - } -} - -// WeightRandomPolicy implmentes randomly chose the slave of slaves -func WeightRandomPolicy(weights []int) GroupPolicyHandler { - var rands = make([]int, 0, len(weights)) - for i := 0; i < len(weights); i++ { - for n := 0; n < weights[i]; n++ { - rands = append(rands, i) - } - } - var r = rand.New(rand.NewSource(time.Now().UnixNano())) - - return func(g *EngineGroup) *Engine { - var slaves = g.Slaves() - idx := rands[r.Intn(len(rands))] - if idx >= len(slaves) { - idx = len(slaves) - 1 - } - return slaves[idx] - } -} - -// RoundRobinPolicy returns a group policy handler -func RoundRobinPolicy() GroupPolicyHandler { - var pos = -1 - var lock sync.Mutex - return func(g *EngineGroup) *Engine { - var slaves = g.Slaves() - - lock.Lock() - defer lock.Unlock() - pos++ - if pos >= len(slaves) { - pos = 0 - } - - return slaves[pos] - } -} - -// WeightRoundRobinPolicy returns a group policy handler -func WeightRoundRobinPolicy(weights []int) GroupPolicyHandler { - var rands = make([]int, 0, len(weights)) - for i := 0; i < len(weights); i++ { - for n := 0; n < weights[i]; n++ { - rands = append(rands, i) - } - } - var pos = -1 - var lock sync.Mutex - - return func(g *EngineGroup) *Engine { - var slaves = g.Slaves() - lock.Lock() - defer lock.Unlock() - pos++ - if pos >= len(rands) { 
- pos = 0 - } - - idx := rands[pos] - if idx >= len(slaves) { - idx = len(slaves) - 1 - } - return slaves[idx] - } -} - -// LeastConnPolicy implements GroupPolicy, every time will get the least connections slave -func LeastConnPolicy() GroupPolicyHandler { - return func(g *EngineGroup) *Engine { - var slaves = g.Slaves() - connections := 0 - idx := 0 - for i := 0; i < len(slaves); i++ { - openConnections := slaves[i].DB().Stats().OpenConnections - if i == 0 { - connections = openConnections - idx = i - } else if openConnections <= connections { - connections = openConnections - idx = i - } - } - return slaves[idx] - } -} diff --git a/vendor/xorm.io/xorm/error.go b/vendor/xorm.io/xorm/error.go deleted file mode 100644 index cfa5c819..00000000 --- a/vendor/xorm.io/xorm/error.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2015 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xorm - -import ( - "errors" -) - -var ( - // ErrPtrSliceType represents a type error - ErrPtrSliceType = errors.New("A point to a slice is needed") - // ErrParamsType params error - ErrParamsType = errors.New("Params type error") - // ErrTableNotFound table not found error - ErrTableNotFound = errors.New("Table not found") - // ErrUnSupportedType unsupported error - ErrUnSupportedType = errors.New("Unsupported type error") - // ErrNotExist record does not exist error - ErrNotExist = errors.New("Record does not exist") - // ErrCacheFailed cache failed error - ErrCacheFailed = errors.New("Cache failed") - // ErrConditionType condition type unsupported - ErrConditionType = errors.New("Unsupported condition type") -) diff --git a/vendor/xorm.io/xorm/interface.go b/vendor/xorm.io/xorm/interface.go deleted file mode 100644 index 55ffebe4..00000000 --- a/vendor/xorm.io/xorm/interface.go +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright 2017 The Xorm Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xorm - -import ( - "context" - "database/sql" - "reflect" - "time" - - "xorm.io/xorm/caches" - "xorm.io/xorm/contexts" - "xorm.io/xorm/dialects" - "xorm.io/xorm/log" - "xorm.io/xorm/names" - "xorm.io/xorm/schemas" -) - -// Interface defines the interface which Engine, EngineGroup and Session will implementate. -type Interface interface { - AllCols() *Session - Alias(alias string) *Session - Asc(colNames ...string) *Session - BufferSize(size int) *Session - Cols(columns ...string) *Session - Count(...interface{}) (int64, error) - CreateIndexes(bean interface{}) error - CreateUniques(bean interface{}) error - Decr(column string, arg ...interface{}) *Session - Desc(...string) *Session - Delete(...interface{}) (int64, error) - Distinct(columns ...string) *Session - DropIndexes(bean interface{}) error - Exec(sqlOrArgs ...interface{}) (sql.Result, error) - Exist(bean ...interface{}) (bool, error) - Find(interface{}, ...interface{}) error - FindAndCount(interface{}, ...interface{}) (int64, error) - Get(...interface{}) (bool, error) - GroupBy(keys string) *Session - ID(interface{}) *Session - In(string, ...interface{}) *Session - Incr(column string, arg ...interface{}) *Session - Insert(...interface{}) (int64, error) - InsertOne(interface{}) (int64, error) - IsTableEmpty(bean interface{}) (bool, error) - IsTableExist(beanOrTableName interface{}) (bool, error) - Iterate(interface{}, IterFunc) error - Limit(int, ...int) *Session - MustCols(columns ...string) *Session - NoAutoCondition(...bool) *Session - NotIn(string, ...interface{}) *Session - Nullable(...string) *Session - Join(joinOperator string, tablename interface{}, condition string, args ...interface{}) *Session - Omit(columns ...string) *Session - OrderBy(order interface{}, args ...interface{}) *Session - Ping() error - Query(sqlOrArgs ...interface{}) (resultsSlice []map[string][]byte, err error) - 
QueryInterface(sqlOrArgs ...interface{}) ([]map[string]interface{}, error) - QueryString(sqlOrArgs ...interface{}) ([]map[string]string, error) - Rows(bean interface{}) (*Rows, error) - SetExpr(string, interface{}) *Session - Select(string) *Session - SQL(interface{}, ...interface{}) *Session - Sum(bean interface{}, colName string) (float64, error) - SumInt(bean interface{}, colName string) (int64, error) - Sums(bean interface{}, colNames ...string) ([]float64, error) - SumsInt(bean interface{}, colNames ...string) ([]int64, error) - Table(tableNameOrBean interface{}) *Session - Unscoped() *Session - Update(bean interface{}, condiBeans ...interface{}) (int64, error) - UseBool(...string) *Session - Where(interface{}, ...interface{}) *Session -} - -// EngineInterface defines the interface which Engine, EngineGroup will implementate. -type EngineInterface interface { - Interface - - Before(func(interface{})) *Session - Charset(charset string) *Session - ClearCache(...interface{}) error - Context(context.Context) *Session - CreateTables(...interface{}) error - DBMetas() ([]*schemas.Table, error) - DBVersion() (*schemas.Version, error) - Dialect() dialects.Dialect - DriverName() string - DropTables(...interface{}) error - DumpAllToFile(fp string, tp ...schemas.DBType) error - GetCacher(string) caches.Cacher - GetColumnMapper() names.Mapper - GetDefaultCacher() caches.Cacher - GetTableMapper() names.Mapper - GetTZDatabase() *time.Location - GetTZLocation() *time.Location - ImportFile(fp string) ([]sql.Result, error) - MapCacher(interface{}, caches.Cacher) error - NewSession() *Session - NoAutoTime() *Session - Prepare() *Session - Quote(string) string - SetCacher(string, caches.Cacher) - SetConnMaxLifetime(time.Duration) - SetColumnMapper(names.Mapper) - SetTagIdentifier(string) - SetDefaultCacher(caches.Cacher) - SetLogger(logger interface{}) - SetLogLevel(log.LogLevel) - SetMapper(names.Mapper) - SetMaxOpenConns(int) - SetMaxIdleConns(int) - 
SetQuotePolicy(dialects.QuotePolicy) - SetSchema(string) - SetTableMapper(names.Mapper) - SetTZDatabase(tz *time.Location) - SetTZLocation(tz *time.Location) - AddHook(hook contexts.Hook) - ShowSQL(show ...bool) - Sync(...interface{}) error - Sync2(...interface{}) error - StoreEngine(storeEngine string) *Session - TableInfo(bean interface{}) (*schemas.Table, error) - TableName(interface{}, ...bool) string - UnMapType(reflect.Type) - EnableSessionID(bool) -} - -var ( - _ Interface = &Session{} - _ EngineInterface = &Engine{} - _ EngineInterface = &EngineGroup{} -) diff --git a/vendor/xorm.io/xorm/internal/json/gojson.go b/vendor/xorm.io/xorm/internal/json/gojson.go deleted file mode 100644 index 4f1448e7..00000000 --- a/vendor/xorm.io/xorm/internal/json/gojson.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2021 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build gojson - -package json - -import ( - gojson "github.com/goccy/go-json" -) - -func init() { - DefaultJSONHandler = GOjson{} -} - -// GOjson implements JSONInterface via gojson -type GOjson struct{} - -// Marshal implements JSONInterface -func (GOjson) Marshal(v interface{}) ([]byte, error) { - return gojson.Marshal(v) -} - -// Unmarshal implements JSONInterface -func (GOjson) Unmarshal(data []byte, v interface{}) error { - return gojson.Unmarshal(data, v) -} diff --git a/vendor/xorm.io/xorm/internal/json/json.go b/vendor/xorm.io/xorm/internal/json/json.go deleted file mode 100644 index ef52f51f..00000000 --- a/vendor/xorm.io/xorm/internal/json/json.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2019 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package json - -import "encoding/json" - -// Interface represents an interface to handle json data -type Interface interface { - Marshal(v interface{}) ([]byte, error) - Unmarshal(data []byte, v interface{}) error -} - -var ( - // DefaultJSONHandler default json handler - DefaultJSONHandler Interface = StdJSON{} -) - -// StdJSON implements JSONInterface via encoding/json -type StdJSON struct{} - -// Marshal implements JSONInterface -func (StdJSON) Marshal(v interface{}) ([]byte, error) { - return json.Marshal(v) -} - -// Unmarshal implements JSONInterface -func (StdJSON) Unmarshal(data []byte, v interface{}) error { - return json.Unmarshal(data, v) -} diff --git a/vendor/xorm.io/xorm/internal/json/jsoniter.go b/vendor/xorm.io/xorm/internal/json/jsoniter.go deleted file mode 100644 index cfe7a19e..00000000 --- a/vendor/xorm.io/xorm/internal/json/jsoniter.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2021 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build jsoniter - -package json - -import ( - jsoniter "github.com/json-iterator/go" -) - -func init() { - DefaultJSONHandler = JSONiter{} -} - -// JSONiter implements JSONInterface via jsoniter -type JSONiter struct{} - -// Marshal implements JSONInterface -func (JSONiter) Marshal(v interface{}) ([]byte, error) { - return jsoniter.Marshal(v) -} - -// Unmarshal implements JSONInterface -func (JSONiter) Unmarshal(data []byte, v interface{}) error { - return jsoniter.Unmarshal(data, v) -} diff --git a/vendor/xorm.io/xorm/internal/statements/cache.go b/vendor/xorm.io/xorm/internal/statements/cache.go deleted file mode 100644 index 669cd018..00000000 --- a/vendor/xorm.io/xorm/internal/statements/cache.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2019 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package statements - -import ( - "fmt" - "strings" - - "xorm.io/xorm/internal/utils" - "xorm.io/xorm/schemas" -) - -// ConvertIDSQL converts SQL with id -func (statement *Statement) ConvertIDSQL(sqlStr string) string { - if statement.RefTable != nil { - cols := statement.RefTable.PKColumns() - if len(cols) == 0 { - return "" - } - - colstrs := statement.joinColumns(cols, false) - sqls := utils.SplitNNoCase(sqlStr, " from ", 2) - if len(sqls) != 2 { - return "" - } - - var top string - pLimitN := statement.LimitN - if pLimitN != nil && statement.dialect.URI().DBType == schemas.MSSQL { - top = fmt.Sprintf("TOP %d ", *pLimitN) - } - - newsql := fmt.Sprintf("SELECT %s%s FROM %v", top, colstrs, sqls[1]) - return newsql - } - return "" -} - -// ConvertUpdateSQL converts update SQL -func (statement *Statement) ConvertUpdateSQL(sqlStr string) (string, string) { - if statement.RefTable == nil || len(statement.RefTable.PrimaryKeys) != 1 { - return "", "" - } - - colstrs := statement.joinColumns(statement.RefTable.PKColumns(), true) - sqls := utils.SplitNNoCase(sqlStr, "where", 2) - if len(sqls) != 2 { - if len(sqls) == 1 { - return sqls[0], fmt.Sprintf("SELECT %v FROM %v", - colstrs, statement.quote(statement.TableName())) - } - return "", "" - } - - var whereStr = sqls[1] - - // TODO: for postgres only, if any other database? 
- var paraStr string - if statement.dialect.URI().DBType == schemas.POSTGRES { - paraStr = "$" - } else if statement.dialect.URI().DBType == schemas.MSSQL { - paraStr = ":" - } - - if paraStr != "" { - if strings.Contains(sqls[1], paraStr) { - dollers := strings.Split(sqls[1], paraStr) - whereStr = dollers[0] - for i, c := range dollers[1:] { - ccs := strings.SplitN(c, " ", 2) - whereStr += fmt.Sprintf(paraStr+"%v %v", i+1, ccs[1]) - } - } - } - - return sqls[0], fmt.Sprintf("SELECT %v FROM %v WHERE %v", - colstrs, statement.quote(statement.TableName()), - whereStr) -} diff --git a/vendor/xorm.io/xorm/internal/statements/column_map.go b/vendor/xorm.io/xorm/internal/statements/column_map.go deleted file mode 100644 index bb764b4e..00000000 --- a/vendor/xorm.io/xorm/internal/statements/column_map.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2019 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package statements - -import ( - "strings" - - "xorm.io/xorm/schemas" -) - -type columnMap []string - -func (m columnMap) Contain(colName string) bool { - if len(m) == 0 { - return false - } - - n := len(colName) - for _, mk := range m { - if len(mk) != n { - continue - } - if strings.EqualFold(mk, colName) { - return true - } - } - - return false -} - -func (m columnMap) Len() int { - return len(m) -} - -func (m columnMap) IsEmpty() bool { - return len(m) == 0 -} - -func (m *columnMap) Add(colName string) bool { - if m.Contain(colName) { - return false - } - *m = append(*m, colName) - return true -} - -func getFlagForColumn(m map[string]bool, col *schemas.Column) (val bool, has bool) { - if len(m) == 0 { - return false, false - } - - n := len(col.Name) - - for mk := range m { - if len(mk) != n { - continue - } - if strings.EqualFold(mk, col.Name) { - return m[mk], true - } - } - - return false, false -} diff --git a/vendor/xorm.io/xorm/internal/statements/cond.go b/vendor/xorm.io/xorm/internal/statements/cond.go deleted file mode 100644 index dfc6c208..00000000 --- a/vendor/xorm.io/xorm/internal/statements/cond.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright 2022 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package statements - -import ( - "xorm.io/builder" - "xorm.io/xorm/schemas" -) - -type QuoteReplacer struct { - *builder.BytesWriter - quoter schemas.Quoter -} - -func (q *QuoteReplacer) Write(p []byte) (n int, err error) { - c := q.quoter.Replace(string(p)) - return q.BytesWriter.Builder.WriteString(c) -} - -func (statement *Statement) QuoteReplacer(w *builder.BytesWriter) *QuoteReplacer { - return &QuoteReplacer{ - BytesWriter: w, - quoter: statement.dialect.Quoter(), - } -} - -// Where add Where statement -func (statement *Statement) Where(query interface{}, args ...interface{}) *Statement { - return statement.And(query, args...) 
-} - -// And add Where & and statement -func (statement *Statement) And(query interface{}, args ...interface{}) *Statement { - switch qr := query.(type) { - case string: - cond := builder.Expr(qr, args...) - statement.cond = statement.cond.And(cond) - case map[string]interface{}: - cond := make(builder.Eq) - for k, v := range qr { - cond[statement.quote(k)] = v - } - statement.cond = statement.cond.And(cond) - case builder.Cond: - statement.cond = statement.cond.And(qr) - for _, v := range args { - if vv, ok := v.(builder.Cond); ok { - statement.cond = statement.cond.And(vv) - } - } - default: - statement.LastError = ErrConditionType - } - - return statement -} - -// Or add Where & Or statement -func (statement *Statement) Or(query interface{}, args ...interface{}) *Statement { - switch qr := query.(type) { - case string: - cond := builder.Expr(qr, args...) - statement.cond = statement.cond.Or(cond) - case map[string]interface{}: - cond := make(builder.Eq) - for k, v := range qr { - cond[statement.quote(k)] = v - } - statement.cond = statement.cond.Or(cond) - case builder.Cond: - statement.cond = statement.cond.Or(qr) - for _, v := range args { - if vv, ok := v.(builder.Cond); ok { - statement.cond = statement.cond.Or(vv) - } - } - default: - statement.LastError = ErrConditionType - } - return statement -} - -// In generate "Where column IN (?) " statement -func (statement *Statement) In(column string, args ...interface{}) *Statement { - in := builder.In(statement.quote(column), args...) - statement.cond = statement.cond.And(in) - return statement -} - -// NotIn generate "Where column NOT IN (?) " statement -func (statement *Statement) NotIn(column string, args ...interface{}) *Statement { - notIn := builder.NotIn(statement.quote(column), args...) 
- statement.cond = statement.cond.And(notIn) - return statement -} - -// SetNoAutoCondition if you do not want convert bean's field as query condition, then use this function -func (statement *Statement) SetNoAutoCondition(no ...bool) *Statement { - statement.NoAutoCondition = true - if len(no) > 0 { - statement.NoAutoCondition = no[0] - } - return statement -} - -// Conds returns condtions -func (statement *Statement) Conds() builder.Cond { - return statement.cond -} diff --git a/vendor/xorm.io/xorm/internal/statements/expr.go b/vendor/xorm.io/xorm/internal/statements/expr.go deleted file mode 100644 index c2a2e1cc..00000000 --- a/vendor/xorm.io/xorm/internal/statements/expr.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2019 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package statements - -import ( - "fmt" - "strings" - - "xorm.io/builder" - "xorm.io/xorm/schemas" -) - -// ErrUnsupportedExprType represents an error with unsupported express type -type ErrUnsupportedExprType struct { - tp string -} - -func (err ErrUnsupportedExprType) Error() string { - return fmt.Sprintf("Unsupported expression type: %v", err.tp) -} - -// Expr represents an SQL express -type Expr struct { - ColName string - Arg interface{} -} - -// WriteArgs writes args to the writer -func (expr *Expr) WriteArgs(w *builder.BytesWriter) error { - switch arg := expr.Arg.(type) { - case *builder.Builder: - if _, err := w.WriteString("("); err != nil { - return err - } - if err := arg.WriteTo(w); err != nil { - return err - } - if _, err := w.WriteString(")"); err != nil { - return err - } - case string: - if arg == "" { - arg = "''" - } - if _, err := w.WriteString(fmt.Sprintf("%v", arg)); err != nil { - return err - } - default: - if _, err := w.WriteString("?"); err != nil { - return err - } - w.Append(arg) - } - return nil -} - -type exprParams []Expr - -func (exprs exprParams) ColNames() 
[]string { - var cols = make([]string, 0, len(exprs)) - for _, expr := range exprs { - cols = append(cols, expr.ColName) - } - return cols -} - -func (exprs *exprParams) Add(name string, arg interface{}) { - *exprs = append(*exprs, Expr{name, arg}) -} - -func (exprs exprParams) IsColExist(colName string) bool { - for _, expr := range exprs { - if strings.EqualFold(schemas.CommonQuoter.Trim(expr.ColName), schemas.CommonQuoter.Trim(colName)) { - return true - } - } - return false -} - -func (exprs exprParams) WriteArgs(w *builder.BytesWriter) error { - for i, expr := range exprs { - if err := expr.WriteArgs(w); err != nil { - return err - } - if i != len(exprs)-1 { - if _, err := w.WriteString(","); err != nil { - return err - } - } - } - return nil -} diff --git a/vendor/xorm.io/xorm/internal/statements/insert.go b/vendor/xorm.io/xorm/internal/statements/insert.go deleted file mode 100644 index 91a33319..00000000 --- a/vendor/xorm.io/xorm/internal/statements/insert.go +++ /dev/null @@ -1,299 +0,0 @@ -// Copyright 2020 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package statements - -import ( - "errors" - "fmt" - "strings" - - "xorm.io/builder" - "xorm.io/xorm/internal/utils" - "xorm.io/xorm/schemas" -) - -func (statement *Statement) writeInsertOutput(buf *strings.Builder, table *schemas.Table) error { - if statement.dialect.URI().DBType == schemas.MSSQL && len(table.AutoIncrement) > 0 { - if _, err := buf.WriteString(" OUTPUT Inserted."); err != nil { - return err - } - if err := statement.dialect.Quoter().QuoteTo(buf, table.AutoIncrement); err != nil { - return err - } - } - return nil -} - -// GenInsertSQL generates insert beans SQL -func (statement *Statement) GenInsertSQL(colNames []string, args []interface{}) (string, []interface{}, error) { - var ( - buf = builder.NewWriter() - exprs = statement.ExprColumns - table = statement.RefTable - tableName = statement.TableName() - ) - - if _, err := buf.WriteString("INSERT INTO "); err != nil { - return "", nil, err - } - - if err := statement.dialect.Quoter().QuoteTo(buf.Builder, tableName); err != nil { - return "", nil, err - } - - var hasInsertColumns = len(colNames) > 0 - var needSeq = len(table.AutoIncrement) > 0 && (statement.dialect.URI().DBType == schemas.ORACLE || statement.dialect.URI().DBType == schemas.DAMENG) - if needSeq { - for _, col := range colNames { - if strings.EqualFold(col, table.AutoIncrement) { - needSeq = false - break - } - } - } - - if !hasInsertColumns && statement.dialect.URI().DBType != schemas.ORACLE && - statement.dialect.URI().DBType != schemas.DAMENG { - if statement.dialect.URI().DBType == schemas.MYSQL { - if _, err := buf.WriteString(" VALUES ()"); err != nil { - return "", nil, err - } - } else { - if err := statement.writeInsertOutput(buf.Builder, table); err != nil { - return "", nil, err - } - if _, err := buf.WriteString(" DEFAULT VALUES"); err != nil { - return "", nil, err - } - } - } else { - if _, err := buf.WriteString(" ("); err != nil { - return "", nil, err - } - - if needSeq { - colNames = append(colNames, 
table.AutoIncrement) - } - - if err := statement.dialect.Quoter().JoinWrite(buf.Builder, append(colNames, exprs.ColNames()...), ","); err != nil { - return "", nil, err - } - - if _, err := buf.WriteString(")"); err != nil { - return "", nil, err - } - if err := statement.writeInsertOutput(buf.Builder, table); err != nil { - return "", nil, err - } - - if statement.Conds().IsValid() { - if _, err := buf.WriteString(" SELECT "); err != nil { - return "", nil, err - } - - if err := statement.WriteArgs(buf, args); err != nil { - return "", nil, err - } - - if needSeq { - if len(args) > 0 { - if _, err := buf.WriteString(","); err != nil { - return "", nil, err - } - } - if _, err := buf.WriteString(utils.SeqName(tableName) + ".nextval"); err != nil { - return "", nil, err - } - } - if len(exprs) > 0 { - if _, err := buf.WriteString(","); err != nil { - return "", nil, err - } - if err := exprs.WriteArgs(buf); err != nil { - return "", nil, err - } - } - - if _, err := buf.WriteString(" FROM "); err != nil { - return "", nil, err - } - - if err := statement.dialect.Quoter().QuoteTo(buf.Builder, tableName); err != nil { - return "", nil, err - } - - if _, err := buf.WriteString(" WHERE "); err != nil { - return "", nil, err - } - - if err := statement.Conds().WriteTo(buf); err != nil { - return "", nil, err - } - } else { - if _, err := buf.WriteString(" VALUES ("); err != nil { - return "", nil, err - } - - if err := statement.WriteArgs(buf, args); err != nil { - return "", nil, err - } - - // Insert tablename (id) Values(seq_tablename.nextval) - if needSeq { - if hasInsertColumns { - if _, err := buf.WriteString(","); err != nil { - return "", nil, err - } - } - if _, err := buf.WriteString(utils.SeqName(tableName) + ".nextval"); err != nil { - return "", nil, err - } - } - - if len(exprs) > 0 { - if _, err := buf.WriteString(","); err != nil { - return "", nil, err - } - } - - if err := exprs.WriteArgs(buf); err != nil { - return "", nil, err - } - - if _, err := 
buf.WriteString(")"); err != nil { - return "", nil, err - } - } - } - - if len(table.AutoIncrement) > 0 && statement.dialect.URI().DBType == schemas.POSTGRES { - if _, err := buf.WriteString(" RETURNING "); err != nil { - return "", nil, err - } - if err := statement.dialect.Quoter().QuoteTo(buf.Builder, table.AutoIncrement); err != nil { - return "", nil, err - } - } - - return buf.String(), buf.Args(), nil -} - -// GenInsertMapSQL generates insert map SQL -func (statement *Statement) GenInsertMapSQL(columns []string, args []interface{}) (string, []interface{}, error) { - var ( - buf = builder.NewWriter() - exprs = statement.ExprColumns - tableName = statement.TableName() - ) - - if _, err := buf.WriteString(fmt.Sprintf("INSERT INTO %s (", statement.quote(tableName))); err != nil { - return "", nil, err - } - - if err := statement.dialect.Quoter().JoinWrite(buf.Builder, append(columns, exprs.ColNames()...), ","); err != nil { - return "", nil, err - } - - // if insert where - if statement.Conds().IsValid() { - if _, err := buf.WriteString(") SELECT "); err != nil { - return "", nil, err - } - - if err := statement.WriteArgs(buf, args); err != nil { - return "", nil, err - } - - if len(exprs) > 0 { - if _, err := buf.WriteString(","); err != nil { - return "", nil, err - } - if err := exprs.WriteArgs(buf); err != nil { - return "", nil, err - } - } - - if _, err := buf.WriteString(fmt.Sprintf(" FROM %s WHERE ", statement.quote(tableName))); err != nil { - return "", nil, err - } - - if err := statement.Conds().WriteTo(buf); err != nil { - return "", nil, err - } - } else { - if _, err := buf.WriteString(") VALUES ("); err != nil { - return "", nil, err - } - if err := statement.WriteArgs(buf, args); err != nil { - return "", nil, err - } - - if len(exprs) > 0 { - if _, err := buf.WriteString(","); err != nil { - return "", nil, err - } - if err := exprs.WriteArgs(buf); err != nil { - return "", nil, err - } - } - if _, err := buf.WriteString(")"); err != nil { - 
return "", nil, err - } - } - - return buf.String(), buf.Args(), nil -} - -func (statement *Statement) GenInsertMultipleMapSQL(columns []string, argss [][]interface{}) (string, []interface{}, error) { - var ( - buf = builder.NewWriter() - exprs = statement.ExprColumns - tableName = statement.TableName() - ) - - if _, err := buf.WriteString(fmt.Sprintf("INSERT INTO %s (", statement.quote(tableName))); err != nil { - return "", nil, err - } - - if err := statement.dialect.Quoter().JoinWrite(buf.Builder, append(columns, exprs.ColNames()...), ","); err != nil { - return "", nil, err - } - - // if insert where - if statement.Conds().IsValid() { - return "", nil, errors.New("batch insert don't support with where") - } - - if _, err := buf.WriteString(") VALUES "); err != nil { - return "", nil, err - } - for i, args := range argss { - if _, err := buf.WriteString("("); err != nil { - return "", nil, err - } - if err := statement.WriteArgs(buf, args); err != nil { - return "", nil, err - } - - if len(exprs) > 0 { - if _, err := buf.WriteString(","); err != nil { - return "", nil, err - } - if err := exprs.WriteArgs(buf); err != nil { - return "", nil, err - } - } - if _, err := buf.WriteString(")"); err != nil { - return "", nil, err - } - if i < len(argss)-1 { - if _, err := buf.WriteString(","); err != nil { - return "", nil, err - } - } - } - - return buf.String(), buf.Args(), nil -} diff --git a/vendor/xorm.io/xorm/internal/statements/join.go b/vendor/xorm.io/xorm/internal/statements/join.go deleted file mode 100644 index 45fc2441..00000000 --- a/vendor/xorm.io/xorm/internal/statements/join.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2022 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package statements - -import ( - "fmt" - "strings" - - "xorm.io/builder" - "xorm.io/xorm/dialects" - "xorm.io/xorm/internal/utils" - "xorm.io/xorm/schemas" -) - -// Join The joinOP should be one of INNER, LEFT OUTER, CROSS etc - this will be prepended to JOIN -func (statement *Statement) Join(joinOP string, tablename interface{}, condition string, args ...interface{}) *Statement { - var buf strings.Builder - if len(statement.JoinStr) > 0 { - fmt.Fprintf(&buf, "%v %v JOIN ", statement.JoinStr, joinOP) - } else { - fmt.Fprintf(&buf, "%v JOIN ", joinOP) - } - - switch tp := tablename.(type) { - case builder.Builder: - subSQL, subQueryArgs, err := tp.ToSQL() - if err != nil { - statement.LastError = err - return statement - } - - fields := strings.Split(tp.TableName(), ".") - aliasName := statement.dialect.Quoter().Trim(fields[len(fields)-1]) - aliasName = schemas.CommonQuoter.Trim(aliasName) - - fmt.Fprintf(&buf, "(%s) %s ON %v", statement.ReplaceQuote(subSQL), statement.quote(aliasName), statement.ReplaceQuote(condition)) - statement.joinArgs = append(statement.joinArgs, subQueryArgs...) - case *builder.Builder: - subSQL, subQueryArgs, err := tp.ToSQL() - if err != nil { - statement.LastError = err - return statement - } - - fields := strings.Split(tp.TableName(), ".") - aliasName := statement.dialect.Quoter().Trim(fields[len(fields)-1]) - aliasName = schemas.CommonQuoter.Trim(aliasName) - - fmt.Fprintf(&buf, "(%s) %s ON %v", statement.ReplaceQuote(subSQL), statement.quote(aliasName), statement.ReplaceQuote(condition)) - statement.joinArgs = append(statement.joinArgs, subQueryArgs...) 
- default: - tbName := dialects.FullTableName(statement.dialect, statement.tagParser.GetTableMapper(), tablename, true) - if !utils.IsSubQuery(tbName) { - var buf strings.Builder - _ = statement.dialect.Quoter().QuoteTo(&buf, tbName) - tbName = buf.String() - } else { - tbName = statement.ReplaceQuote(tbName) - } - fmt.Fprintf(&buf, "%s ON %v", tbName, statement.ReplaceQuote(condition)) - } - - statement.JoinStr = buf.String() - statement.joinArgs = append(statement.joinArgs, args...) - return statement -} - -func (statement *Statement) writeJoin(w builder.Writer) error { - if statement.JoinStr != "" { - if _, err := fmt.Fprint(w, " ", statement.JoinStr); err != nil { - return err - } - w.Append(statement.joinArgs...) - } - return nil -} diff --git a/vendor/xorm.io/xorm/internal/statements/order_by.go b/vendor/xorm.io/xorm/internal/statements/order_by.go deleted file mode 100644 index 08a8263b..00000000 --- a/vendor/xorm.io/xorm/internal/statements/order_by.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2022 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package statements - -import ( - "fmt" - "strings" - - "xorm.io/builder" -) - -func (statement *Statement) HasOrderBy() bool { - return statement.orderStr != "" -} - -// ResetOrderBy reset ordery conditions -func (statement *Statement) ResetOrderBy() { - statement.orderStr = "" - statement.orderArgs = nil -} - -// WriteOrderBy write order by to writer -func (statement *Statement) WriteOrderBy(w builder.Writer) error { - if len(statement.orderStr) > 0 { - if _, err := fmt.Fprintf(w, " ORDER BY %s", statement.orderStr); err != nil { - return err - } - w.Append(statement.orderArgs...) 
- } - return nil -} - -// OrderBy generate "Order By order" statement -func (statement *Statement) OrderBy(order interface{}, args ...interface{}) *Statement { - if len(statement.orderStr) > 0 { - statement.orderStr += ", " - } - var rawOrder string - switch t := order.(type) { - case (*builder.Expression): - rawOrder = t.Content() - args = t.Args() - case string: - rawOrder = t - default: - statement.LastError = ErrUnSupportedSQLType - return statement - } - statement.orderStr += statement.ReplaceQuote(rawOrder) - if len(args) > 0 { - statement.orderArgs = append(statement.orderArgs, args...) - } - return statement -} - -// Desc generate `ORDER BY xx DESC` -func (statement *Statement) Desc(colNames ...string) *Statement { - var buf strings.Builder - if len(statement.orderStr) > 0 { - fmt.Fprint(&buf, statement.orderStr, ", ") - } - for i, col := range colNames { - if i > 0 { - fmt.Fprint(&buf, ", ") - } - _ = statement.dialect.Quoter().QuoteTo(&buf, col) - fmt.Fprint(&buf, " DESC") - } - statement.orderStr = buf.String() - return statement -} - -// Asc provide asc order by query condition, the input parameters are columns. -func (statement *Statement) Asc(colNames ...string) *Statement { - var buf strings.Builder - if len(statement.orderStr) > 0 { - fmt.Fprint(&buf, statement.orderStr, ", ") - } - for i, col := range colNames { - if i > 0 { - fmt.Fprint(&buf, ", ") - } - _ = statement.dialect.Quoter().QuoteTo(&buf, col) - fmt.Fprint(&buf, " ASC") - } - statement.orderStr = buf.String() - return statement -} diff --git a/vendor/xorm.io/xorm/internal/statements/pk.go b/vendor/xorm.io/xorm/internal/statements/pk.go deleted file mode 100644 index 59da89c0..00000000 --- a/vendor/xorm.io/xorm/internal/statements/pk.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2017 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package statements - -import ( - "fmt" - "reflect" - - "xorm.io/builder" - "xorm.io/xorm/schemas" -) - -var ( - ptrPkType = reflect.TypeOf(&schemas.PK{}) - pkType = reflect.TypeOf(schemas.PK{}) - stringType = reflect.TypeOf("") - intType = reflect.TypeOf(int64(0)) - uintType = reflect.TypeOf(uint64(0)) -) - -// ErrIDConditionWithNoTable represents an error there is no reference table with an ID condition -type ErrIDConditionWithNoTable struct { - ID schemas.PK -} - -func (err ErrIDConditionWithNoTable) Error() string { - return fmt.Sprintf("ID condition %#v need reference table", err.ID) -} - -// IsIDConditionWithNoTableErr return true if the err is ErrIDConditionWithNoTable -func IsIDConditionWithNoTableErr(err error) bool { - _, ok := err.(ErrIDConditionWithNoTable) - return ok -} - -// ID generate "where id = ? " statement or for composite key "where key1 = ? and key2 = ?" -func (statement *Statement) ID(id interface{}) *Statement { - switch t := id.(type) { - case *schemas.PK: - statement.idParam = *t - case schemas.PK: - statement.idParam = t - case string, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: - statement.idParam = schemas.PK{id} - default: - idValue := reflect.ValueOf(id) - idType := idValue.Type() - - switch idType.Kind() { - case reflect.String: - statement.idParam = schemas.PK{idValue.Convert(stringType).Interface()} - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - statement.idParam = schemas.PK{idValue.Convert(intType).Interface()} - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - statement.idParam = schemas.PK{idValue.Convert(uintType).Interface()} - case reflect.Slice: - if idType.ConvertibleTo(pkType) { - statement.idParam = idValue.Convert(pkType).Interface().(schemas.PK) - } - case reflect.Ptr: - if idType.ConvertibleTo(ptrPkType) { - statement.idParam = idValue.Convert(ptrPkType).Elem().Interface().(schemas.PK) - } - } - } - - if statement.idParam 
== nil { - statement.LastError = fmt.Errorf("ID param %#v is not supported", id) - } - - return statement -} - -// ProcessIDParam handles the process of id condition -func (statement *Statement) ProcessIDParam() error { - if statement.idParam == nil { - return nil - } - - if statement.RefTable == nil { - return ErrIDConditionWithNoTable{statement.idParam} - } - - if len(statement.RefTable.PrimaryKeys) != len(statement.idParam) { - return fmt.Errorf("ID condition is error, expect %d primarykeys, there are %d", - len(statement.RefTable.PrimaryKeys), - len(statement.idParam), - ) - } - - for i, col := range statement.RefTable.PKColumns() { - var colName = statement.colName(col, statement.TableName()) - statement.cond = statement.cond.And(builder.Eq{colName: statement.idParam[i]}) - } - return nil -} diff --git a/vendor/xorm.io/xorm/internal/statements/query.go b/vendor/xorm.io/xorm/internal/statements/query.go deleted file mode 100644 index f72c8602..00000000 --- a/vendor/xorm.io/xorm/internal/statements/query.go +++ /dev/null @@ -1,499 +0,0 @@ -// Copyright 2019 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package statements - -import ( - "errors" - "fmt" - "reflect" - "strings" - - "xorm.io/builder" - "xorm.io/xorm/internal/utils" - "xorm.io/xorm/schemas" -) - -// GenQuerySQL generate query SQL -func (statement *Statement) GenQuerySQL(sqlOrArgs ...interface{}) (string, []interface{}, error) { - if len(sqlOrArgs) > 0 { - return statement.ConvertSQLOrArgs(sqlOrArgs...) 
- } - - if statement.RawSQL != "" { - return statement.GenRawSQL(), statement.RawParams, nil - } - - if len(statement.TableName()) <= 0 { - return "", nil, ErrTableNotFound - } - - columnStr := statement.ColumnStr() - if len(statement.SelectStr) > 0 { - columnStr = statement.SelectStr - } else { - if statement.JoinStr == "" { - if columnStr == "" { - if statement.GroupByStr != "" { - columnStr = statement.quoteColumnStr(statement.GroupByStr) - } else { - columnStr = statement.genColumnStr() - } - } - } else { - if columnStr == "" { - if statement.GroupByStr != "" { - columnStr = statement.quoteColumnStr(statement.GroupByStr) - } else { - columnStr = "*" - } - } - } - if columnStr == "" { - columnStr = "*" - } - } - - if err := statement.ProcessIDParam(); err != nil { - return "", nil, err - } - - return statement.genSelectSQL(columnStr, true, true) -} - -// GenSumSQL generates sum SQL -func (statement *Statement) GenSumSQL(bean interface{}, columns ...string) (string, []interface{}, error) { - if statement.RawSQL != "" { - return statement.GenRawSQL(), statement.RawParams, nil - } - - if err := statement.SetRefBean(bean); err != nil { - return "", nil, err - } - - sumStrs := make([]string, 0, len(columns)) - for _, colName := range columns { - if !strings.Contains(colName, " ") && !strings.Contains(colName, "(") { - colName = statement.quote(colName) - } else { - colName = statement.ReplaceQuote(colName) - } - sumStrs = append(sumStrs, fmt.Sprintf("COALESCE(sum(%s),0)", colName)) - } - sumSelect := strings.Join(sumStrs, ", ") - - if err := statement.MergeConds(bean); err != nil { - return "", nil, err - } - - return statement.genSelectSQL(sumSelect, true, true) -} - -// GenGetSQL generates Get SQL -func (statement *Statement) GenGetSQL(bean interface{}) (string, []interface{}, error) { - var isStruct bool - if bean != nil { - v := rValue(bean) - isStruct = v.Kind() == reflect.Struct - if isStruct { - if err := statement.SetRefBean(bean); err != nil { - return "", 
nil, err - } - } - } - - columnStr := statement.ColumnStr() - if len(statement.SelectStr) > 0 { - columnStr = statement.SelectStr - } else { - // TODO: always generate column names, not use * even if join - if len(statement.JoinStr) == 0 { - if len(columnStr) == 0 { - if len(statement.GroupByStr) > 0 { - columnStr = statement.quoteColumnStr(statement.GroupByStr) - } else { - columnStr = statement.genColumnStr() - } - } - } else { - if len(columnStr) == 0 { - if len(statement.GroupByStr) > 0 { - columnStr = statement.quoteColumnStr(statement.GroupByStr) - } - } - } - } - - if len(columnStr) == 0 { - columnStr = "*" - } - - if isStruct { - if err := statement.MergeConds(bean); err != nil { - return "", nil, err - } - } else { - if err := statement.ProcessIDParam(); err != nil { - return "", nil, err - } - } - - return statement.genSelectSQL(columnStr, true, true) -} - -// GenCountSQL generates the SQL for counting -func (statement *Statement) GenCountSQL(beans ...interface{}) (string, []interface{}, error) { - if statement.RawSQL != "" { - return statement.GenRawSQL(), statement.RawParams, nil - } - - var condArgs []interface{} - var err error - if len(beans) > 0 { - if err := statement.SetRefBean(beans[0]); err != nil { - return "", nil, err - } - if err := statement.MergeConds(beans[0]); err != nil { - return "", nil, err - } - } - - selectSQL := statement.SelectStr - if len(selectSQL) <= 0 { - if statement.IsDistinct { - selectSQL = fmt.Sprintf("count(DISTINCT %s)", statement.ColumnStr()) - } else if statement.ColumnStr() != "" { - selectSQL = fmt.Sprintf("count(%s)", statement.ColumnStr()) - } else { - selectSQL = "count(*)" - } - } - var subQuerySelect string - if statement.GroupByStr != "" { - subQuerySelect = statement.GroupByStr - } else { - subQuerySelect = selectSQL - } - - sqlStr, condArgs, err := statement.genSelectSQL(subQuerySelect, false, false) - if err != nil { - return "", nil, err - } - - if statement.GroupByStr != "" { - sqlStr = 
fmt.Sprintf("SELECT %s FROM (%s) sub", selectSQL, sqlStr) - } - - return sqlStr, condArgs, nil -} - -func (statement *Statement) writeFrom(w builder.Writer) error { - if _, err := fmt.Fprint(w, " FROM "); err != nil { - return err - } - if err := statement.writeTableName(w); err != nil { - return err - } - if err := statement.writeAlias(w); err != nil { - return err - } - return statement.writeJoin(w) -} - -func (statement *Statement) writeLimitOffset(w builder.Writer) error { - if statement.Start > 0 { - if statement.LimitN != nil { - _, err := fmt.Fprintf(w, " LIMIT %v OFFSET %v", *statement.LimitN, statement.Start) - return err - } - _, err := fmt.Fprintf(w, " LIMIT 0 OFFSET %v", statement.Start) - return err - } - if statement.LimitN != nil { - _, err := fmt.Fprint(w, " LIMIT ", *statement.LimitN) - return err - } - // no limit statement - return nil -} - -func (statement *Statement) genSelectSQL(columnStr string, needLimit, needOrderBy bool) (string, []interface{}, error) { - var ( - distinct string - dialect = statement.dialect - top, whereStr string - mssqlCondi = builder.NewWriter() - ) - - if statement.IsDistinct && !strings.HasPrefix(columnStr, "count") { - distinct = "DISTINCT " - } - - condWriter := builder.NewWriter() - if err := statement.cond.WriteTo(statement.QuoteReplacer(condWriter)); err != nil { - return "", nil, err - } - - if condWriter.Len() > 0 { - whereStr = " WHERE " - } - - pLimitN := statement.LimitN - if dialect.URI().DBType == schemas.MSSQL { - if pLimitN != nil { - LimitNValue := *pLimitN - top = fmt.Sprintf("TOP %d ", LimitNValue) - } - if statement.Start > 0 { - if statement.RefTable == nil { - return "", nil, errors.New("Unsupported query limit without reference table") - } - var column string - if len(statement.RefTable.PKColumns()) == 0 { - for _, index := range statement.RefTable.Indexes { - if len(index.Cols) == 1 { - column = index.Cols[0] - break - } - } - if len(column) == 0 { - column = statement.RefTable.ColumnsSeq()[0] - 
} - } else { - column = statement.RefTable.PKColumns()[0].Name - } - if statement.needTableName() { - if len(statement.TableAlias) > 0 { - column = fmt.Sprintf("%s.%s", statement.TableAlias, column) - } else { - column = fmt.Sprintf("%s.%s", statement.TableName(), column) - } - } - - if _, err := fmt.Fprintf(mssqlCondi, "(%s NOT IN (SELECT TOP %d %s", - column, statement.Start, column); err != nil { - return "", nil, err - } - if err := statement.writeFrom(mssqlCondi); err != nil { - return "", nil, err - } - if whereStr != "" { - if _, err := fmt.Fprint(mssqlCondi, whereStr); err != nil { - return "", nil, err - } - if err := utils.WriteBuilder(mssqlCondi, statement.QuoteReplacer(condWriter)); err != nil { - return "", nil, err - } - } - if needOrderBy { - if err := statement.WriteOrderBy(mssqlCondi); err != nil { - return "", nil, err - } - } - if err := statement.WriteGroupBy(mssqlCondi); err != nil { - return "", nil, err - } - if _, err := fmt.Fprint(mssqlCondi, "))"); err != nil { - return "", nil, err - } - } - } - - buf := builder.NewWriter() - if _, err := fmt.Fprintf(buf, "SELECT %v%v%v", distinct, top, columnStr); err != nil { - return "", nil, err - } - if err := statement.writeFrom(buf); err != nil { - return "", nil, err - } - if whereStr != "" { - if _, err := fmt.Fprint(buf, whereStr); err != nil { - return "", nil, err - } - if err := utils.WriteBuilder(buf, statement.QuoteReplacer(condWriter)); err != nil { - return "", nil, err - } - } - if mssqlCondi.Len() > 0 { - if len(whereStr) > 0 { - if _, err := fmt.Fprint(buf, " AND "); err != nil { - return "", nil, err - } - } else { - if _, err := fmt.Fprint(buf, " WHERE "); err != nil { - return "", nil, err - } - } - - if err := utils.WriteBuilder(buf, mssqlCondi); err != nil { - return "", nil, err - } - } - - if err := statement.WriteGroupBy(buf); err != nil { - return "", nil, err - } - if err := statement.writeHaving(buf); err != nil { - return "", nil, err - } - if needOrderBy { - if err := 
statement.WriteOrderBy(buf); err != nil { - return "", nil, err - } - } - if needLimit { - if dialect.URI().DBType != schemas.MSSQL && dialect.URI().DBType != schemas.ORACLE { - if err := statement.writeLimitOffset(buf); err != nil { - return "", nil, err - } - } else if dialect.URI().DBType == schemas.ORACLE { - if pLimitN != nil { - oldString := buf.String() - buf.Reset() - rawColStr := columnStr - if rawColStr == "*" { - rawColStr = "at.*" - } - fmt.Fprintf(buf, "SELECT %v FROM (SELECT %v,ROWNUM RN FROM (%v) at WHERE ROWNUM <= %d) aat WHERE RN > %d", - columnStr, rawColStr, oldString, statement.Start+*pLimitN, statement.Start) - } - } - } - if statement.IsForUpdate { - return dialect.ForUpdateSQL(buf.String()), buf.Args(), nil - } - - return buf.String(), buf.Args(), nil -} - -// GenExistSQL generates Exist SQL -func (statement *Statement) GenExistSQL(bean ...interface{}) (string, []interface{}, error) { - if statement.RawSQL != "" { - return statement.GenRawSQL(), statement.RawParams, nil - } - - var b interface{} - if len(bean) > 0 { - b = bean[0] - beanValue := reflect.ValueOf(bean[0]) - if beanValue.Kind() != reflect.Ptr { - return "", nil, errors.New("needs a pointer") - } - - if beanValue.Elem().Kind() == reflect.Struct { - if err := statement.SetRefBean(bean[0]); err != nil { - return "", nil, err - } - } - } - tableName := statement.TableName() - if len(tableName) <= 0 { - return "", nil, ErrTableNotFound - } - if statement.RefTable != nil { - return statement.Limit(1).GenGetSQL(b) - } - - tableName = statement.quote(tableName) - - buf := builder.NewWriter() - if statement.dialect.URI().DBType == schemas.MSSQL { - if _, err := fmt.Fprintf(buf, "SELECT TOP 1 * FROM %s", tableName); err != nil { - return "", nil, err - } - if err := statement.writeJoin(buf); err != nil { - return "", nil, err - } - if statement.Conds().IsValid() { - if _, err := fmt.Fprintf(buf, " WHERE "); err != nil { - return "", nil, err - } - if err := 
statement.Conds().WriteTo(statement.QuoteReplacer(buf)); err != nil { - return "", nil, err - } - } - } else if statement.dialect.URI().DBType == schemas.ORACLE { - if _, err := fmt.Fprintf(buf, "SELECT * FROM %s", tableName); err != nil { - return "", nil, err - } - if err := statement.writeJoin(buf); err != nil { - return "", nil, err - } - if _, err := fmt.Fprintf(buf, " WHERE "); err != nil { - return "", nil, err - } - if statement.Conds().IsValid() { - if err := statement.Conds().WriteTo(statement.QuoteReplacer(buf)); err != nil { - return "", nil, err - } - if _, err := fmt.Fprintf(buf, " AND "); err != nil { - return "", nil, err - } - } - if _, err := fmt.Fprintf(buf, "ROWNUM=1"); err != nil { - return "", nil, err - } - } else { - if _, err := fmt.Fprintf(buf, "SELECT 1 FROM %s", tableName); err != nil { - return "", nil, err - } - if err := statement.writeJoin(buf); err != nil { - return "", nil, err - } - if statement.Conds().IsValid() { - if _, err := fmt.Fprintf(buf, " WHERE "); err != nil { - return "", nil, err - } - if err := statement.Conds().WriteTo(statement.QuoteReplacer(buf)); err != nil { - return "", nil, err - } - } - if _, err := fmt.Fprintf(buf, " LIMIT 1"); err != nil { - return "", nil, err - } - } - - return buf.String(), buf.Args(), nil -} - -// GenFindSQL generates Find SQL -func (statement *Statement) GenFindSQL(autoCond builder.Cond) (string, []interface{}, error) { - if statement.RawSQL != "" { - return statement.GenRawSQL(), statement.RawParams, nil - } - - if len(statement.TableName()) <= 0 { - return "", nil, ErrTableNotFound - } - - columnStr := statement.ColumnStr() - if len(statement.SelectStr) > 0 { - columnStr = statement.SelectStr - } else { - if statement.JoinStr == "" { - if columnStr == "" { - if statement.GroupByStr != "" { - columnStr = statement.quoteColumnStr(statement.GroupByStr) - } else { - columnStr = statement.genColumnStr() - } - } - } else { - if columnStr == "" { - if statement.GroupByStr != "" { - 
columnStr = statement.quoteColumnStr(statement.GroupByStr) - } else { - columnStr = "*" - } - } - } - if columnStr == "" { - columnStr = "*" - } - } - - statement.cond = statement.cond.And(autoCond) - - return statement.genSelectSQL(columnStr, true, true) -} diff --git a/vendor/xorm.io/xorm/internal/statements/select.go b/vendor/xorm.io/xorm/internal/statements/select.go deleted file mode 100644 index 2bd2e94d..00000000 --- a/vendor/xorm.io/xorm/internal/statements/select.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright 2022 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package statements - -import ( - "fmt" - "strings" - - "xorm.io/xorm/schemas" -) - -// Select replace select -func (statement *Statement) Select(str string) *Statement { - statement.SelectStr = statement.ReplaceQuote(str) - return statement -} - -func col2NewCols(columns ...string) []string { - newColumns := make([]string, 0, len(columns)) - for _, col := range columns { - col = strings.Replace(col, "`", "", -1) - col = strings.Replace(col, `"`, "", -1) - ccols := strings.Split(col, ",") - for _, c := range ccols { - newColumns = append(newColumns, strings.TrimSpace(c)) - } - } - return newColumns -} - -// Cols generate "col1, col2" statement -func (statement *Statement) Cols(columns ...string) *Statement { - cols := col2NewCols(columns...) - for _, nc := range cols { - statement.ColumnMap.Add(nc) - } - return statement -} - -// ColumnStr returns column string -func (statement *Statement) ColumnStr() string { - return statement.dialect.Quoter().Join(statement.ColumnMap, ", ") -} - -// AllCols update use only: update all columns -func (statement *Statement) AllCols() *Statement { - statement.useAllCols = true - return statement -} - -// MustCols update use only: must update columns -func (statement *Statement) MustCols(columns ...string) *Statement { - newColumns := col2NewCols(columns...) 
- for _, nc := range newColumns { - statement.MustColumnMap[strings.ToLower(nc)] = true - } - return statement -} - -// UseBool indicates that use bool fields as update contents and query contiditions -func (statement *Statement) UseBool(columns ...string) *Statement { - if len(columns) > 0 { - statement.MustCols(columns...) - } else { - statement.allUseBool = true - } - return statement -} - -// Omit do not use the columns -func (statement *Statement) Omit(columns ...string) { - newColumns := col2NewCols(columns...) - for _, nc := range newColumns { - statement.OmitColumnMap = append(statement.OmitColumnMap, nc) - } -} - -func (statement *Statement) genColumnStr() string { - if statement.RefTable == nil { - return "" - } - - var buf strings.Builder - columns := statement.RefTable.Columns() - - for _, col := range columns { - if statement.OmitColumnMap.Contain(col.Name) { - continue - } - - if len(statement.ColumnMap) > 0 && !statement.ColumnMap.Contain(col.Name) { - continue - } - - if col.MapType == schemas.ONLYTODB { - continue - } - - if buf.Len() != 0 { - buf.WriteString(", ") - } - - if statement.JoinStr != "" { - if statement.TableAlias != "" { - buf.WriteString(statement.TableAlias) - } else { - buf.WriteString(statement.TableName()) - } - - buf.WriteString(".") - } - - statement.dialect.Quoter().QuoteTo(&buf, col.Name) - } - - return buf.String() -} - -func (statement *Statement) colName(col *schemas.Column, tableName string) string { - if statement.needTableName() { - nm := tableName - if len(statement.TableAlias) > 0 { - nm = statement.TableAlias - } - return fmt.Sprintf("%s.%s", statement.quote(nm), statement.quote(col.Name)) - } - return statement.quote(col.Name) -} - -// Distinct generates "DISTINCT col1, col2 " statement -func (statement *Statement) Distinct(columns ...string) *Statement { - statement.IsDistinct = true - statement.Cols(columns...) 
- return statement -} diff --git a/vendor/xorm.io/xorm/internal/statements/statement.go b/vendor/xorm.io/xorm/internal/statements/statement.go deleted file mode 100644 index a8fe34fa..00000000 --- a/vendor/xorm.io/xorm/internal/statements/statement.go +++ /dev/null @@ -1,700 +0,0 @@ -// Copyright 2015 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package statements - -import ( - "database/sql/driver" - "errors" - "fmt" - "math/big" - "reflect" - "strings" - "time" - - "xorm.io/builder" - "xorm.io/xorm/contexts" - "xorm.io/xorm/convert" - "xorm.io/xorm/dialects" - "xorm.io/xorm/internal/json" - "xorm.io/xorm/internal/utils" - "xorm.io/xorm/schemas" - "xorm.io/xorm/tags" -) - -var ( - // ErrConditionType condition type unsupported - ErrConditionType = errors.New("Unsupported condition type") - // ErrUnSupportedSQLType parameter of SQL is not supported - ErrUnSupportedSQLType = errors.New("Unsupported sql type") - // ErrUnSupportedType unsupported error - ErrUnSupportedType = errors.New("Unsupported type error") - // ErrTableNotFound table not found error - ErrTableNotFound = errors.New("Table not found") -) - -// Statement save all the sql info for executing SQL -type Statement struct { - RefTable *schemas.Table - dialect dialects.Dialect - defaultTimeZone *time.Location - tagParser *tags.Parser - Start int - LimitN *int - idParam schemas.PK - orderStr string - orderArgs []interface{} - JoinStr string - joinArgs []interface{} - GroupByStr string - HavingStr string - SelectStr string - useAllCols bool - AltTableName string - tableName string - RawSQL string - RawParams []interface{} - UseCascade bool - UseAutoJoin bool - StoreEngine string - Charset string - UseCache bool - UseAutoTime bool - NoAutoCondition bool - IsDistinct bool - IsForUpdate bool - TableAlias string - allUseBool bool - CheckVersion bool - unscoped bool - ColumnMap columnMap - OmitColumnMap 
columnMap - MustColumnMap map[string]bool - NullableMap map[string]bool - IncrColumns exprParams - DecrColumns exprParams - ExprColumns exprParams - cond builder.Cond - BufferSize int - Context contexts.ContextCache - LastError error -} - -// NewStatement creates a new statement -func NewStatement(dialect dialects.Dialect, tagParser *tags.Parser, defaultTimeZone *time.Location) *Statement { - statement := &Statement{ - dialect: dialect, - tagParser: tagParser, - defaultTimeZone: defaultTimeZone, - } - statement.Reset() - return statement -} - -// SetTableName set table name -func (statement *Statement) SetTableName(tableName string) { - statement.tableName = tableName -} - -// GenRawSQL generates correct raw sql -func (statement *Statement) GenRawSQL() string { - return statement.ReplaceQuote(statement.RawSQL) -} - -// ReplaceQuote replace sql key words with quote -func (statement *Statement) ReplaceQuote(sql string) string { - if sql == "" || statement.dialect.URI().DBType == schemas.MYSQL || - statement.dialect.URI().DBType == schemas.SQLITE { - return sql - } - return statement.dialect.Quoter().Replace(sql) -} - -// SetContextCache sets context cache -func (statement *Statement) SetContextCache(ctxCache contexts.ContextCache) { - statement.Context = ctxCache -} - -// Reset reset all the statement's fields -func (statement *Statement) Reset() { - statement.RefTable = nil - statement.Start = 0 - statement.LimitN = nil - statement.ResetOrderBy() - statement.UseCascade = true - statement.JoinStr = "" - statement.joinArgs = make([]interface{}, 0) - statement.GroupByStr = "" - statement.HavingStr = "" - statement.ColumnMap = columnMap{} - statement.OmitColumnMap = columnMap{} - statement.AltTableName = "" - statement.tableName = "" - statement.idParam = nil - statement.RawSQL = "" - statement.RawParams = make([]interface{}, 0) - statement.UseCache = true - statement.UseAutoTime = true - statement.NoAutoCondition = false - statement.IsDistinct = false - 
statement.IsForUpdate = false - statement.TableAlias = "" - statement.SelectStr = "" - statement.allUseBool = false - statement.useAllCols = false - statement.MustColumnMap = make(map[string]bool) - statement.NullableMap = make(map[string]bool) - statement.CheckVersion = true - statement.unscoped = false - statement.IncrColumns = exprParams{} - statement.DecrColumns = exprParams{} - statement.ExprColumns = exprParams{} - statement.cond = builder.NewCond() - statement.BufferSize = 0 - statement.Context = nil - statement.LastError = nil -} - -// SQL adds raw sql statement -func (statement *Statement) SQL(query interface{}, args ...interface{}) *Statement { - switch query.(type) { - case (*builder.Builder): - var err error - statement.RawSQL, statement.RawParams, err = query.(*builder.Builder).ToSQL() - if err != nil { - statement.LastError = err - } - case string: - statement.RawSQL = query.(string) - statement.RawParams = args - default: - statement.LastError = ErrUnSupportedSQLType - } - - return statement -} - -func (statement *Statement) quote(s string) string { - return statement.dialect.Quoter().Quote(s) -} - -// SetRefValue set ref value -func (statement *Statement) SetRefValue(v reflect.Value) error { - var err error - statement.RefTable, err = statement.tagParser.ParseWithCache(reflect.Indirect(v)) - if err != nil { - return err - } - statement.tableName = dialects.FullTableName(statement.dialect, statement.tagParser.GetTableMapper(), v, true) - return nil -} - -func rValue(bean interface{}) reflect.Value { - return reflect.Indirect(reflect.ValueOf(bean)) -} - -// SetRefBean set ref bean -func (statement *Statement) SetRefBean(bean interface{}) error { - var err error - statement.RefTable, err = statement.tagParser.ParseWithCache(rValue(bean)) - if err != nil { - return err - } - statement.tableName = dialects.FullTableName(statement.dialect, statement.tagParser.GetTableMapper(), bean, true) - return nil -} - -func (statement *Statement) needTableName() bool 
{ - return len(statement.JoinStr) > 0 -} - -// Incr Generate "Update ... Set column = column + arg" statement -func (statement *Statement) Incr(column string, arg ...interface{}) *Statement { - if len(arg) > 0 { - statement.IncrColumns.Add(column, arg[0]) - } else { - statement.IncrColumns.Add(column, 1) - } - return statement -} - -// Decr Generate "Update ... Set column = column - arg" statement -func (statement *Statement) Decr(column string, arg ...interface{}) *Statement { - if len(arg) > 0 { - statement.DecrColumns.Add(column, arg[0]) - } else { - statement.DecrColumns.Add(column, 1) - } - return statement -} - -// SetExpr Generate "Update ... Set column = {expression}" statement -func (statement *Statement) SetExpr(column string, expression interface{}) *Statement { - if e, ok := expression.(string); ok { - statement.ExprColumns.Add(column, statement.dialect.Quoter().Replace(e)) - } else { - statement.ExprColumns.Add(column, expression) - } - return statement -} - -// ForUpdate generates "SELECT ... FOR UPDATE" statement -func (statement *Statement) ForUpdate() *Statement { - statement.IsForUpdate = true - return statement -} - -// Nullable Update use only: update columns to null when value is nullable and zero-value -func (statement *Statement) Nullable(columns ...string) { - newColumns := col2NewCols(columns...) 
- for _, nc := range newColumns { - statement.NullableMap[strings.ToLower(nc)] = true - } -} - -// Top generate LIMIT limit statement -func (statement *Statement) Top(limit int) *Statement { - statement.Limit(limit) - return statement -} - -// Limit generate LIMIT start, limit statement -func (statement *Statement) Limit(limit int, start ...int) *Statement { - statement.LimitN = &limit - if len(start) > 0 { - statement.Start = start[0] - } - return statement -} - -// SetTable tempororily set table name, the parameter could be a string or a pointer of struct -func (statement *Statement) SetTable(tableNameOrBean interface{}) error { - v := rValue(tableNameOrBean) - t := v.Type() - if t.Kind() == reflect.Struct { - var err error - statement.RefTable, err = statement.tagParser.ParseWithCache(v) - if err != nil { - return err - } - } - - statement.AltTableName = dialects.FullTableName(statement.dialect, statement.tagParser.GetTableMapper(), tableNameOrBean, true) - return nil -} - -// GroupBy generate "Group By keys" statement -func (statement *Statement) GroupBy(keys string) *Statement { - statement.GroupByStr = statement.ReplaceQuote(keys) - return statement -} - -func (statement *Statement) WriteGroupBy(w builder.Writer) error { - if statement.GroupByStr == "" { - return nil - } - _, err := fmt.Fprintf(w, " GROUP BY %s", statement.GroupByStr) - return err -} - -// Having generate "Having conditions" statement -func (statement *Statement) Having(conditions string) *Statement { - statement.HavingStr = fmt.Sprintf("HAVING %v", statement.ReplaceQuote(conditions)) - return statement -} - -func (statement *Statement) writeHaving(w builder.Writer) error { - if statement.HavingStr == "" { - return nil - } - _, err := fmt.Fprint(w, " ", statement.HavingStr) - return err -} - -// SetUnscoped always disable struct tag "deleted" -func (statement *Statement) SetUnscoped() *Statement { - statement.unscoped = true - return statement -} - -// GetUnscoped return true if it's unscoped 
-func (statement *Statement) GetUnscoped() bool { - return statement.unscoped -} - -// GenIndexSQL generated create index SQL -func (statement *Statement) GenIndexSQL() []string { - var sqls []string - tbName := statement.TableName() - for _, index := range statement.RefTable.Indexes { - if index.Type == schemas.IndexType { - sql := statement.dialect.CreateIndexSQL(tbName, index) - sqls = append(sqls, sql) - } - } - return sqls -} - -// GenUniqueSQL generates unique SQL -func (statement *Statement) GenUniqueSQL() []string { - var sqls []string - tbName := statement.TableName() - for _, index := range statement.RefTable.Indexes { - if index.Type == schemas.UniqueType { - sql := statement.dialect.CreateIndexSQL(tbName, index) - sqls = append(sqls, sql) - } - } - return sqls -} - -// GenDelIndexSQL generate delete index SQL -func (statement *Statement) GenDelIndexSQL() []string { - var sqls []string - tbName := statement.TableName() - idx := strings.Index(tbName, ".") - if idx > -1 { - tbName = tbName[idx+1:] - } - for _, index := range statement.RefTable.Indexes { - sqls = append(sqls, statement.dialect.DropIndexSQL(tbName, index)) - } - return sqls -} - -func (statement *Statement) asDBCond(fieldValue reflect.Value, fieldType reflect.Type, col *schemas.Column, allUseBool, requiredField bool) (interface{}, bool, error) { - switch fieldType.Kind() { - case reflect.Ptr: - if fieldValue.IsNil() { - return nil, true, nil - } - return statement.asDBCond(fieldValue.Elem(), fieldType.Elem(), col, allUseBool, requiredField) - case reflect.Bool: - if allUseBool || requiredField { - return fieldValue.Interface(), true, nil - } - // if a bool in a struct, it will not be as a condition because it default is false, - // please use Where() instead - return nil, false, nil - case reflect.String: - if !requiredField && fieldValue.String() == "" { - return nil, false, nil - } - // for MyString, should convert to string or panic - if fieldType.String() != reflect.String.String() { - 
return fieldValue.String(), true, nil - } - return fieldValue.Interface(), true, nil - case reflect.Int8, reflect.Int16, reflect.Int, reflect.Int32, reflect.Int64: - if !requiredField && fieldValue.Int() == 0 { - return nil, false, nil - } - return fieldValue.Interface(), true, nil - case reflect.Float32, reflect.Float64: - if !requiredField && fieldValue.Float() == 0.0 { - return nil, false, nil - } - return fieldValue.Interface(), true, nil - case reflect.Uint8, reflect.Uint16, reflect.Uint, reflect.Uint32, reflect.Uint64: - if !requiredField && fieldValue.Uint() == 0 { - return nil, false, nil - } - return fieldValue.Interface(), true, nil - case reflect.Struct: - if fieldType.ConvertibleTo(schemas.TimeType) { - t := fieldValue.Convert(schemas.TimeType).Interface().(time.Time) - if !requiredField && (t.IsZero() || !fieldValue.IsValid()) { - return nil, false, nil - } - res, err := dialects.FormatColumnTime(statement.dialect, statement.defaultTimeZone, col, t) - if err != nil { - return nil, false, err - } - return res, true, nil - } else if fieldType.ConvertibleTo(schemas.BigFloatType) { - t := fieldValue.Convert(schemas.BigFloatType).Interface().(big.Float) - v := t.String() - if v == "0" { - return nil, false, nil - } - return t.String(), true, nil - } else if _, ok := reflect.New(fieldType).Interface().(convert.Conversion); ok { - return nil, false, nil - } else if valNul, ok := fieldValue.Interface().(driver.Valuer); ok { - val, _ := valNul.Value() - if val == nil && !requiredField { - return nil, false, nil - } - return val, true, nil - } else { - if col.IsJSON { - if col.SQLType.IsText() { - bytes, err := json.DefaultJSONHandler.Marshal(fieldValue.Interface()) - if err != nil { - return nil, false, err - } - return string(bytes), true, nil - } else if col.SQLType.IsBlob() { - var bytes []byte - var err error - bytes, err = json.DefaultJSONHandler.Marshal(fieldValue.Interface()) - if err != nil { - return nil, false, err - } - return bytes, true, nil - } - 
} else { - table, err := statement.tagParser.ParseWithCache(fieldValue) - if err != nil { - return fieldValue.Interface(), true, nil - } - - if len(table.PrimaryKeys) == 1 { - pkField := reflect.Indirect(fieldValue).FieldByName(table.PKColumns()[0].FieldName) - // fix non-int pk issues - // if pkField.Int() != 0 { - if pkField.IsValid() && !utils.IsZero(pkField.Interface()) { - return pkField.Interface(), true, nil - } - return nil, false, nil - } - return nil, false, fmt.Errorf("not supported %v as %v", fieldValue.Interface(), table.PrimaryKeys) - } - } - case reflect.Array: - return nil, false, nil - case reflect.Slice, reflect.Map: - if fieldValue == reflect.Zero(fieldType) { - return nil, false, nil - } - if fieldValue.IsNil() || !fieldValue.IsValid() || fieldValue.Len() == 0 { - return nil, false, nil - } - - if col.SQLType.IsText() { - bytes, err := json.DefaultJSONHandler.Marshal(fieldValue.Interface()) - if err != nil { - return nil, false, err - } - return string(bytes), true, nil - } else if col.SQLType.IsBlob() { - var bytes []byte - var err error - if (fieldType.Kind() == reflect.Array || fieldType.Kind() == reflect.Slice) && - fieldType.Elem().Kind() == reflect.Uint8 { - if fieldValue.Len() > 0 { - return fieldValue.Bytes(), true, nil - } - return nil, false, nil - } - bytes, err = json.DefaultJSONHandler.Marshal(fieldValue.Interface()) - if err != nil { - return nil, false, err - } - return bytes, true, nil - } - return nil, false, nil - } - return fieldValue.Interface(), true, nil -} - -func (statement *Statement) buildConds2(table *schemas.Table, bean interface{}, - includeVersion bool, includeUpdated bool, includeNil bool, - includeAutoIncr bool, allUseBool bool, useAllCols bool, unscoped bool, - mustColumnMap map[string]bool, tableName, aliasName string, addedTableName bool, -) (builder.Cond, error) { - var conds []builder.Cond - for _, col := range table.Columns() { - if !includeVersion && col.IsVersion { - continue - } - if !includeUpdated && 
col.IsUpdated { - continue - } - if !includeAutoIncr && col.IsAutoIncrement { - continue - } - - if col.IsJSON { - continue - } - - var colName string - if addedTableName { - nm := tableName - if len(aliasName) > 0 { - nm = aliasName - } - colName = statement.quote(nm) + "." + statement.quote(col.Name) - } else { - colName = statement.quote(col.Name) - } - - fieldValuePtr, err := col.ValueOf(bean) - if err != nil { - continue - } else if fieldValuePtr == nil { - continue - } - - if col.IsDeleted && !unscoped { // tag "deleted" is enabled - conds = append(conds, statement.CondDeleted(col)) - } - - fieldValue := *fieldValuePtr - if fieldValue.Interface() == nil { - continue - } - - if statement.dialect.URI().DBType == schemas.MSSQL && (col.SQLType.Name == schemas.Text || - col.SQLType.IsBlob() || col.SQLType.Name == schemas.TimeStampz) { - if utils.IsValueZero(fieldValue) { - continue - } - - return nil, fmt.Errorf("column %s is a TEXT type with data %#v which cannot be as compare condition", col.Name, fieldValue.Interface()) - } - - requiredField := useAllCols - if b, ok := getFlagForColumn(mustColumnMap, col); ok { - if b { - requiredField = true - } else { - continue - } - } - - fieldType := reflect.TypeOf(fieldValue.Interface()) - if fieldType.Kind() == reflect.Ptr { - if fieldValue.IsNil() { - if includeNil { - conds = append(conds, builder.Eq{colName: nil}) - } - continue - } else if !fieldValue.IsValid() { - continue - } else { - // dereference ptr type to instance type - fieldValue = fieldValue.Elem() - fieldType = reflect.TypeOf(fieldValue.Interface()) - requiredField = true - } - } - - val, ok, err := statement.asDBCond(fieldValue, fieldType, col, allUseBool, requiredField) - if err != nil { - return nil, err - } - if !ok { - continue - } - - conds = append(conds, builder.Eq{colName: val}) - } - - return builder.And(conds...), nil -} - -// BuildConds builds condition -func (statement *Statement) BuildConds(table *schemas.Table, bean interface{}, 
includeVersion bool, includeUpdated bool, includeNil bool, includeAutoIncr bool, addedTableName bool) (builder.Cond, error) { - return statement.buildConds2(table, bean, includeVersion, includeUpdated, includeNil, includeAutoIncr, statement.allUseBool, statement.useAllCols, - statement.unscoped, statement.MustColumnMap, statement.TableName(), statement.TableAlias, addedTableName) -} - -// MergeConds merge conditions from bean and id -func (statement *Statement) MergeConds(bean interface{}) error { - if !statement.NoAutoCondition && statement.RefTable != nil { - addedTableName := (len(statement.JoinStr) > 0) - autoCond, err := statement.BuildConds(statement.RefTable, bean, true, true, false, true, addedTableName) - if err != nil { - return err - } - statement.cond = statement.cond.And(autoCond) - } - - return statement.ProcessIDParam() -} - -func (statement *Statement) quoteColumnStr(columnStr string) string { - columns := strings.Split(columnStr, ",") - return statement.dialect.Quoter().Join(columns, ",") -} - -// ConvertSQLOrArgs converts sql or args -func (statement *Statement) ConvertSQLOrArgs(sqlOrArgs ...interface{}) (string, []interface{}, error) { - sql, args, err := statement.convertSQLOrArgs(sqlOrArgs...) 
- if err != nil { - return "", nil, err - } - return statement.ReplaceQuote(sql), args, nil -} - -func (statement *Statement) convertSQLOrArgs(sqlOrArgs ...interface{}) (string, []interface{}, error) { - switch sqlOrArgs[0].(type) { - case string: - if len(sqlOrArgs) > 1 { - newArgs := make([]interface{}, 0, len(sqlOrArgs)-1) - for _, arg := range sqlOrArgs[1:] { - if v, ok := arg.(time.Time); ok { - newArgs = append(newArgs, v.In(statement.defaultTimeZone).Format("2006-01-02 15:04:05")) - } else if v, ok := arg.(*time.Time); ok && v != nil { - newArgs = append(newArgs, v.In(statement.defaultTimeZone).Format("2006-01-02 15:04:05")) - } else { - newArgs = append(newArgs, arg) - } - } - return sqlOrArgs[0].(string), newArgs, nil - } - return sqlOrArgs[0].(string), sqlOrArgs[1:], nil - case *builder.Builder: - return sqlOrArgs[0].(*builder.Builder).ToSQL() - case builder.Builder: - bd := sqlOrArgs[0].(builder.Builder) - return bd.ToSQL() - } - - return "", nil, ErrUnSupportedType -} - -func (statement *Statement) joinColumns(cols []*schemas.Column, includeTableName bool) string { - colnames := make([]string, len(cols)) - for i, col := range cols { - if includeTableName { - colnames[i] = statement.quote(statement.TableName()) + - "." + statement.quote(col.Name) - } else { - colnames[i] = statement.quote(col.Name) - } - } - return strings.Join(colnames, ", ") -} - -// CondDeleted returns the conditions whether a record is soft deleted. -func (statement *Statement) CondDeleted(col *schemas.Column) builder.Cond { - colName := statement.quote(col.Name) - if statement.JoinStr != "" { - var prefix string - if statement.TableAlias != "" { - prefix = statement.TableAlias - } else { - prefix = statement.TableName() - } - colName = statement.quote(prefix) + "." 
+ statement.quote(col.Name) - } - cond := builder.NewCond() - if col.SQLType.IsNumeric() { - cond = builder.Eq{colName: 0} - } else { - // FIXME: mssql: The conversion of a nvarchar data type to a datetime data type resulted in an out-of-range value. - if statement.dialect.URI().DBType != schemas.MSSQL { - cond = builder.Eq{colName: utils.ZeroTime1} - } - } - - if col.Nullable { - cond = cond.Or(builder.IsNull{colName}) - } - - return cond -} diff --git a/vendor/xorm.io/xorm/internal/statements/statement_args.go b/vendor/xorm.io/xorm/internal/statements/statement_args.go deleted file mode 100644 index 727d5977..00000000 --- a/vendor/xorm.io/xorm/internal/statements/statement_args.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2019 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package statements - -import ( - "xorm.io/builder" - "xorm.io/xorm/schemas" -) - -// WriteArg writes an arg -func (statement *Statement) WriteArg(w *builder.BytesWriter, arg interface{}) error { - switch argv := arg.(type) { - case *builder.Builder: - if _, err := w.WriteString("("); err != nil { - return err - } - if err := argv.WriteTo(w); err != nil { - return err - } - if _, err := w.WriteString(")"); err != nil { - return err - } - default: - if err := w.WriteByte('?'); err != nil { - return err - } - if v, ok := arg.(bool); ok && statement.dialect.URI().DBType == schemas.MSSQL { - if v { - w.Append(1) - } else { - w.Append(0) - } - } else { - w.Append(arg) - } - } - return nil -} - -// WriteArgs writes args -func (statement *Statement) WriteArgs(w *builder.BytesWriter, args []interface{}) error { - for i, arg := range args { - if err := statement.WriteArg(w, arg); err != nil { - return err - } - - if i+1 != len(args) { - if _, err := w.WriteString(","); err != nil { - return err - } - } - } - return nil -} diff --git a/vendor/xorm.io/xorm/internal/statements/table_name.go 
b/vendor/xorm.io/xorm/internal/statements/table_name.go deleted file mode 100644 index 8072a99d..00000000 --- a/vendor/xorm.io/xorm/internal/statements/table_name.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2022 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package statements - -import ( - "fmt" - "strings" - - "xorm.io/builder" - "xorm.io/xorm/schemas" -) - -// TableName return current tableName -func (statement *Statement) TableName() string { - if statement.AltTableName != "" { - return statement.AltTableName - } - - return statement.tableName -} - -// Alias set the table alias -func (statement *Statement) Alias(alias string) *Statement { - statement.TableAlias = alias - return statement -} - -func (statement *Statement) writeAlias(w builder.Writer) error { - if statement.TableAlias != "" { - if statement.dialect.URI().DBType == schemas.ORACLE { - if _, err := fmt.Fprint(w, " ", statement.quote(statement.TableAlias)); err != nil { - return err - } - } else { - if _, err := fmt.Fprint(w, " AS ", statement.quote(statement.TableAlias)); err != nil { - return err - } - } - } - return nil -} - -func (statement *Statement) writeTableName(w builder.Writer) error { - if statement.dialect.URI().DBType == schemas.MSSQL && strings.Contains(statement.TableName(), "..") { - if _, err := fmt.Fprint(w, statement.TableName()); err != nil { - return err - } - } else { - if _, err := fmt.Fprint(w, statement.quote(statement.TableName())); err != nil { - return err - } - } - return nil -} diff --git a/vendor/xorm.io/xorm/internal/statements/update.go b/vendor/xorm.io/xorm/internal/statements/update.go deleted file mode 100644 index 40159e0c..00000000 --- a/vendor/xorm.io/xorm/internal/statements/update.go +++ /dev/null @@ -1,308 +0,0 @@ -// Copyright 2017 The Xorm Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package statements - -import ( - "database/sql/driver" - "errors" - "fmt" - "reflect" - "time" - - "xorm.io/xorm/convert" - "xorm.io/xorm/dialects" - "xorm.io/xorm/internal/json" - "xorm.io/xorm/internal/utils" - "xorm.io/xorm/schemas" -) - -func (statement *Statement) ifAddColUpdate(col *schemas.Column, includeVersion, includeUpdated, includeNil, - includeAutoIncr, update bool) (bool, error) { - columnMap := statement.ColumnMap - omitColumnMap := statement.OmitColumnMap - unscoped := statement.unscoped - - if !includeVersion && col.IsVersion { - return false, nil - } - if col.IsCreated && !columnMap.Contain(col.Name) { - return false, nil - } - if !includeUpdated && col.IsUpdated { - return false, nil - } - if !includeAutoIncr && col.IsAutoIncrement { - return false, nil - } - if col.IsDeleted && !unscoped { - return false, nil - } - if omitColumnMap.Contain(col.Name) { - return false, nil - } - if len(columnMap) > 0 && !columnMap.Contain(col.Name) { - return false, nil - } - - if col.MapType == schemas.ONLYFROMDB { - return false, nil - } - - if statement.IncrColumns.IsColExist(col.Name) { - return false, nil - } else if statement.DecrColumns.IsColExist(col.Name) { - return false, nil - } else if statement.ExprColumns.IsColExist(col.Name) { - return false, nil - } - - return true, nil -} - -// BuildUpdates auto generating update columnes and values according a struct -func (statement *Statement) BuildUpdates(tableValue reflect.Value, - includeVersion, includeUpdated, includeNil, - includeAutoIncr, update bool) ([]string, []interface{}, error) { - table := statement.RefTable - allUseBool := statement.allUseBool - useAllCols := statement.useAllCols - mustColumnMap := statement.MustColumnMap - nullableMap := statement.NullableMap - - var colNames = make([]string, 0) - var args = make([]interface{}, 0) - - for _, col := range table.Columns() { - ok, err := 
statement.ifAddColUpdate(col, includeVersion, includeUpdated, includeNil, - includeAutoIncr, update) - if err != nil { - return nil, nil, err - } - if !ok { - continue - } - - fieldValuePtr, err := col.ValueOfV(&tableValue) - if err != nil { - return nil, nil, err - } - if fieldValuePtr == nil { - continue - } - - fieldValue := *fieldValuePtr - fieldType := reflect.TypeOf(fieldValue.Interface()) - if fieldType == nil { - continue - } - - requiredField := useAllCols - includeNil := useAllCols - - if b, ok := getFlagForColumn(mustColumnMap, col); ok { - if b { - requiredField = true - } else { - continue - } - } - - // !evalphobia! set fieldValue as nil when column is nullable and zero-value - if b, ok := getFlagForColumn(nullableMap, col); ok { - if b && col.Nullable && utils.IsZero(fieldValue.Interface()) { - var nilValue *int - fieldValue = reflect.ValueOf(nilValue) - fieldType = reflect.TypeOf(fieldValue.Interface()) - includeNil = true - } - } - - var val interface{} - - if fieldValue.CanAddr() { - if structConvert, ok := fieldValue.Addr().Interface().(convert.Conversion); ok { - data, err := structConvert.ToDB() - if err != nil { - return nil, nil, err - } - if data != nil { - val = data - if !col.SQLType.IsBlob() { - val = string(data) - } - } - goto APPEND - } - } - - if structConvert, ok := fieldValue.Interface().(convert.Conversion); ok && !fieldValue.IsNil() { - data, err := structConvert.ToDB() - if err != nil { - return nil, nil, err - } - if data != nil { - val = data - if !col.SQLType.IsBlob() { - val = string(data) - } - } - goto APPEND - } - - if fieldType.Kind() == reflect.Ptr { - if fieldValue.IsNil() { - if includeNil { - args = append(args, nil) - colNames = append(colNames, fmt.Sprintf("%v=?", statement.quote(col.Name))) - } - continue - } else if !fieldValue.IsValid() { - continue - } else { - // dereference ptr type to instance type - fieldValue = fieldValue.Elem() - fieldType = reflect.TypeOf(fieldValue.Interface()) - requiredField = true - } 
- } - - switch fieldType.Kind() { - case reflect.Bool: - if allUseBool || requiredField { - val = fieldValue.Interface() - } else { - // if a bool in a struct, it will not be as a condition because it default is false, - // please use Where() instead - continue - } - case reflect.String: - if !requiredField && fieldValue.String() == "" { - continue - } - // for MyString, should convert to string or panic - if fieldType.String() != reflect.String.String() { - val = fieldValue.String() - } else { - val = fieldValue.Interface() - } - case reflect.Int8, reflect.Int16, reflect.Int, reflect.Int32, reflect.Int64: - if !requiredField && fieldValue.Int() == 0 { - continue - } - val = fieldValue.Interface() - case reflect.Float32, reflect.Float64: - if !requiredField && fieldValue.Float() == 0.0 { - continue - } - val = fieldValue.Interface() - case reflect.Uint8, reflect.Uint16, reflect.Uint, reflect.Uint32, reflect.Uint64: - if !requiredField && fieldValue.Uint() == 0 { - continue - } - val = fieldValue.Interface() - case reflect.Struct: - if fieldType.ConvertibleTo(schemas.TimeType) { - t := fieldValue.Convert(schemas.TimeType).Interface().(time.Time) - if !requiredField && (t.IsZero() || !fieldValue.IsValid()) { - continue - } - val, err = dialects.FormatColumnTime(statement.dialect, statement.defaultTimeZone, col, t) - if err != nil { - return nil, nil, err - } - } else if nulType, ok := fieldValue.Interface().(driver.Valuer); ok { - val, _ = nulType.Value() - if val == nil && !requiredField { - continue - } - } else { - if !col.IsJSON { - table, err := statement.tagParser.ParseWithCache(fieldValue) - if err != nil { - val = fieldValue.Interface() - } else { - if len(table.PrimaryKeys) == 1 { - pkField := reflect.Indirect(fieldValue).FieldByName(table.PKColumns()[0].FieldName) - // fix non-int pk issues - if pkField.IsValid() && (!requiredField && !utils.IsZero(pkField.Interface())) { - val = pkField.Interface() - } else { - continue - } - } else { - return nil, nil, 
errors.New("Not supported multiple primary keys") - } - } - } else { - // Blank struct could not be as update data - if requiredField || !utils.IsStructZero(fieldValue) { - bytes, err := json.DefaultJSONHandler.Marshal(fieldValue.Interface()) - if err != nil { - return nil, nil, fmt.Errorf("mashal %v failed", fieldValue.Interface()) - } - if col.SQLType.IsText() { - val = string(bytes) - } else if col.SQLType.IsBlob() { - val = bytes - } - } else { - continue - } - } - } - case reflect.Array, reflect.Slice, reflect.Map: - if !requiredField { - if fieldValue == reflect.Zero(fieldType) { - continue - } - if fieldType.Kind() == reflect.Array { - if utils.IsArrayZero(fieldValue) { - continue - } - } else if fieldValue.IsNil() || !fieldValue.IsValid() || fieldValue.Len() == 0 { - continue - } - } - - if col.SQLType.IsText() { - bytes, err := json.DefaultJSONHandler.Marshal(fieldValue.Interface()) - if err != nil { - return nil, nil, err - } - val = string(bytes) - } else if col.SQLType.IsBlob() { - var bytes []byte - var err error - if fieldType.Kind() == reflect.Slice && - fieldType.Elem().Kind() == reflect.Uint8 { - if fieldValue.Len() > 0 { - val = fieldValue.Bytes() - } else { - continue - } - } else if fieldType.Kind() == reflect.Array && - fieldType.Elem().Kind() == reflect.Uint8 { - val = fieldValue.Slice(0, 0).Interface() - } else { - bytes, err = json.DefaultJSONHandler.Marshal(fieldValue.Interface()) - if err != nil { - return nil, nil, err - } - val = bytes - } - } else { - continue - } - default: - val = fieldValue.Interface() - } - - APPEND: - args = append(args, val) - colNames = append(colNames, fmt.Sprintf("%v = ?", statement.quote(col.Name))) - } - - return colNames, args, nil -} diff --git a/vendor/xorm.io/xorm/internal/statements/values.go b/vendor/xorm.io/xorm/internal/statements/values.go deleted file mode 100644 index 4c1360ed..00000000 --- a/vendor/xorm.io/xorm/internal/statements/values.go +++ /dev/null @@ -1,171 +0,0 @@ -// Copyright 2017 The 
Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package statements - -import ( - "database/sql" - "database/sql/driver" - "fmt" - "math/big" - "reflect" - "time" - - "xorm.io/xorm/convert" - "xorm.io/xorm/dialects" - "xorm.io/xorm/internal/json" - "xorm.io/xorm/schemas" -) - -var ( - nullFloatType = reflect.TypeOf(sql.NullFloat64{}) - bigFloatType = reflect.TypeOf(big.Float{}) -) - -// Value2Interface convert a field value of a struct to interface for putting into database -func (statement *Statement) Value2Interface(col *schemas.Column, fieldValue reflect.Value) (interface{}, error) { - if fieldValue.CanAddr() { - if fieldConvert, ok := fieldValue.Addr().Interface().(convert.Conversion); ok { - data, err := fieldConvert.ToDB() - if err != nil { - return nil, err - } - if data == nil { - if col.Nullable { - return nil, nil - } - data = []byte{} - } - if col.SQLType.IsBlob() { - return data, nil - } - return string(data), nil - } - } - - isNil := fieldValue.Kind() == reflect.Ptr && fieldValue.IsNil() - if !isNil { - if fieldConvert, ok := fieldValue.Interface().(convert.Conversion); ok { - data, err := fieldConvert.ToDB() - if err != nil { - return nil, err - } - if data == nil { - if col.Nullable { - return nil, nil - } - data = []byte{} - } - if col.SQLType.IsBlob() { - return data, nil - } - return string(data), nil - } - } - - fieldType := fieldValue.Type() - k := fieldType.Kind() - if k == reflect.Ptr { - if fieldValue.IsNil() { - return nil, nil - } else if !fieldValue.IsValid() { - return nil, nil - } else { - // !nashtsai! 
deference pointer type to instance type - fieldValue = fieldValue.Elem() - fieldType = fieldValue.Type() - k = fieldType.Kind() - } - } - - switch k { - case reflect.Bool: - return fieldValue.Bool(), nil - case reflect.String: - return fieldValue.String(), nil - case reflect.Struct: - if fieldType.ConvertibleTo(schemas.TimeType) { - t := fieldValue.Convert(schemas.TimeType).Interface().(time.Time) - tf, err := dialects.FormatColumnTime(statement.dialect, statement.defaultTimeZone, col, t) - return tf, err - } else if fieldType.ConvertibleTo(nullFloatType) { - t := fieldValue.Convert(nullFloatType).Interface().(sql.NullFloat64) - if !t.Valid { - return nil, nil - } - return t.Float64, nil - } else if fieldType.ConvertibleTo(bigFloatType) { - t := fieldValue.Convert(bigFloatType).Interface().(big.Float) - return t.String(), nil - } - - if !col.IsJSON { - // !! 增加支持driver.Valuer接口的结构,如sql.NullString - if v, ok := fieldValue.Interface().(driver.Valuer); ok { - return v.Value() - } - - fieldTable, err := statement.tagParser.ParseWithCache(fieldValue) - if err != nil { - return nil, err - } - if len(fieldTable.PrimaryKeys) == 1 { - pkField := reflect.Indirect(fieldValue).FieldByName(fieldTable.PKColumns()[0].FieldName) - return pkField.Interface(), nil - } - return nil, fmt.Errorf("no primary key for col %v", col.Name) - } - - if col.SQLType.IsText() { - bytes, err := json.DefaultJSONHandler.Marshal(fieldValue.Interface()) - if err != nil { - return nil, err - } - return string(bytes), nil - } else if col.SQLType.IsBlob() { - bytes, err := json.DefaultJSONHandler.Marshal(fieldValue.Interface()) - if err != nil { - return nil, err - } - return bytes, nil - } - return nil, fmt.Errorf("Unsupported type %v", fieldValue.Type()) - case reflect.Complex64, reflect.Complex128: - bytes, err := json.DefaultJSONHandler.Marshal(fieldValue.Interface()) - if err != nil { - return nil, err - } - return string(bytes), nil - case reflect.Array, reflect.Slice, reflect.Map: - if 
!fieldValue.IsValid() { - return fieldValue.Interface(), nil - } - - if col.SQLType.IsText() { - bytes, err := json.DefaultJSONHandler.Marshal(fieldValue.Interface()) - if err != nil { - return nil, err - } - return string(bytes), nil - } else if col.SQLType.IsBlob() { - var bytes []byte - var err error - if (k == reflect.Slice) && - (fieldValue.Type().Elem().Kind() == reflect.Uint8) { - bytes = fieldValue.Bytes() - } else { - bytes, err = json.DefaultJSONHandler.Marshal(fieldValue.Interface()) - if err != nil { - return nil, err - } - } - return bytes, nil - } - return nil, ErrUnSupportedType - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - return fieldValue.Uint(), nil - default: - return fieldValue.Interface(), nil - } -} diff --git a/vendor/xorm.io/xorm/internal/utils/builder.go b/vendor/xorm.io/xorm/internal/utils/builder.go deleted file mode 100644 index bc97526f..00000000 --- a/vendor/xorm.io/xorm/internal/utils/builder.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2022 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package utils - -import ( - "fmt" - - "xorm.io/builder" -) - -type BuildReader interface { - String() string - Args() []interface{} -} - -// WriteBuilder writes writers to one -func WriteBuilder(w *builder.BytesWriter, inputs ...BuildReader) error { - for _, input := range inputs { - if _, err := fmt.Fprint(w, input.String()); err != nil { - return err - } - w.Append(input.Args()...) - } - return nil -} diff --git a/vendor/xorm.io/xorm/internal/utils/name.go b/vendor/xorm.io/xorm/internal/utils/name.go deleted file mode 100644 index aeef683d..00000000 --- a/vendor/xorm.io/xorm/internal/utils/name.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2020 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package utils - -import ( - "fmt" - "strings" -) - -// IndexName returns index name -func IndexName(tableName, idxName string) string { - return fmt.Sprintf("IDX_%v_%v", tableName, idxName) -} - -// SeqName returns sequence name for some table -func SeqName(tableName string) string { - return "SEQ_" + strings.ToUpper(tableName) -} diff --git a/vendor/xorm.io/xorm/internal/utils/new.go b/vendor/xorm.io/xorm/internal/utils/new.go deleted file mode 100644 index e3b4eae8..00000000 --- a/vendor/xorm.io/xorm/internal/utils/new.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2021 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package utils - -import "reflect" - -// New creates a value according type -func New(tp reflect.Type, length, cap int) reflect.Value { - switch tp.Kind() { - case reflect.Slice: - slice := reflect.MakeSlice(tp, length, cap) - x := reflect.New(slice.Type()) - x.Elem().Set(slice) - return x - case reflect.Map: - mp := reflect.MakeMapWithSize(tp, cap) - x := reflect.New(mp.Type()) - x.Elem().Set(mp) - return x - default: - return reflect.New(tp) - } -} diff --git a/vendor/xorm.io/xorm/internal/utils/reflect.go b/vendor/xorm.io/xorm/internal/utils/reflect.go deleted file mode 100644 index 7973d4d3..00000000 --- a/vendor/xorm.io/xorm/internal/utils/reflect.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2020 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package utils - -import ( - "reflect" -) - -// ReflectValue returns value of a bean -func ReflectValue(bean interface{}) reflect.Value { - return reflect.Indirect(reflect.ValueOf(bean)) -} diff --git a/vendor/xorm.io/xorm/internal/utils/slice.go b/vendor/xorm.io/xorm/internal/utils/slice.go deleted file mode 100644 index 82289b1a..00000000 --- a/vendor/xorm.io/xorm/internal/utils/slice.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2020 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package utils - -import "sort" - -// SliceEq return true if two slice have the same elements even if different sort. -func SliceEq(left, right []string) bool { - if len(left) != len(right) { - return false - } - sort.Strings(left) - sort.Strings(right) - for i := 0; i < len(left); i++ { - if left[i] != right[i] { - return false - } - } - return true -} - -// IndexSlice search c in slice s and return the index, return -1 if s don't contain c -func IndexSlice(s []string, c string) int { - for i, ss := range s { - if c == ss { - return i - } - } - return -1 -} diff --git a/vendor/xorm.io/xorm/internal/utils/sql.go b/vendor/xorm.io/xorm/internal/utils/sql.go deleted file mode 100644 index 369ca2b8..00000000 --- a/vendor/xorm.io/xorm/internal/utils/sql.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2020 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package utils - -import ( - "strings" -) - -// IsSubQuery returns true if it contains a sub query -func IsSubQuery(tbName string) bool { - const selStr = "select" - if len(tbName) <= len(selStr)+1 { - return false - } - - return strings.EqualFold(tbName[:len(selStr)], selStr) || - strings.EqualFold(tbName[:len(selStr)+1], "("+selStr) -} diff --git a/vendor/xorm.io/xorm/internal/utils/strings.go b/vendor/xorm.io/xorm/internal/utils/strings.go deleted file mode 100644 index 159e2876..00000000 --- a/vendor/xorm.io/xorm/internal/utils/strings.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2017 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package utils - -import ( - "strings" -) - -// IndexNoCase index a string in a string with no care of capitalize -func IndexNoCase(s, sep string) int { - return strings.Index(strings.ToLower(s), strings.ToLower(sep)) -} - -// SplitNoCase split a string by a separator with no care of capitalize -func SplitNoCase(s, sep string) []string { - idx := IndexNoCase(s, sep) - if idx < 0 { - return []string{s} - } - return strings.Split(s, s[idx:idx+len(sep)]) -} - -// SplitNNoCase split n by a separator with no care of capitalize -func SplitNNoCase(s, sep string, n int) []string { - idx := IndexNoCase(s, sep) - if idx < 0 { - return []string{s} - } - return strings.SplitN(s, s[idx:idx+len(sep)], n) -} diff --git a/vendor/xorm.io/xorm/internal/utils/zero.go b/vendor/xorm.io/xorm/internal/utils/zero.go deleted file mode 100644 index 007e3c33..00000000 --- a/vendor/xorm.io/xorm/internal/utils/zero.go +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2020 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package utils - -import ( - "reflect" - "time" -) - -// Zeroable represents an interface which could know if it's a zero value -type Zeroable interface { - IsZero() bool -} - -var nilTime *time.Time - -// IsZero returns false if k is nil or has a zero value -func IsZero(k interface{}) bool { - if k == nil { - return true - } - - switch t := k.(type) { - case int: - return t == 0 - case int8: - return t == 0 - case int16: - return t == 0 - case int32: - return t == 0 - case int64: - return t == 0 - case uint: - return t == 0 - case uint8: - return t == 0 - case uint16: - return t == 0 - case uint32: - return t == 0 - case uint64: - return t == 0 - case float32: - return t == 0 - case float64: - return t == 0 - case bool: - return !t - case string: - return t == "" - case *time.Time: - return t == nilTime || IsTimeZero(*t) - case time.Time: - return IsTimeZero(t) - case Zeroable: - return k.(Zeroable) == nil || k.(Zeroable).IsZero() - case reflect.Value: // for go version less than 1.13 because reflect.Value has no method IsZero - return IsValueZero(k.(reflect.Value)) - } - - return IsValueZero(reflect.ValueOf(k)) -} - -var zeroType = reflect.TypeOf((*Zeroable)(nil)).Elem() - -// IsValueZero returns true if the reflect Value is a zero -func IsValueZero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Slice: - return v.IsNil() - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int, reflect.Int64: - return v.Int() == 0 - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint, reflect.Uint64: - return v.Uint() == 0 - case reflect.String: - return v.Len() == 0 - case reflect.Ptr: - if v.IsNil() { - return true - } - return IsValueZero(v.Elem()) - case reflect.Struct: - return IsStructZero(v) - case reflect.Array: - return IsArrayZero(v) - } - return false -} - -// IsStructZero returns true if the Value is a struct and all fields is zero -func IsStructZero(v reflect.Value) bool { - 
if !v.IsValid() || v.NumField() == 0 { - return true - } - - if v.Type().Implements(zeroType) { - f := v.MethodByName("IsZero") - if f.IsValid() { - res := f.Call(nil) - return len(res) == 1 && res[0].Bool() - } - } - - for i := 0; i < v.NumField(); i++ { - field := v.Field(i) - switch field.Kind() { - case reflect.Ptr: - field = field.Elem() - fallthrough - case reflect.Struct: - if !IsStructZero(field) { - return false - } - default: - if field.CanInterface() && !IsZero(field.Interface()) { - return false - } - } - } - return true -} - -// IsArrayZero returns true is a slice of array is zero -func IsArrayZero(v reflect.Value) bool { - if !v.IsValid() || v.Len() == 0 { - return true - } - - for i := 0; i < v.Len(); i++ { - if !IsZero(v.Index(i).Interface()) { - return false - } - } - - return true -} - -// represents all zero times -const ( - ZeroTime0 = "0000-00-00 00:00:00" - ZeroTime1 = "0001-01-01 00:00:00" -) - -// IsTimeZero return true if a time is zero -func IsTimeZero(t time.Time) bool { - return t.IsZero() || t.Format("2006-01-02 15:04:05") == ZeroTime0 || - t.Format("2006-01-02 15:04:05") == ZeroTime1 -} diff --git a/vendor/xorm.io/xorm/log/logger.go b/vendor/xorm.io/xorm/log/logger.go deleted file mode 100644 index b8798c3f..00000000 --- a/vendor/xorm.io/xorm/log/logger.go +++ /dev/null @@ -1,208 +0,0 @@ -// Copyright 2015 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package log - -import ( - "fmt" - "io" - "log" -) - -// LogLevel defines a log level -type LogLevel int - -// enumerate all LogLevels -const ( - // !nashtsai! 
following level also match syslog.Priority value - LOG_DEBUG LogLevel = iota - LOG_INFO - LOG_WARNING - LOG_ERR - LOG_OFF - LOG_UNKNOWN -) - -// default log options -const ( - DEFAULT_LOG_PREFIX = "[xorm]" - DEFAULT_LOG_FLAG = log.Ldate | log.Lmicroseconds - DEFAULT_LOG_LEVEL = LOG_DEBUG -) - -// Logger is a logger interface -type Logger interface { - Debug(v ...interface{}) - Debugf(format string, v ...interface{}) - Error(v ...interface{}) - Errorf(format string, v ...interface{}) - Info(v ...interface{}) - Infof(format string, v ...interface{}) - Warn(v ...interface{}) - Warnf(format string, v ...interface{}) - - Level() LogLevel - SetLevel(l LogLevel) - - ShowSQL(show ...bool) - IsShowSQL() bool -} - -var _ Logger = DiscardLogger{} - -// DiscardLogger don't log implementation for ILogger -type DiscardLogger struct{} - -// Debug empty implementation -func (DiscardLogger) Debug(v ...interface{}) {} - -// Debugf empty implementation -func (DiscardLogger) Debugf(format string, v ...interface{}) {} - -// Error empty implementation -func (DiscardLogger) Error(v ...interface{}) {} - -// Errorf empty implementation -func (DiscardLogger) Errorf(format string, v ...interface{}) {} - -// Info empty implementation -func (DiscardLogger) Info(v ...interface{}) {} - -// Infof empty implementation -func (DiscardLogger) Infof(format string, v ...interface{}) {} - -// Warn empty implementation -func (DiscardLogger) Warn(v ...interface{}) {} - -// Warnf empty implementation -func (DiscardLogger) Warnf(format string, v ...interface{}) {} - -// Level empty implementation -func (DiscardLogger) Level() LogLevel { - return LOG_UNKNOWN -} - -// SetLevel empty implementation -func (DiscardLogger) SetLevel(l LogLevel) {} - -// ShowSQL empty implementation -func (DiscardLogger) ShowSQL(show ...bool) {} - -// IsShowSQL empty implementation -func (DiscardLogger) IsShowSQL() bool { - return false -} - -// SimpleLogger is the default implment of ILogger -type SimpleLogger struct { - DEBUG 
*log.Logger - ERR *log.Logger - INFO *log.Logger - WARN *log.Logger - level LogLevel - showSQL bool -} - -var _ Logger = &SimpleLogger{} - -// NewSimpleLogger use a special io.Writer as logger output -func NewSimpleLogger(out io.Writer) *SimpleLogger { - return NewSimpleLogger2(out, DEFAULT_LOG_PREFIX, DEFAULT_LOG_FLAG) -} - -// NewSimpleLogger2 let you customrize your logger prefix and flag -func NewSimpleLogger2(out io.Writer, prefix string, flag int) *SimpleLogger { - return NewSimpleLogger3(out, prefix, flag, DEFAULT_LOG_LEVEL) -} - -// NewSimpleLogger3 let you customrize your logger prefix and flag and logLevel -func NewSimpleLogger3(out io.Writer, prefix string, flag int, l LogLevel) *SimpleLogger { - return &SimpleLogger{ - DEBUG: log.New(out, fmt.Sprintf("%s [debug] ", prefix), flag), - ERR: log.New(out, fmt.Sprintf("%s [error] ", prefix), flag), - INFO: log.New(out, fmt.Sprintf("%s [info] ", prefix), flag), - WARN: log.New(out, fmt.Sprintf("%s [warn] ", prefix), flag), - level: l, - } -} - -// Error implement ILogger -func (s *SimpleLogger) Error(v ...interface{}) { - if s.level <= LOG_ERR { - _ = s.ERR.Output(2, fmt.Sprintln(v...)) - } -} - -// Errorf implement ILogger -func (s *SimpleLogger) Errorf(format string, v ...interface{}) { - if s.level <= LOG_ERR { - _ = s.ERR.Output(2, fmt.Sprintf(format, v...)) - } -} - -// Debug implement ILogger -func (s *SimpleLogger) Debug(v ...interface{}) { - if s.level <= LOG_DEBUG { - _ = s.DEBUG.Output(2, fmt.Sprintln(v...)) - } -} - -// Debugf implement ILogger -func (s *SimpleLogger) Debugf(format string, v ...interface{}) { - if s.level <= LOG_DEBUG { - _ = s.DEBUG.Output(2, fmt.Sprintf(format, v...)) - } -} - -// Info implement ILogger -func (s *SimpleLogger) Info(v ...interface{}) { - if s.level <= LOG_INFO { - _ = s.INFO.Output(2, fmt.Sprintln(v...)) - } -} - -// Infof implement ILogger -func (s *SimpleLogger) Infof(format string, v ...interface{}) { - if s.level <= LOG_INFO { - _ = s.INFO.Output(2, 
fmt.Sprintf(format, v...)) - } -} - -// Warn implement ILogger -func (s *SimpleLogger) Warn(v ...interface{}) { - if s.level <= LOG_WARNING { - _ = s.WARN.Output(2, fmt.Sprintln(v...)) - } -} - -// Warnf implement ILogger -func (s *SimpleLogger) Warnf(format string, v ...interface{}) { - if s.level <= LOG_WARNING { - _ = s.WARN.Output(2, fmt.Sprintf(format, v...)) - } -} - -// Level implement ILogger -func (s *SimpleLogger) Level() LogLevel { - return s.level -} - -// SetLevel implement ILogger -func (s *SimpleLogger) SetLevel(l LogLevel) { - s.level = l -} - -// ShowSQL implement ILogger -func (s *SimpleLogger) ShowSQL(show ...bool) { - if len(show) == 0 { - s.showSQL = true - return - } - s.showSQL = show[0] -} - -// IsShowSQL implement ILogger -func (s *SimpleLogger) IsShowSQL() bool { - return s.showSQL -} diff --git a/vendor/xorm.io/xorm/log/logger_context.go b/vendor/xorm.io/xorm/log/logger_context.go deleted file mode 100644 index 46802576..00000000 --- a/vendor/xorm.io/xorm/log/logger_context.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2020 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package log - -import ( - "fmt" - - "xorm.io/xorm/contexts" -) - -// LogContext represents a log context -type LogContext contexts.ContextHook - -// SQLLogger represents an interface to log SQL -type SQLLogger interface { - BeforeSQL(context LogContext) // only invoked when IsShowSQL is true - AfterSQL(context LogContext) // only invoked when IsShowSQL is true -} - -// ContextLogger represents a logger interface with context -type ContextLogger interface { - SQLLogger - - Debugf(format string, v ...interface{}) - Errorf(format string, v ...interface{}) - Infof(format string, v ...interface{}) - Warnf(format string, v ...interface{}) - - Level() LogLevel - SetLevel(l LogLevel) - - ShowSQL(show ...bool) - IsShowSQL() bool -} - -var ( - _ ContextLogger = &LoggerAdapter{} -) - -// enumerate all the context keys -var ( - SessionIDKey = "__xorm_session_id" - SessionKey = "__xorm_session_key" - SessionShowSQLKey = "__xorm_show_sql" -) - -// LoggerAdapter wraps a Logger interface as LoggerContext interface -type LoggerAdapter struct { - logger Logger -} - -// NewLoggerAdapter creates an adapter for old xorm logger interface -func NewLoggerAdapter(logger Logger) ContextLogger { - return &LoggerAdapter{ - logger: logger, - } -} - -// BeforeSQL implements ContextLogger -func (l *LoggerAdapter) BeforeSQL(ctx LogContext) {} - -// AfterSQL implements ContextLogger -func (l *LoggerAdapter) AfterSQL(ctx LogContext) { - var sessionPart string - v := ctx.Ctx.Value(SessionIDKey) - if key, ok := v.(string); ok { - sessionPart = fmt.Sprintf(" [%s]", key) - } - if ctx.ExecuteTime > 0 { - l.logger.Infof("[SQL]%s %s %v - %v", sessionPart, ctx.SQL, ctx.Args, ctx.ExecuteTime) - } else { - l.logger.Infof("[SQL]%s %s %v", sessionPart, ctx.SQL, ctx.Args) - } -} - -// Debugf implements ContextLogger -func (l *LoggerAdapter) Debugf(format string, v ...interface{}) { - l.logger.Debugf(format, v...) 
-} - -// Errorf implements ContextLogger -func (l *LoggerAdapter) Errorf(format string, v ...interface{}) { - l.logger.Errorf(format, v...) -} - -// Infof implements ContextLogger -func (l *LoggerAdapter) Infof(format string, v ...interface{}) { - l.logger.Infof(format, v...) -} - -// Warnf implements ContextLogger -func (l *LoggerAdapter) Warnf(format string, v ...interface{}) { - l.logger.Warnf(format, v...) -} - -// Level implements ContextLogger -func (l *LoggerAdapter) Level() LogLevel { - return l.logger.Level() -} - -// SetLevel implements ContextLogger -func (l *LoggerAdapter) SetLevel(lv LogLevel) { - l.logger.SetLevel(lv) -} - -// ShowSQL implements ContextLogger -func (l *LoggerAdapter) ShowSQL(show ...bool) { - l.logger.ShowSQL(show...) -} - -// IsShowSQL implements ContextLogger -func (l *LoggerAdapter) IsShowSQL() bool { - return l.logger.IsShowSQL() -} diff --git a/vendor/xorm.io/xorm/log/syslogger.go b/vendor/xorm.io/xorm/log/syslogger.go deleted file mode 100644 index 44272586..00000000 --- a/vendor/xorm.io/xorm/log/syslogger.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright 2015 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !windows && !nacl && !plan9 -// +build !windows,!nacl,!plan9 - -package log - -import ( - "fmt" - "log/syslog" -) - -var _ Logger = &SyslogLogger{} - -// SyslogLogger will be depricated -type SyslogLogger struct { - w *syslog.Writer - showSQL bool -} - -// NewSyslogLogger implements Logger -func NewSyslogLogger(w *syslog.Writer) *SyslogLogger { - return &SyslogLogger{w: w} -} - -// Debug log content as Debug -func (s *SyslogLogger) Debug(v ...interface{}) { - _ = s.w.Debug(fmt.Sprint(v...)) -} - -// Debugf log content as Debug and format -func (s *SyslogLogger) Debugf(format string, v ...interface{}) { - _ = s.w.Debug(fmt.Sprintf(format, v...)) -} - -// Error log content as Error -func (s *SyslogLogger) Error(v ...interface{}) { - _ = s.w.Err(fmt.Sprint(v...)) -} - -// Errorf log content as Errorf and format -func (s *SyslogLogger) Errorf(format string, v ...interface{}) { - _ = s.w.Err(fmt.Sprintf(format, v...)) -} - -// Info log content as Info -func (s *SyslogLogger) Info(v ...interface{}) { - _ = s.w.Info(fmt.Sprint(v...)) -} - -// Infof log content as Infof and format -func (s *SyslogLogger) Infof(format string, v ...interface{}) { - _ = s.w.Info(fmt.Sprintf(format, v...)) -} - -// Warn log content as Warn -func (s *SyslogLogger) Warn(v ...interface{}) { - _ = s.w.Warning(fmt.Sprint(v...)) -} - -// Warnf log content as Warnf and format -func (s *SyslogLogger) Warnf(format string, v ...interface{}) { - _ = s.w.Warning(fmt.Sprintf(format, v...)) -} - -// Level shows log level -func (s *SyslogLogger) Level() LogLevel { - return LOG_UNKNOWN -} - -// SetLevel always return error, as current log/syslog package doesn't allow to set priority level after syslog.Writer created -func (s *SyslogLogger) SetLevel(l LogLevel) {} - -// ShowSQL set if logging SQL -func (s *SyslogLogger) ShowSQL(show ...bool) { - if len(show) == 0 { - s.showSQL = true - return - } - s.showSQL = show[0] -} - -// IsShowSQL if logging SQL -func (s *SyslogLogger) IsShowSQL() bool { - 
return s.showSQL -} diff --git a/vendor/xorm.io/xorm/names/mapper.go b/vendor/xorm.io/xorm/names/mapper.go deleted file mode 100644 index 69f67171..00000000 --- a/vendor/xorm.io/xorm/names/mapper.go +++ /dev/null @@ -1,281 +0,0 @@ -// Copyright 2019 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package names - -import ( - "strings" - "sync" - "unsafe" -) - -// Mapper represents a name convertation between struct's fields name and table's column name -type Mapper interface { - Obj2Table(string) string - Table2Obj(string) string -} - -// CacheMapper represents a cache mapper -type CacheMapper struct { - oriMapper Mapper - obj2tableCache map[string]string - obj2tableMutex sync.RWMutex - table2objCache map[string]string - table2objMutex sync.RWMutex -} - -// NewCacheMapper creates a cache mapper -func NewCacheMapper(mapper Mapper) *CacheMapper { - return &CacheMapper{oriMapper: mapper, obj2tableCache: make(map[string]string), - table2objCache: make(map[string]string), - } -} - -// Obj2Table implements Mapper -func (m *CacheMapper) Obj2Table(o string) string { - m.obj2tableMutex.RLock() - t, ok := m.obj2tableCache[o] - m.obj2tableMutex.RUnlock() - if ok { - return t - } - - t = m.oriMapper.Obj2Table(o) - m.obj2tableMutex.Lock() - m.obj2tableCache[o] = t - m.obj2tableMutex.Unlock() - return t -} - -// Table2Obj implements Mapper -func (m *CacheMapper) Table2Obj(t string) string { - m.table2objMutex.RLock() - o, ok := m.table2objCache[t] - m.table2objMutex.RUnlock() - if ok { - return o - } - - o = m.oriMapper.Table2Obj(t) - m.table2objMutex.Lock() - m.table2objCache[t] = o - m.table2objMutex.Unlock() - return o -} - -// SameMapper implements Mapper and provides same name between struct and -// database table -type SameMapper struct { -} - -// Obj2Table implements Mapper -func (m SameMapper) Obj2Table(o string) string { - return o -} - -// Table2Obj implements Mapper 
-func (m SameMapper) Table2Obj(t string) string { - return t -} - -// SnakeMapper implements IMapper and provides name translation between -// struct and database table -type SnakeMapper struct { -} - -func b2s(b []byte) string { - return *(*string)(unsafe.Pointer(&b)) -} - -func snakeCasedName(name string) string { - newstr := make([]byte, 0, len(name)+1) - for i := 0; i < len(name); i++ { - c := name[i] - if isUpper := 'A' <= c && c <= 'Z'; isUpper { - if i > 0 { - newstr = append(newstr, '_') - } - c += 'a' - 'A' - } - newstr = append(newstr, c) - } - - return b2s(newstr) -} - -// Obj2Table implements Mapper -func (mapper SnakeMapper) Obj2Table(name string) string { - return snakeCasedName(name) -} - -func titleCasedName(name string) string { - newstr := make([]byte, 0, len(name)) - upNextChar := true - - name = strings.ToLower(name) - - for i := 0; i < len(name); i++ { - c := name[i] - switch { - case upNextChar: - upNextChar = false - if 'a' <= c && c <= 'z' { - c -= 'a' - 'A' - } - case c == '_': - upNextChar = true - continue - } - - newstr = append(newstr, c) - } - - return b2s(newstr) -} - -// Table2Obj implements Mapper -func (mapper SnakeMapper) Table2Obj(name string) string { - return titleCasedName(name) -} - -// GonicMapper implements IMapper. It will consider initialisms when mapping names. -// E.g. 
id -> ID, user -> User and to table names: UserID -> user_id, MyUID -> my_uid -type GonicMapper map[string]bool - -func isASCIIUpper(r rune) bool { - return 'A' <= r && r <= 'Z' -} - -func toASCIIUpper(r rune) rune { - if 'a' <= r && r <= 'z' { - r -= ('a' - 'A') - } - return r -} - -func gonicCasedName(name string) string { - newstr := make([]rune, 0, len(name)+3) - for idx, chr := range name { - if isASCIIUpper(chr) && idx > 0 { - if !isASCIIUpper(newstr[len(newstr)-1]) { - newstr = append(newstr, '_') - } - } - - if !isASCIIUpper(chr) && idx > 1 { - l := len(newstr) - if isASCIIUpper(newstr[l-1]) && isASCIIUpper(newstr[l-2]) { - newstr = append(newstr, newstr[l-1]) - newstr[l-1] = '_' - } - } - - newstr = append(newstr, chr) - } - return strings.ToLower(string(newstr)) -} - -// Obj2Table implements Mapper -func (mapper GonicMapper) Obj2Table(name string) string { - return gonicCasedName(name) -} - -// Table2Obj implements Mapper -func (mapper GonicMapper) Table2Obj(name string) string { - newstr := make([]rune, 0) - - name = strings.ToLower(name) - parts := strings.Split(name, "_") - - for _, p := range parts { - _, isInitialism := mapper[strings.ToUpper(p)] - for i, r := range p { - if i == 0 || isInitialism { - r = toASCIIUpper(r) - } - newstr = append(newstr, r) - } - } - - return string(newstr) -} - -// LintGonicMapper is A GonicMapper that contains a list of common initialisms taken from golang/lint -var LintGonicMapper = GonicMapper{ - "API": true, - "ASCII": true, - "CPU": true, - "CSS": true, - "DNS": true, - "EOF": true, - "GUID": true, - "HTML": true, - "HTTP": true, - "HTTPS": true, - "ID": true, - "IP": true, - "JSON": true, - "LHS": true, - "QPS": true, - "RAM": true, - "RHS": true, - "RPC": true, - "SLA": true, - "SMTP": true, - "SSH": true, - "TLS": true, - "TTL": true, - "UI": true, - "UID": true, - "UUID": true, - "URI": true, - "URL": true, - "UTF8": true, - "VM": true, - "XML": true, - "XSRF": true, - "XSS": true, -} - -// PrefixMapper 
provides prefix table name support -type PrefixMapper struct { - Mapper Mapper - Prefix string -} - -// Obj2Table implements Mapper -func (mapper PrefixMapper) Obj2Table(name string) string { - return mapper.Prefix + mapper.Mapper.Obj2Table(name) -} - -// Table2Obj implements Mapper -func (mapper PrefixMapper) Table2Obj(name string) string { - return mapper.Mapper.Table2Obj(name[len(mapper.Prefix):]) -} - -// NewPrefixMapper creates a prefix mapper -func NewPrefixMapper(mapper Mapper, prefix string) PrefixMapper { - return PrefixMapper{mapper, prefix} -} - -// SuffixMapper provides suffix table name support -type SuffixMapper struct { - Mapper Mapper - Suffix string -} - -// Obj2Table implements Mapper -func (mapper SuffixMapper) Obj2Table(name string) string { - return mapper.Mapper.Obj2Table(name) + mapper.Suffix -} - -// Table2Obj implements Mapper -func (mapper SuffixMapper) Table2Obj(name string) string { - return mapper.Mapper.Table2Obj(name[:len(name)-len(mapper.Suffix)]) -} - -// NewSuffixMapper creates a suffix mapper -func NewSuffixMapper(mapper Mapper, suffix string) SuffixMapper { - return SuffixMapper{mapper, suffix} -} diff --git a/vendor/xorm.io/xorm/names/table_name.go b/vendor/xorm.io/xorm/names/table_name.go deleted file mode 100644 index d7d71b51..00000000 --- a/vendor/xorm.io/xorm/names/table_name.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2020 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package names - -import ( - "reflect" - "sync" -) - -// TableName table name interface to define customerize table name -type TableName interface { - TableName() string -} - -type TableComment interface { - TableComment() string -} - -var ( - tpTableName = reflect.TypeOf((*TableName)(nil)).Elem() - tpTableComment = reflect.TypeOf((*TableComment)(nil)).Elem() - tvCache sync.Map - tcCache sync.Map -) - -// GetTableName returns table name -func GetTableName(mapper Mapper, v reflect.Value) string { - if v.Type().Implements(tpTableName) { - return v.Interface().(TableName).TableName() - } - - if v.Kind() == reflect.Ptr { - v = v.Elem() - if v.Type().Implements(tpTableName) { - return v.Interface().(TableName).TableName() - } - } else if v.CanAddr() { - v1 := v.Addr() - if v1.Type().Implements(tpTableName) { - return v1.Interface().(TableName).TableName() - } - } else { - name, ok := tvCache.Load(v.Type()) - if ok { - if name.(string) != "" { - return name.(string) - } - } else { - v2 := reflect.New(v.Type()) - if v2.Type().Implements(tpTableName) { - tableName := v2.Interface().(TableName).TableName() - tvCache.Store(v.Type(), tableName) - return tableName - } - - tvCache.Store(v.Type(), "") - } - } - - return mapper.Obj2Table(v.Type().Name()) -} - -// GetTableComment returns table comment -func GetTableComment(v reflect.Value) string { - if v.Type().Implements(tpTableComment) { - return v.Interface().(TableComment).TableComment() - } - - if v.Kind() == reflect.Ptr { - v = v.Elem() - if v.Type().Implements(tpTableComment) { - return v.Interface().(TableComment).TableComment() - } - } else if v.CanAddr() { - v1 := v.Addr() - if v1.Type().Implements(tpTableComment) { - return v1.Interface().(TableComment).TableComment() - } - } else { - comment, ok := tcCache.Load(v.Type()) - if ok { - if comment.(string) != "" { - return comment.(string) - } - } else { - v2 := reflect.New(v.Type()) - if v2.Type().Implements(tpTableComment) { - tableComment := 
v2.Interface().(TableComment).TableComment() - tcCache.Store(v.Type(), tableComment) - return tableComment - } - - tcCache.Store(v.Type(), "") - } - } - - return "" -} diff --git a/vendor/xorm.io/xorm/processors.go b/vendor/xorm.io/xorm/processors.go deleted file mode 100644 index 8697e302..00000000 --- a/vendor/xorm.io/xorm/processors.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2015 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xorm - -// BeforeInsertProcessor executed before an object is initially persisted to the database -type BeforeInsertProcessor interface { - BeforeInsert() -} - -// BeforeUpdateProcessor executed before an object is updated -type BeforeUpdateProcessor interface { - BeforeUpdate() -} - -// BeforeDeleteProcessor executed before an object is deleted -type BeforeDeleteProcessor interface { - BeforeDelete() -} - -// BeforeSetProcessor executed before data set to the struct fields -type BeforeSetProcessor interface { - BeforeSet(string, Cell) -} - -// AfterSetProcessor executed after data set to the struct fields -type AfterSetProcessor interface { - AfterSet(string, Cell) -} - -// AfterInsertProcessor executed after an object is persisted to the database -type AfterInsertProcessor interface { - AfterInsert() -} - -// AfterUpdateProcessor executed after an object has been updated -type AfterUpdateProcessor interface { - AfterUpdate() -} - -// AfterDeleteProcessor executed after an object has been deleted -type AfterDeleteProcessor interface { - AfterDelete() -} - -// AfterLoadProcessor executed after an ojbect has been loaded from database -type AfterLoadProcessor interface { - AfterLoad() -} - -// AfterLoadSessionProcessor executed after an ojbect has been loaded from database with session parameter -type AfterLoadSessionProcessor interface { - AfterLoad(*Session) -} - -type executedProcessorFunc func(*Session, interface{}) error - 
-type executedProcessor struct { - fun executedProcessorFunc - session *Session - bean interface{} -} - -func (executor *executedProcessor) execute() error { - return executor.fun(executor.session, executor.bean) -} - -func (session *Session) executeProcessors() error { - processors := session.afterProcessors - session.afterProcessors = make([]executedProcessor, 0) - for _, processor := range processors { - if err := processor.execute(); err != nil { - return err - } - } - return nil -} - -func cleanupProcessorsClosures(slices *[]func(interface{})) { - if len(*slices) > 0 { - *slices = make([]func(interface{}), 0) - } -} - -func executeBeforeClosures(session *Session, bean interface{}) { - // handle before delete processors - for _, closure := range session.beforeClosures { - closure(bean) - } - cleanupProcessorsClosures(&session.beforeClosures) -} - -func executeBeforeSet(bean interface{}, fields []string, scanResults []interface{}) { - if b, hasBeforeSet := bean.(BeforeSetProcessor); hasBeforeSet { - for ii, key := range fields { - b.BeforeSet(key, Cell(scanResults[ii].(*interface{}))) - } - } -} - -func executeAfterSet(bean interface{}, fields []string, scanResults []interface{}) { - if b, hasAfterSet := bean.(AfterSetProcessor); hasAfterSet { - for ii, key := range fields { - b.AfterSet(key, Cell(scanResults[ii].(*interface{}))) - } - } -} - -func buildAfterProcessors(session *Session, bean interface{}) { - // handle afterClosures - for _, closure := range session.afterClosures { - session.afterProcessors = append(session.afterProcessors, executedProcessor{ - fun: func(sess *Session, bean interface{}) error { - closure(bean) - return nil - }, - session: session, - bean: bean, - }) - } - - if a, has := bean.(AfterLoadProcessor); has { - session.afterProcessors = append(session.afterProcessors, executedProcessor{ - fun: func(sess *Session, bean interface{}) error { - a.AfterLoad() - return nil - }, - session: session, - bean: bean, - }) - } - - if a, has := 
bean.(AfterLoadSessionProcessor); has { - session.afterProcessors = append(session.afterProcessors, executedProcessor{ - fun: func(sess *Session, bean interface{}) error { - a.AfterLoad(sess) - return nil - }, - session: session, - bean: bean, - }) - } -} diff --git a/vendor/xorm.io/xorm/rows.go b/vendor/xorm.io/xorm/rows.go deleted file mode 100644 index 4801c300..00000000 --- a/vendor/xorm.io/xorm/rows.go +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright 2015 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xorm - -import ( - "errors" - "fmt" - "reflect" - - "xorm.io/builder" - "xorm.io/xorm/core" -) - -// Rows rows wrapper a rows to -type Rows struct { - session *Session - rows *core.Rows - beanType reflect.Type -} - -func newRows(session *Session, bean interface{}) (*Rows, error) { - rows := new(Rows) - rows.session = session - rows.beanType = reflect.Indirect(reflect.ValueOf(bean)).Type() - - var sqlStr string - var args []interface{} - var err error - - beanValue := reflect.ValueOf(bean) - if beanValue.Kind() != reflect.Ptr { - return nil, errors.New("needs a pointer to a value") - } else if beanValue.Elem().Kind() == reflect.Ptr { - return nil, errors.New("a pointer to a pointer is not allowed") - } - - if err = rows.session.statement.SetRefBean(bean); err != nil { - return nil, err - } - - if len(session.statement.TableName()) == 0 { - return nil, ErrTableNotFound - } - - if rows.session.statement.RawSQL == "" { - var autoCond builder.Cond - var addedTableName = (len(session.statement.JoinStr) > 0) - var table = rows.session.statement.RefTable - - if !session.statement.NoAutoCondition { - var err error - autoCond, err = session.statement.BuildConds(table, bean, true, true, false, true, addedTableName) - if err != nil { - return nil, err - } - } else { - // !oinume! Add " IS NULL" to WHERE whatever condiBean is given. 
- // See https://gitea.com/xorm/xorm/issues/179 - if col := table.DeletedColumn(); col != nil && !session.statement.GetUnscoped() { // tag "deleted" is enabled - autoCond = session.statement.CondDeleted(col) - } - } - - sqlStr, args, err = rows.session.statement.GenFindSQL(autoCond) - if err != nil { - return nil, err - } - } else { - sqlStr = rows.session.statement.GenRawSQL() - args = rows.session.statement.RawParams - } - - rows.rows, err = rows.session.queryRows(sqlStr, args...) - if err != nil { - rows.Close() - return nil, err - } - - return rows, nil -} - -// Next move cursor to next record, return false if end has reached -func (rows *Rows) Next() bool { - if rows.rows != nil { - return rows.rows.Next() - } - return false -} - -// Err returns the error, if any, that was encountered during iteration. Err may be called after an explicit or implicit Close. -func (rows *Rows) Err() error { - if rows.rows != nil { - return rows.rows.Err() - } - return nil -} - -// Scan row record to bean properties -func (rows *Rows) Scan(beans ...interface{}) error { - if rows.Err() != nil { - return rows.Err() - } - - var bean = beans[0] - var tp = reflect.TypeOf(bean) - if tp.Kind() == reflect.Ptr { - tp = tp.Elem() - } - var beanKind = tp.Kind() - - if len(beans) == 1 { - if reflect.Indirect(reflect.ValueOf(bean)).Type() != rows.beanType { - return fmt.Errorf("scan arg is incompatible type to [%v]", rows.beanType) - } - - if err := rows.session.statement.SetRefBean(bean); err != nil { - return err - } - } - - fields, err := rows.rows.Columns() - if err != nil { - return err - } - types, err := rows.rows.ColumnTypes() - if err != nil { - return err - } - - if err := rows.session.scan(rows.rows, rows.session.statement.RefTable, beanKind, beans, types, fields); err != nil { - return err - } - - return rows.session.executeProcessors() -} - -// Close session if session.IsAutoClose is true, and claimed any opened resources -func (rows *Rows) Close() error { - if 
rows.session.isAutoClose { - defer rows.session.Close() - } - - if rows.rows != nil { - return rows.rows.Close() - } - - return nil -} diff --git a/vendor/xorm.io/xorm/scan.go b/vendor/xorm.io/xorm/scan.go deleted file mode 100644 index 00cee4d7..00000000 --- a/vendor/xorm.io/xorm/scan.go +++ /dev/null @@ -1,439 +0,0 @@ -// Copyright 2021 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xorm - -import ( - "database/sql" - "fmt" - "math/big" - "reflect" - "time" - - "xorm.io/xorm/convert" - "xorm.io/xorm/core" - "xorm.io/xorm/dialects" - "xorm.io/xorm/schemas" -) - -// genScanResultsByBeanNullabale generates scan result -func genScanResultsByBeanNullable(bean interface{}) (interface{}, bool, error) { - switch t := bean.(type) { - case *interface{}: - return t, false, nil - case *sql.NullInt64, *sql.NullBool, *sql.NullFloat64, *sql.NullString, *sql.RawBytes, *[]byte: - return t, false, nil - case *time.Time: - return &sql.NullString{}, true, nil - case *sql.NullTime: - return &sql.NullString{}, true, nil - case *string: - return &sql.NullString{}, true, nil - case *int, *int8, *int16, *int32: - return &sql.NullInt32{}, true, nil - case *int64: - return &sql.NullInt64{}, true, nil - case *uint, *uint8, *uint16, *uint32: - return &convert.NullUint32{}, true, nil - case *uint64: - return &convert.NullUint64{}, true, nil - case *float32, *float64: - return &sql.NullFloat64{}, true, nil - case *bool: - return &sql.NullBool{}, true, nil - case sql.NullInt64, sql.NullBool, sql.NullFloat64, sql.NullString, - time.Time, - string, - int, int8, int16, int32, int64, - uint, uint8, uint16, uint32, uint64, - float32, float64, - bool: - return nil, false, fmt.Errorf("unsupported scan type: %t", t) - case convert.Conversion: - return &sql.RawBytes{}, true, nil - } - - tp := reflect.TypeOf(bean).Elem() - switch tp.Kind() { - case reflect.String: - return &sql.NullString{}, true, 
nil - case reflect.Int64: - return &sql.NullInt64{}, true, nil - case reflect.Int32, reflect.Int, reflect.Int16, reflect.Int8: - return &sql.NullInt32{}, true, nil - case reflect.Uint64: - return &convert.NullUint64{}, true, nil - case reflect.Uint32, reflect.Uint, reflect.Uint16, reflect.Uint8: - return &convert.NullUint32{}, true, nil - default: - return nil, false, fmt.Errorf("genScanResultsByBeanNullable: unsupported type: %#v", bean) - } -} - -func genScanResultsByBean(bean interface{}) (interface{}, bool, error) { - switch t := bean.(type) { - case *interface{}: - return t, false, nil - case *sql.NullInt64, *sql.NullBool, *sql.NullFloat64, *sql.NullString, - *sql.RawBytes, - *string, - *int, *int8, *int16, *int32, *int64, - *uint, *uint8, *uint16, *uint32, *uint64, - *float32, *float64, - *bool: - return t, false, nil - case *time.Time, *sql.NullTime: - return &sql.NullString{}, true, nil - case sql.NullInt64, sql.NullBool, sql.NullFloat64, sql.NullString, - time.Time, - string, - int, int8, int16, int32, int64, - uint, uint8, uint16, uint32, uint64, - bool: - return nil, false, fmt.Errorf("unsupported scan type: %t", t) - case convert.Conversion: - return &sql.RawBytes{}, true, nil - } - - tp := reflect.TypeOf(bean).Elem() - switch tp.Kind() { - case reflect.String: - return new(string), true, nil - case reflect.Int64: - return new(int64), true, nil - case reflect.Int32: - return new(int32), true, nil - case reflect.Int: - return new(int32), true, nil - case reflect.Int16: - return new(int32), true, nil - case reflect.Int8: - return new(int32), true, nil - case reflect.Uint64: - return new(uint64), true, nil - case reflect.Uint32: - return new(uint32), true, nil - case reflect.Uint: - return new(uint), true, nil - case reflect.Uint16: - return new(uint16), true, nil - case reflect.Uint8: - return new(uint8), true, nil - case reflect.Float32: - return new(float32), true, nil - case reflect.Float64: - return new(float64), true, nil - default: - return nil, 
false, fmt.Errorf("genScanResultsByBean: unsupported type: %#v", bean) - } -} - -func (engine *Engine) scanStringInterface(rows *core.Rows, fields []string, types []*sql.ColumnType) ([]interface{}, error) { - scanResults := make([]interface{}, len(types)) - for i := 0; i < len(types); i++ { - var s sql.NullString - scanResults[i] = &s - } - - if err := engine.scan(rows, fields, types, scanResults...); err != nil { - return nil, err - } - return scanResults, nil -} - -// scan is a wrap of driver.Scan but will automatically change the input values according requirements -func (engine *Engine) scan(rows *core.Rows, fields []string, types []*sql.ColumnType, vv ...interface{}) error { - scanResults := make([]interface{}, 0, len(types)) - replaces := make([]bool, 0, len(types)) - var err error - for _, v := range vv { - var replaced bool - var scanResult interface{} - switch t := v.(type) { - case *big.Float, *time.Time, *sql.NullTime: - scanResult = &sql.NullString{} - replaced = true - case sql.Scanner: - scanResult = t - case convert.Conversion: - scanResult = &sql.RawBytes{} - replaced = true - default: - nullable, ok := types[0].Nullable() - if !ok || nullable { - scanResult, replaced, err = genScanResultsByBeanNullable(v) - } else { - scanResult, replaced, err = genScanResultsByBean(v) - } - if err != nil { - return err - } - } - - scanResults = append(scanResults, scanResult) - replaces = append(replaces, replaced) - } - - if err = engine.driver.Scan(&dialects.ScanContext{ - DBLocation: engine.DatabaseTZ, - UserLocation: engine.TZLocation, - }, rows, types, scanResults...); err != nil { - return err - } - - for i, replaced := range replaces { - if replaced { - if err = convert.Assign(vv[i], scanResults[i], engine.DatabaseTZ, engine.TZLocation); err != nil { - return err - } - } - } - - return nil -} - -func (engine *Engine) scanInterfaces(rows *core.Rows, fields []string, types []*sql.ColumnType) ([]interface{}, error) { - scanResultContainers := 
make([]interface{}, len(types)) - for i := 0; i < len(types); i++ { - scanResult, err := engine.driver.GenScanResult(types[i].DatabaseTypeName()) - if err != nil { - return nil, err - } - scanResultContainers[i] = scanResult - } - if err := engine.scan(rows, fields, types, scanResultContainers...); err != nil { - return nil, err - } - return scanResultContainers, nil -} - -//////////////////// -// row -> map[string]interface{} - -func (engine *Engine) row2mapInterface(rows *core.Rows, types []*sql.ColumnType, fields []string) (map[string]interface{}, error) { - resultsMap := make(map[string]interface{}, len(fields)) - scanResultContainers := make([]interface{}, len(fields)) - for i := 0; i < len(fields); i++ { - scanResult, err := engine.driver.GenScanResult(types[i].DatabaseTypeName()) - if err != nil { - return nil, err - } - scanResultContainers[i] = scanResult - } - if err := engine.scan(rows, fields, types, scanResultContainers...); err != nil { - return nil, err - } - - for ii, key := range fields { - res, err := convert.Interface2Interface(engine.TZLocation, scanResultContainers[ii]) - if err != nil { - return nil, err - } - resultsMap[key] = res - } - return resultsMap, nil -} - -// ScanInterfaceMap scan result from *core.Rows and return a map -func (engine *Engine) ScanInterfaceMap(rows *core.Rows) (map[string]interface{}, error) { - fields, err := rows.Columns() - if err != nil { - return nil, err - } - types, err := rows.ColumnTypes() - if err != nil { - return nil, err - } - - return engine.row2mapInterface(rows, types, fields) -} - -// ScanInterfaceMaps scan results from *core.Rows and return a slice of map -func (engine *Engine) ScanInterfaceMaps(rows *core.Rows) (resultsSlice []map[string]interface{}, err error) { - fields, err := rows.Columns() - if err != nil { - return nil, err - } - types, err := rows.ColumnTypes() - if err != nil { - return nil, err - } - for rows.Next() { - result, err := engine.row2mapInterface(rows, types, fields) - if err != 
nil { - return nil, err - } - resultsSlice = append(resultsSlice, result) - } - if rows.Err() != nil { - return nil, rows.Err() - } - - return resultsSlice, nil -} - -//////////////////// -// row -> map[string]string - -func (engine *Engine) row2mapStr(rows *core.Rows, types []*sql.ColumnType, fields []string) (map[string]string, error) { - scanResults := make([]interface{}, len(fields)) - for i := 0; i < len(fields); i++ { - var s sql.NullString - scanResults[i] = &s - } - - if err := engine.driver.Scan(&dialects.ScanContext{ - DBLocation: engine.DatabaseTZ, - UserLocation: engine.TZLocation, - }, rows, types, scanResults...); err != nil { - return nil, err - } - - result := make(map[string]string, len(fields)) - for i, key := range fields { - s := scanResults[i].(*sql.NullString) - if s.String == "" { - result[key] = "" - continue - } - - if schemas.TIME_TYPE == engine.dialect.ColumnTypeKind(types[i].DatabaseTypeName()) { - t, err := convert.String2Time(s.String, engine.DatabaseTZ, engine.TZLocation) - if err != nil { - return nil, err - } - result[key] = t.Format("2006-01-02 15:04:05") - } else { - result[key] = s.String - } - } - return result, nil -} - -// ScanStringMap scan results from *core.Rows and return a map -func (engine *Engine) ScanStringMap(rows *core.Rows) (map[string]string, error) { - fields, err := rows.Columns() - if err != nil { - return nil, err - } - types, err := rows.ColumnTypes() - if err != nil { - return nil, err - } - return engine.row2mapStr(rows, types, fields) -} - -// ScanStringMaps scan results from *core.Rows and return a slice of map -func (engine *Engine) ScanStringMaps(rows *core.Rows) (resultsSlice []map[string]string, err error) { - fields, err := rows.Columns() - if err != nil { - return nil, err - } - types, err := rows.ColumnTypes() - if err != nil { - return nil, err - } - - for rows.Next() { - result, err := engine.row2mapStr(rows, types, fields) - if err != nil { - return nil, err - } - resultsSlice = 
append(resultsSlice, result) - } - if rows.Err() != nil { - return nil, rows.Err() - } - - return resultsSlice, nil -} - -//////////////////// -// row -> map[string][]byte - -func convertMapStr2Bytes(m map[string]string) map[string][]byte { - r := make(map[string][]byte, len(m)) - for k, v := range m { - r[k] = []byte(v) - } - return r -} - -func (engine *Engine) scanByteMaps(rows *core.Rows) (resultsSlice []map[string][]byte, err error) { - fields, err := rows.Columns() - if err != nil { - return nil, err - } - types, err := rows.ColumnTypes() - if err != nil { - return nil, err - } - for rows.Next() { - result, err := engine.row2mapStr(rows, types, fields) - if err != nil { - return nil, err - } - resultsSlice = append(resultsSlice, convertMapStr2Bytes(result)) - } - if rows.Err() != nil { - return nil, rows.Err() - } - - return resultsSlice, nil -} - -//////////////////// -// row -> []string - -func (engine *Engine) row2sliceStr(rows *core.Rows, types []*sql.ColumnType, fields []string) ([]string, error) { - scanResults, err := engine.scanStringInterface(rows, fields, types) - if err != nil { - return nil, err - } - - results := make([]string, 0, len(fields)) - for i := 0; i < len(fields); i++ { - results = append(results, scanResults[i].(*sql.NullString).String) - } - return results, nil -} - -// ScanStringSlice scan results from *core.Rows and return a slice of one row -func (engine *Engine) ScanStringSlice(rows *core.Rows) ([]string, error) { - fields, err := rows.Columns() - if err != nil { - return nil, err - } - types, err := rows.ColumnTypes() - if err != nil { - return nil, err - } - - return engine.row2sliceStr(rows, types, fields) -} - -// ScanStringSlices scan results from *core.Rows and return a slice of all rows -func (engine *Engine) ScanStringSlices(rows *core.Rows) (resultsSlice [][]string, err error) { - fields, err := rows.Columns() - if err != nil { - return nil, err - } - types, err := rows.ColumnTypes() - if err != nil { - return nil, err - 
} - - for rows.Next() { - record, err := engine.row2sliceStr(rows, types, fields) - if err != nil { - return nil, err - } - resultsSlice = append(resultsSlice, record) - } - if rows.Err() != nil { - return nil, rows.Err() - } - - return resultsSlice, nil -} diff --git a/vendor/xorm.io/xorm/schemas/column.go b/vendor/xorm.io/xorm/schemas/column.go deleted file mode 100644 index 001769cd..00000000 --- a/vendor/xorm.io/xorm/schemas/column.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright 2019 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package schemas - -import ( - "errors" - "reflect" - "strconv" - "time" -) - -// enumerates all database mapping way -const ( - TWOSIDES = iota + 1 - ONLYTODB - ONLYFROMDB -) - -// Column defines database column -type Column struct { - Name string - TableName string - FieldName string // Available only when parsed from a struct - FieldIndex []int // Available only when parsed from a struct - SQLType SQLType - IsJSON bool - Length int64 - Length2 int64 - Nullable bool - Default string - Indexes map[string]int - IsPrimaryKey bool - IsAutoIncrement bool - MapType int - IsCreated bool - IsUpdated bool - IsDeleted bool - IsCascade bool - IsVersion bool - DefaultIsEmpty bool // false means column has no default set, but not default value is empty - EnumOptions map[string]int - SetOptions map[string]int - DisableTimeZone bool - TimeZone *time.Location // column specified time zone - Comment string -} - -// NewColumn creates a new column -func NewColumn(name, fieldName string, sqlType SQLType, len1, len2 int64, nullable bool) *Column { - return &Column{ - Name: name, - IsJSON: sqlType.IsJson(), - TableName: "", - FieldName: fieldName, - SQLType: sqlType, - Length: len1, - Length2: len2, - Nullable: nullable, - Default: "", - Indexes: make(map[string]int), - IsPrimaryKey: false, - IsAutoIncrement: false, - MapType: TWOSIDES, - IsCreated: false, 
- IsUpdated: false, - IsDeleted: false, - IsCascade: false, - IsVersion: false, - DefaultIsEmpty: true, // default should be no default - EnumOptions: make(map[string]int), - Comment: "", - } -} - -// ValueOf returns column's filed of struct's value -func (col *Column) ValueOf(bean interface{}) (*reflect.Value, error) { - dataStruct := reflect.Indirect(reflect.ValueOf(bean)) - return col.ValueOfV(&dataStruct) -} - -// ValueOfV returns column's filed of struct's value accept reflevt value -func (col *Column) ValueOfV(dataStruct *reflect.Value) (*reflect.Value, error) { - v := *dataStruct - for _, i := range col.FieldIndex { - if v.Kind() == reflect.Ptr { - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - v = v.Elem() - } - v = v.FieldByIndex([]int{i}) - } - return &v, nil -} - -// ConvertID converts id content to suitable type according column type -func (col *Column) ConvertID(sid string) (interface{}, error) { - if col.SQLType.IsNumeric() { - n, err := strconv.ParseInt(sid, 10, 64) - if err != nil { - return nil, err - } - return n, nil - } else if col.SQLType.IsText() { - return sid, nil - } - return nil, errors.New("not supported") -} diff --git a/vendor/xorm.io/xorm/schemas/index.go b/vendor/xorm.io/xorm/schemas/index.go deleted file mode 100644 index 47027ea4..00000000 --- a/vendor/xorm.io/xorm/schemas/index.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2019 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package schemas - -import ( - "fmt" - "strings" -) - -// enumerate all index types -const ( - IndexType = iota + 1 - UniqueType -) - -// Index represents a database index -type Index struct { - IsRegular bool - Name string - Type int - Cols []string -} - -// NewIndex new an index object -func NewIndex(name string, indexType int) *Index { - return &Index{true, name, indexType, make([]string, 0)} -} - -// XName returns the special index name for the table -func (index *Index) XName(tableName string) string { - if !strings.HasPrefix(index.Name, "UQE_") && - !strings.HasPrefix(index.Name, "IDX_") { - tableParts := strings.Split(strings.ReplaceAll(tableName, `"`, ""), ".") - tableName = tableParts[len(tableParts)-1] - if index.Type == UniqueType { - return fmt.Sprintf("UQE_%v_%v", tableName, index.Name) - } - return fmt.Sprintf("IDX_%v_%v", tableName, index.Name) - } - return index.Name -} - -// AddColumn add columns which will be composite index -func (index *Index) AddColumn(cols ...string) { - index.Cols = append(index.Cols, cols...) -} - -// Equal return true if the two Index is equal -func (index *Index) Equal(dst *Index) bool { - if index.Type != dst.Type { - return false - } - if len(index.Cols) != len(dst.Cols) { - return false - } - - for i := 0; i < len(index.Cols); i++ { - var found bool - for j := 0; j < len(dst.Cols); j++ { - if index.Cols[i] == dst.Cols[j] { - found = true - break - } - } - if !found { - return false - } - } - return true -} diff --git a/vendor/xorm.io/xorm/schemas/pk.go b/vendor/xorm.io/xorm/schemas/pk.go deleted file mode 100644 index da3c7899..00000000 --- a/vendor/xorm.io/xorm/schemas/pk.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2019 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package schemas - -import ( - "bytes" - "encoding/gob" - - "xorm.io/xorm/internal/utils" -) - -// PK represents primary key values -type PK []interface{} - -// NewPK creates primay keys -func NewPK(pks ...interface{}) *PK { - p := PK(pks) - return &p -} - -// IsZero return true if primay keys are zero -func (p *PK) IsZero() bool { - for _, k := range *p { - if utils.IsZero(k) { - return true - } - } - return false -} - -// ToString convert to SQL string -func (p *PK) ToString() (string, error) { - buf := new(bytes.Buffer) - enc := gob.NewEncoder(buf) - err := enc.Encode(*p) - return buf.String(), err -} - -// FromString reads content to load primary keys -func (p *PK) FromString(content string) error { - dec := gob.NewDecoder(bytes.NewBufferString(content)) - err := dec.Decode(p) - return err -} diff --git a/vendor/xorm.io/xorm/schemas/quote.go b/vendor/xorm.io/xorm/schemas/quote.go deleted file mode 100644 index 4cab30fe..00000000 --- a/vendor/xorm.io/xorm/schemas/quote.go +++ /dev/null @@ -1,244 +0,0 @@ -// Copyright 2020 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package schemas - -import ( - "strings" -) - -// Quoter represents a quoter to the SQL table name and column name -type Quoter struct { - Prefix byte - Suffix byte - IsReserved func(string) bool -} - -var ( - // AlwaysNoReserve always think it's not a reverse word - AlwaysNoReserve = func(string) bool { return false } - - // AlwaysReserve always reverse the word - AlwaysReserve = func(string) bool { return true } - - // CommanQuoteMark represnets the common quote mark - CommanQuoteMark byte = '`' - - // CommonQuoter represetns a common quoter - CommonQuoter = Quoter{CommanQuoteMark, CommanQuoteMark, AlwaysReserve} -) - -// IsEmpty return true if no prefix and suffix -func (q Quoter) IsEmpty() bool { - return q.Prefix == 0 && q.Suffix == 0 -} - -// Quote quote a string -func (q Quoter) Quote(s string) string { - var buf strings.Builder - _ = q.QuoteTo(&buf, s) - return buf.String() -} - -// Trim removes quotes from s -func (q Quoter) Trim(s string) string { - if len(s) < 2 { - return s - } - - var buf strings.Builder - for i := 0; i < len(s); i++ { - switch { - case i == 0 && s[i] == q.Prefix: - case i == len(s)-1 && s[i] == q.Suffix: - case s[i] == q.Suffix && s[i+1] == '.': - case s[i] == q.Prefix && s[i-1] == '.': - default: - buf.WriteByte(s[i]) - } - } - return buf.String() -} - -// Join joins a slice with quoters -func (q Quoter) Join(a []string, sep string) string { - var b strings.Builder - _ = q.JoinWrite(&b, a, sep) - return b.String() -} - -// JoinWrite writes quoted content to a builder -func (q Quoter) JoinWrite(b *strings.Builder, a []string, sep string) error { - if len(a) == 0 { - return nil - } - - n := len(sep) * (len(a) - 1) - for i := 0; i < len(a); i++ { - n += len(a[i]) - } - - b.Grow(n) - for i, s := range a { - if i > 0 { - if _, err := b.WriteString(sep); err != nil { - return err - } - } - if err := q.QuoteTo(b, strings.TrimSpace(s)); err != nil { - return err - } - } - return nil -} - -func findWord(v string, start int) int { - for j := 
start; j < len(v); j++ { - switch v[j] { - case '.', ' ': - return j - } - } - return len(v) -} - -func findStart(value string, start int) int { - if value[start] == '.' { - return start + 1 - } - if value[start] != ' ' { - return start - } - - var k = -1 - for j := start; j < len(value); j++ { - if value[j] != ' ' { - k = j - break - } - } - if k == -1 { - return len(value) - } - - if (value[k] == 'A' || value[k] == 'a') && (value[k+1] == 'S' || value[k+1] == 's') { - k += 2 - } - - for j := k; j < len(value); j++ { - if value[j] != ' ' { - return j - } - } - return len(value) -} - -func (q Quoter) quoteWordTo(buf *strings.Builder, word string) error { - var realWord = word - if (word[0] == CommanQuoteMark && word[len(word)-1] == CommanQuoteMark) || - (word[0] == q.Prefix && word[len(word)-1] == q.Suffix) { - realWord = word[1 : len(word)-1] - } - - if q.IsEmpty() { - _, err := buf.WriteString(realWord) - return err - } - - isReserved := q.IsReserved(realWord) - if isReserved && realWord != "*" { - if err := buf.WriteByte(q.Prefix); err != nil { - return err - } - } - if _, err := buf.WriteString(realWord); err != nil { - return err - } - if isReserved && realWord != "*" { - return buf.WriteByte(q.Suffix) - } - - return nil -} - -// QuoteTo quotes the table or column names. i.e. 
if the quotes are [ and ] -// name -> [name] -// `name` -> [name] -// [name] -> [name] -// schema.name -> [schema].[name] -// `schema`.`name` -> [schema].[name] -// `schema`.name -> [schema].[name] -// schema.`name` -> [schema].[name] -// [schema].name -> [schema].[name] -// schema.[name] -> [schema].[name] -// name AS a -> [name] AS a -// schema.name AS a -> [schema].[name] AS a -func (q Quoter) QuoteTo(buf *strings.Builder, value string) error { - var i int - for i < len(value) { - start := findStart(value, i) - if start > i { - if _, err := buf.WriteString(value[i:start]); err != nil { - return err - } - } - if start == len(value) { - return nil - } - - var nextEnd = findWord(value, start) - if err := q.quoteWordTo(buf, value[start:nextEnd]); err != nil { - return err - } - i = nextEnd - } - return nil -} - -// Strings quotes a slice of string -func (q Quoter) Strings(s []string) []string { - var res = make([]string, 0, len(s)) - for _, a := range s { - res = append(res, q.Quote(a)) - } - return res -} - -// Replace replaces common quote(`) as the quotes on the sql -func (q Quoter) Replace(sql string) string { - if q.IsEmpty() { - return sql - } - - var buf strings.Builder - buf.Grow(len(sql)) - - var beginSingleQuote bool - for i := 0; i < len(sql); i++ { - if !beginSingleQuote && sql[i] == CommanQuoteMark { - var j = i + 1 - for ; j < len(sql); j++ { - if sql[j] == CommanQuoteMark { - break - } - } - word := sql[i+1 : j] - isReserved := q.IsReserved(word) - if isReserved { - buf.WriteByte(q.Prefix) - } - buf.WriteString(word) - if isReserved { - buf.WriteByte(q.Suffix) - } - i = j - } else { - if sql[i] == '\'' { - beginSingleQuote = !beginSingleQuote - } - buf.WriteByte(sql[i]) - } - } - return buf.String() -} diff --git a/vendor/xorm.io/xorm/schemas/table.go b/vendor/xorm.io/xorm/schemas/table.go deleted file mode 100644 index 91b33e06..00000000 --- a/vendor/xorm.io/xorm/schemas/table.go +++ /dev/null @@ -1,178 +0,0 @@ -// Copyright 2019 The Xorm Authors. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package schemas - -import ( - "reflect" - "strconv" - "strings" -) - -// Table represents a database table -type Table struct { - Name string - Type reflect.Type - columnsSeq []string - columnsMap map[string][]*Column - columns []*Column - Indexes map[string]*Index - PrimaryKeys []string - AutoIncrement string - Created map[string]bool - Updated string - Deleted string - Version string - StoreEngine string - Charset string - Comment string -} - -// NewEmptyTable creates an empty table -func NewEmptyTable() *Table { - return NewTable("", nil) -} - -// NewTable creates a new Table object -func NewTable(name string, t reflect.Type) *Table { - return &Table{Name: name, Type: t, - columnsSeq: make([]string, 0), - columns: make([]*Column, 0), - columnsMap: make(map[string][]*Column), - Indexes: make(map[string]*Index), - Created: make(map[string]bool), - PrimaryKeys: make([]string, 0), - } -} - -// Columns returns table's columns -func (table *Table) Columns() []*Column { - return table.columns -} - -// ColumnsSeq returns table's column names according sequence -func (table *Table) ColumnsSeq() []string { - return table.columnsSeq -} - -func (table *Table) columnsByName(name string) []*Column { - return table.columnsMap[strings.ToLower(name)] -} - -// GetColumn returns column according column name, if column not found, return nil -func (table *Table) GetColumn(name string) *Column { - cols := table.columnsByName(name) - if cols != nil { - return cols[0] - } - - return nil -} - -// GetColumnIdx returns column according name and idx -func (table *Table) GetColumnIdx(name string, idx int) *Column { - cols := table.columnsByName(name) - if cols != nil && idx < len(cols) { - return cols[idx] - } - - return nil -} - -// PKColumns reprents all primary key columns -func (table *Table) PKColumns() []*Column { - columns := make([]*Column, 
len(table.PrimaryKeys)) - for i, name := range table.PrimaryKeys { - columns[i] = table.GetColumn(name) - } - return columns -} - -// ColumnType returns a column's type -func (table *Table) ColumnType(name string) reflect.Type { - t, _ := table.Type.FieldByName(name) - return t.Type -} - -// AutoIncrColumn returns autoincrement column -func (table *Table) AutoIncrColumn() *Column { - return table.GetColumn(table.AutoIncrement) -} - -// VersionColumn returns version column's information -func (table *Table) VersionColumn() *Column { - return table.GetColumn(table.Version) -} - -// UpdatedColumn returns updated column's information -func (table *Table) UpdatedColumn() *Column { - return table.GetColumn(table.Updated) -} - -// DeletedColumn returns deleted column's information -func (table *Table) DeletedColumn() *Column { - return table.GetColumn(table.Deleted) -} - -// AddColumn adds a column to table -func (table *Table) AddColumn(col *Column) { - table.columnsSeq = append(table.columnsSeq, col.Name) - table.columns = append(table.columns, col) - colName := strings.ToLower(col.Name) - if c, ok := table.columnsMap[colName]; ok { - table.columnsMap[colName] = append(c, col) - } else { - table.columnsMap[colName] = []*Column{col} - } - - if col.IsPrimaryKey { - table.PrimaryKeys = append(table.PrimaryKeys, col.Name) - } - if col.IsAutoIncrement { - table.AutoIncrement = col.Name - } - if col.IsCreated { - table.Created[col.Name] = true - } - if col.IsUpdated { - table.Updated = col.Name - } - if col.IsDeleted { - table.Deleted = col.Name - } - if col.IsVersion { - table.Version = col.Name - } -} - -// AddIndex adds an index or an unique to table -func (table *Table) AddIndex(index *Index) { - table.Indexes[index.Name] = index -} - -// IDOfV get id from one value of struct -func (table *Table) IDOfV(rv reflect.Value) (PK, error) { - v := reflect.Indirect(rv) - pk := make([]interface{}, len(table.PrimaryKeys)) - for i, col := range table.PKColumns() { - var err error - 
- pkField := v.FieldByIndex(col.FieldIndex) - - switch pkField.Kind() { - case reflect.String: - pk[i], err = col.ConvertID(pkField.String()) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - pk[i], err = col.ConvertID(strconv.FormatInt(pkField.Int(), 10)) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - // id of uint will be converted to int64 - pk[i], err = col.ConvertID(strconv.FormatUint(pkField.Uint(), 10)) - } - - if err != nil { - return nil, err - } - } - return PK(pk), nil -} diff --git a/vendor/xorm.io/xorm/schemas/type.go b/vendor/xorm.io/xorm/schemas/type.go deleted file mode 100644 index b8b30851..00000000 --- a/vendor/xorm.io/xorm/schemas/type.go +++ /dev/null @@ -1,362 +0,0 @@ -// Copyright 2019 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package schemas - -import ( - "database/sql" - "math/big" - "reflect" - "strings" - "time" -) - -// DBType represents a database type -type DBType string - -// enumerates all database types -const ( - POSTGRES DBType = "postgres" - SQLITE DBType = "sqlite3" - MYSQL DBType = "mysql" - MSSQL DBType = "mssql" - ORACLE DBType = "oracle" - DAMENG DBType = "dameng" -) - -// SQLType represents SQL types -type SQLType struct { - Name string - DefaultLength int64 - DefaultLength2 int64 -} - -// enumerates all columns types -const ( - UNKNOW_TYPE = iota - TEXT_TYPE - BLOB_TYPE - TIME_TYPE - NUMERIC_TYPE - ARRAY_TYPE - BOOL_TYPE -) - -// IsType reutrns ture if the column type is the same as the parameter -func (s *SQLType) IsType(st int) bool { - if t, ok := SqlTypes[s.Name]; ok && t == st { - return true - } - return false -} - -// IsText returns true if column is a text type -func (s *SQLType) IsText() bool { - return s.IsType(TEXT_TYPE) -} - -// IsBlob returns true if column is a binary type -func (s *SQLType) IsBlob() bool { - return s.IsType(BLOB_TYPE) 
-} - -// IsTime returns true if column is a time type -func (s *SQLType) IsTime() bool { - return s.IsType(TIME_TYPE) -} - -// IsBool returns true if column is a boolean type -func (s *SQLType) IsBool() bool { - return s.IsType(BOOL_TYPE) -} - -// IsNumeric returns true if column is a numeric type -func (s *SQLType) IsNumeric() bool { - return s.IsType(NUMERIC_TYPE) -} - -// IsArray returns true if column is an array type -func (s *SQLType) IsArray() bool { - return s.IsType(ARRAY_TYPE) -} - -// IsJson returns true if column is an array type -func (s *SQLType) IsJson() bool { - return s.Name == Json || s.Name == Jsonb -} - -// IsXML returns true if column is an xml type -func (s *SQLType) IsXML() bool { - return s.Name == XML -} - -// enumerates all the database column types -var ( - Bit = "BIT" - UnsignedBit = "UNSIGNED BIT" - TinyInt = "TINYINT" - UnsignedTinyInt = "UNSIGNED TINYINT" - SmallInt = "SMALLINT" - UnsignedSmallInt = "UNSIGNED SMALLINT" - MediumInt = "MEDIUMINT" - UnsignedMediumInt = "UNSIGNED MEDIUMINT" - Int = "INT" - UnsignedInt = "UNSIGNED INT" - Integer = "INTEGER" - BigInt = "BIGINT" - UnsignedBigInt = "UNSIGNED BIGINT" - Number = "NUMBER" - - Enum = "ENUM" - Set = "SET" - - Char = "CHAR" - Varchar = "VARCHAR" - VARCHAR2 = "VARCHAR2" - NChar = "NCHAR" - NVarchar = "NVARCHAR" - TinyText = "TINYTEXT" - Text = "TEXT" - NText = "NTEXT" - Clob = "CLOB" - MediumText = "MEDIUMTEXT" - LongText = "LONGTEXT" - Uuid = "UUID" - UniqueIdentifier = "UNIQUEIDENTIFIER" - SysName = "SYSNAME" - - Date = "DATE" - DateTime = "DATETIME" - SmallDateTime = "SMALLDATETIME" - Time = "TIME" - TimeStamp = "TIMESTAMP" - TimeStampz = "TIMESTAMPZ" - Year = "YEAR" - - Decimal = "DECIMAL" - Numeric = "NUMERIC" - Money = "MONEY" - SmallMoney = "SMALLMONEY" - - Real = "REAL" - Float = "FLOAT" - Double = "DOUBLE" - - Binary = "BINARY" - VarBinary = "VARBINARY" - TinyBlob = "TINYBLOB" - Blob = "BLOB" - MediumBlob = "MEDIUMBLOB" - LongBlob = "LONGBLOB" - Bytea = "BYTEA" - - Bool = 
"BOOL" - Boolean = "BOOLEAN" - - Serial = "SERIAL" - BigSerial = "BIGSERIAL" - - Json = "JSON" - Jsonb = "JSONB" - - XML = "XML" - Array = "ARRAY" - - SqlTypes = map[string]int{ - Bit: NUMERIC_TYPE, - UnsignedBit: NUMERIC_TYPE, - TinyInt: NUMERIC_TYPE, - UnsignedTinyInt: NUMERIC_TYPE, - SmallInt: NUMERIC_TYPE, - UnsignedSmallInt: NUMERIC_TYPE, - MediumInt: NUMERIC_TYPE, - UnsignedMediumInt: NUMERIC_TYPE, - Int: NUMERIC_TYPE, - UnsignedInt: NUMERIC_TYPE, - Integer: NUMERIC_TYPE, - BigInt: NUMERIC_TYPE, - UnsignedBigInt: NUMERIC_TYPE, - Number: NUMERIC_TYPE, - - Enum: TEXT_TYPE, - Set: TEXT_TYPE, - Json: TEXT_TYPE, - Jsonb: TEXT_TYPE, - - XML: TEXT_TYPE, - - Char: TEXT_TYPE, - NChar: TEXT_TYPE, - Varchar: TEXT_TYPE, - VARCHAR2: TEXT_TYPE, - NVarchar: TEXT_TYPE, - TinyText: TEXT_TYPE, - Text: TEXT_TYPE, - NText: TEXT_TYPE, - MediumText: TEXT_TYPE, - LongText: TEXT_TYPE, - Uuid: TEXT_TYPE, - Clob: TEXT_TYPE, - SysName: TEXT_TYPE, - - Date: TIME_TYPE, - DateTime: TIME_TYPE, - Time: TIME_TYPE, - TimeStamp: TIME_TYPE, - TimeStampz: TIME_TYPE, - SmallDateTime: TIME_TYPE, - Year: TIME_TYPE, - - Decimal: NUMERIC_TYPE, - Numeric: NUMERIC_TYPE, - Real: NUMERIC_TYPE, - Float: NUMERIC_TYPE, - Double: NUMERIC_TYPE, - Money: NUMERIC_TYPE, - SmallMoney: NUMERIC_TYPE, - - Binary: BLOB_TYPE, - VarBinary: BLOB_TYPE, - - TinyBlob: BLOB_TYPE, - Blob: BLOB_TYPE, - MediumBlob: BLOB_TYPE, - LongBlob: BLOB_TYPE, - Bytea: BLOB_TYPE, - UniqueIdentifier: BLOB_TYPE, - - Bool: BOOL_TYPE, - Boolean: BOOL_TYPE, - - Serial: NUMERIC_TYPE, - BigSerial: NUMERIC_TYPE, - - "INT8": NUMERIC_TYPE, - - Array: ARRAY_TYPE, - } -) - -// enumerates all types -var ( - IntType = reflect.TypeOf((*int)(nil)).Elem() - Int8Type = reflect.TypeOf((*int8)(nil)).Elem() - Int16Type = reflect.TypeOf((*int16)(nil)).Elem() - Int32Type = reflect.TypeOf((*int32)(nil)).Elem() - Int64Type = reflect.TypeOf((*int64)(nil)).Elem() - - UintType = reflect.TypeOf((*uint)(nil)).Elem() - Uint8Type = reflect.TypeOf((*uint8)(nil)).Elem() - 
Uint16Type = reflect.TypeOf((*uint16)(nil)).Elem() - Uint32Type = reflect.TypeOf((*uint32)(nil)).Elem() - Uint64Type = reflect.TypeOf((*uint64)(nil)).Elem() - - Float32Type = reflect.TypeOf((*float32)(nil)).Elem() - Float64Type = reflect.TypeOf((*float64)(nil)).Elem() - - Complex64Type = reflect.TypeOf((*complex64)(nil)).Elem() - Complex128Type = reflect.TypeOf((*complex128)(nil)).Elem() - - StringType = reflect.TypeOf((*string)(nil)).Elem() - BoolType = reflect.TypeOf((*bool)(nil)).Elem() - ByteType = reflect.TypeOf((*byte)(nil)).Elem() - BytesType = reflect.SliceOf(ByteType) - - TimeType = reflect.TypeOf((*time.Time)(nil)).Elem() - BigFloatType = reflect.TypeOf((*big.Float)(nil)).Elem() - NullFloat64Type = reflect.TypeOf((*sql.NullFloat64)(nil)).Elem() - NullStringType = reflect.TypeOf((*sql.NullString)(nil)).Elem() - NullInt32Type = reflect.TypeOf((*sql.NullInt32)(nil)).Elem() - NullInt64Type = reflect.TypeOf((*sql.NullInt64)(nil)).Elem() - NullBoolType = reflect.TypeOf((*sql.NullBool)(nil)).Elem() -) - -// Type2SQLType generate SQLType acorrding Go's type -func Type2SQLType(t reflect.Type) (st SQLType) { - switch k := t.Kind(); k { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32: - st = SQLType{Int, 0, 0} - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32: - st = SQLType{UnsignedInt, 0, 0} - case reflect.Int64: - st = SQLType{BigInt, 0, 0} - case reflect.Uint64: - st = SQLType{UnsignedBigInt, 0, 0} - case reflect.Float32: - st = SQLType{Float, 0, 0} - case reflect.Float64: - st = SQLType{Double, 0, 0} - case reflect.Complex64, reflect.Complex128: - st = SQLType{Varchar, 64, 0} - case reflect.Array, reflect.Slice, reflect.Map: - if t.Elem() == ByteType { - st = SQLType{Blob, 0, 0} - } else { - st = SQLType{Text, 0, 0} - } - case reflect.Bool: - st = SQLType{Bool, 0, 0} - case reflect.String: - st = SQLType{Varchar, 255, 0} - case reflect.Struct: - if t.ConvertibleTo(TimeType) { - st = SQLType{DateTime, 0, 0} - } else if 
t.ConvertibleTo(NullFloat64Type) { - st = SQLType{Double, 0, 0} - } else if t.ConvertibleTo(NullStringType) { - st = SQLType{Varchar, 255, 0} - } else if t.ConvertibleTo(NullInt32Type) { - st = SQLType{Integer, 0, 0} - } else if t.ConvertibleTo(NullInt64Type) { - st = SQLType{BigInt, 0, 0} - } else if t.ConvertibleTo(NullBoolType) { - st = SQLType{Boolean, 0, 0} - } else { - // TODO need to handle association struct - st = SQLType{Text, 0, 0} - } - case reflect.Ptr: - st = Type2SQLType(t.Elem()) - default: - st = SQLType{Text, 0, 0} - } - return -} - -// SQLType2Type convert default sql type change to go types -func SQLType2Type(st SQLType) reflect.Type { - name := strings.ToUpper(st.Name) - switch name { - case Bit, TinyInt, SmallInt, MediumInt, Int, Integer, Serial: - return IntType - case BigInt, BigSerial: - return Int64Type - case UnsignedBit, UnsignedTinyInt, UnsignedSmallInt, UnsignedMediumInt, UnsignedInt: - return UintType - case UnsignedBigInt: - return Uint64Type - case Float, Real: - return Float32Type - case Double: - return Float64Type - case Char, NChar, Varchar, NVarchar, TinyText, Text, NText, MediumText, LongText, Enum, Set, Uuid, Clob, SysName: - return StringType - case TinyBlob, Blob, LongBlob, Bytea, Binary, MediumBlob, VarBinary, UniqueIdentifier: - return BytesType - case Bool: - return BoolType - case DateTime, Date, Time, TimeStamp, TimeStampz, SmallDateTime, Year: - return TimeType - case Decimal, Numeric, Money, SmallMoney: - return StringType - default: - return StringType - } -} - -// SQLTypeName returns sql type name -func SQLTypeName(tp string) string { - fields := strings.Split(tp, "(") - return fields[0] -} diff --git a/vendor/xorm.io/xorm/schemas/version.go b/vendor/xorm.io/xorm/schemas/version.go deleted file mode 100644 index ba789679..00000000 --- a/vendor/xorm.io/xorm/schemas/version.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2021 The Xorm Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package schemas - -// Version represents a database version -type Version struct { - Number string // the version number which could be compared - Level string - Edition string -} diff --git a/vendor/xorm.io/xorm/session.go b/vendor/xorm.io/xorm/session.go deleted file mode 100644 index 388678cd..00000000 --- a/vendor/xorm.io/xorm/session.go +++ /dev/null @@ -1,796 +0,0 @@ -// Copyright 2015 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xorm - -import ( - "context" - "crypto/rand" - "crypto/sha256" - "database/sql" - "encoding/hex" - "errors" - "fmt" - "hash/crc32" - "io" - "reflect" - "strconv" - "strings" - - "xorm.io/xorm/contexts" - "xorm.io/xorm/convert" - "xorm.io/xorm/core" - "xorm.io/xorm/internal/json" - "xorm.io/xorm/internal/statements" - "xorm.io/xorm/log" - "xorm.io/xorm/schemas" -) - -// ErrFieldIsNotExist columns does not exist -type ErrFieldIsNotExist struct { - FieldName string - TableName string -} - -func (e ErrFieldIsNotExist) Error() string { - return fmt.Sprintf("field %s is not exist on table %s", e.FieldName, e.TableName) -} - -// ErrFieldIsNotValid is not valid -type ErrFieldIsNotValid struct { - FieldName string - TableName string -} - -func (e ErrFieldIsNotValid) Error() string { - return fmt.Sprintf("field %s is not valid on table %s", e.FieldName, e.TableName) -} - -type sessionType bool - -const ( - engineSession sessionType = false - groupSession sessionType = true -) - -// Session keep a pointer to sql.DB and provides all execution of all -// kind of database operations. 
-type Session struct { - engine *Engine - tx *core.Tx - statement *statements.Statement - isAutoCommit bool - isCommitedOrRollbacked bool - isAutoClose bool - isClosed bool - prepareStmt bool - // Automatically reset the statement after operations that execute a SQL - // query such as Count(), Find(), Get(), ... - autoResetStatement bool - - // !nashtsai! storing these beans due to yet committed tx - afterInsertBeans map[interface{}]*[]func(interface{}) - afterUpdateBeans map[interface{}]*[]func(interface{}) - afterDeleteBeans map[interface{}]*[]func(interface{}) - // -- - - beforeClosures []func(interface{}) - afterClosures []func(interface{}) - afterProcessors []executedProcessor - - stmtCache map[uint32]*core.Stmt // key: hash.Hash32 of (queryStr, len(queryStr)) - txStmtCache map[uint32]*core.Stmt // for tx statement - - lastSQL string - lastSQLArgs []interface{} - - ctx context.Context - sessionType sessionType -} - -func newSessionID() string { - hash := sha256.New() - _, err := io.CopyN(hash, rand.Reader, 50) - if err != nil { - return "????????????????????" 
- } - md := hash.Sum(nil) - mdStr := hex.EncodeToString(md) - return mdStr[0:20] -} - -func newSession(engine *Engine) *Session { - var ctx context.Context - if engine.logSessionID { - ctx = context.WithValue(engine.defaultContext, log.SessionIDKey, newSessionID()) - } else { - ctx = engine.defaultContext - } - - session := &Session{ - ctx: ctx, - engine: engine, - tx: nil, - statement: statements.NewStatement( - engine.dialect, - engine.tagParser, - engine.DatabaseTZ, - ), - isClosed: false, - isAutoCommit: true, - isCommitedOrRollbacked: false, - isAutoClose: false, - autoResetStatement: true, - prepareStmt: false, - - afterInsertBeans: make(map[interface{}]*[]func(interface{})), - afterUpdateBeans: make(map[interface{}]*[]func(interface{})), - afterDeleteBeans: make(map[interface{}]*[]func(interface{})), - beforeClosures: make([]func(interface{}), 0), - afterClosures: make([]func(interface{}), 0), - afterProcessors: make([]executedProcessor, 0), - stmtCache: make(map[uint32]*core.Stmt), - txStmtCache: make(map[uint32]*core.Stmt), - - lastSQL: "", - lastSQLArgs: make([]interface{}, 0), - - sessionType: engineSession, - } - if engine.logSessionID { - session.ctx = context.WithValue(session.ctx, log.SessionKey, session) - } - return session -} - -// Close release the connection from pool -func (session *Session) Close() error { - for _, v := range session.stmtCache { - if err := v.Close(); err != nil { - return err - } - } - - for _, v := range session.txStmtCache { - if err := v.Close(); err != nil { - return err - } - } - - if !session.isClosed { - // When Close be called, if session is a transaction and do not call - // Commit or Rollback, then call Rollback. 
- if session.tx != nil && !session.isCommitedOrRollbacked { - if err := session.Rollback(); err != nil { - return err - } - } - session.tx = nil - session.stmtCache = nil - session.txStmtCache = nil - session.isClosed = true - } - return nil -} - -func (session *Session) db() *core.DB { - return session.engine.db -} - -// Engine returns session Engine -func (session *Session) Engine() *Engine { - return session.engine -} - -// Tx returns session tx -func (session *Session) Tx() *core.Tx { - return session.tx -} - -func (session *Session) getQueryer() core.Queryer { - if session.tx != nil { - return session.tx - } - return session.db() -} - -// ContextCache enable context cache or not -func (session *Session) ContextCache(context contexts.ContextCache) *Session { - session.statement.SetContextCache(context) - return session -} - -// IsClosed returns if session is closed -func (session *Session) IsClosed() bool { - return session.isClosed -} - -func (session *Session) resetStatement() { - if session.autoResetStatement { - session.statement.Reset() - session.prepareStmt = false - } -} - -// Prepare set a flag to session that should be prepare statement before execute query -func (session *Session) Prepare() *Session { - session.prepareStmt = true - return session -} - -// Before Apply before Processor, affected bean is passed to closure arg -func (session *Session) Before(closures func(interface{})) *Session { - if closures != nil { - session.beforeClosures = append(session.beforeClosures, closures) - } - return session -} - -// After Apply after Processor, affected bean is passed to closure arg -func (session *Session) After(closures func(interface{})) *Session { - if closures != nil { - session.afterClosures = append(session.afterClosures, closures) - } - return session -} - -// Table can input a string or pointer to struct for special a table to operate. 
-func (session *Session) Table(tableNameOrBean interface{}) *Session { - if err := session.statement.SetTable(tableNameOrBean); err != nil { - session.statement.LastError = err - } - return session -} - -// Alias set the table alias -func (session *Session) Alias(alias string) *Session { - session.statement.Alias(alias) - return session -} - -// NoCascade indicate that no cascade load child object -func (session *Session) NoCascade() *Session { - session.statement.UseCascade = false - return session -} - -// ForUpdate Set Read/Write locking for UPDATE -func (session *Session) ForUpdate() *Session { - session.statement.IsForUpdate = true - return session -} - -// NoAutoCondition disable generate SQL condition from beans -func (session *Session) NoAutoCondition(no ...bool) *Session { - session.statement.SetNoAutoCondition(no...) - return session -} - -// Limit provide limit and offset query condition -func (session *Session) Limit(limit int, start ...int) *Session { - session.statement.Limit(limit, start...) - return session -} - -// OrderBy provide order by query condition, the input parameter is the content -// after order by on a sql statement. -func (session *Session) OrderBy(order interface{}, args ...interface{}) *Session { - session.statement.OrderBy(order, args...) - return session -} - -// Desc provide desc order by query condition, the input parameters are columns. -func (session *Session) Desc(colNames ...string) *Session { - session.statement.Desc(colNames...) - return session -} - -// Asc provide asc order by query condition, the input parameters are columns. -func (session *Session) Asc(colNames ...string) *Session { - session.statement.Asc(colNames...) 
- return session -} - -// StoreEngine is only avialble mysql dialect currently -func (session *Session) StoreEngine(storeEngine string) *Session { - session.statement.StoreEngine = storeEngine - return session -} - -// Charset is only avialble mysql dialect currently -func (session *Session) Charset(charset string) *Session { - session.statement.Charset = charset - return session -} - -// Cascade indicates if loading sub Struct -func (session *Session) Cascade(trueOrFalse ...bool) *Session { - if len(trueOrFalse) >= 1 { - session.statement.UseCascade = trueOrFalse[0] - } - return session -} - -// MustLogSQL means record SQL or not and don't follow engine's setting -func (session *Session) MustLogSQL(logs ...bool) *Session { - showSQL := true - if len(logs) > 0 { - showSQL = logs[0] - } - session.ctx = context.WithValue(session.ctx, log.SessionShowSQLKey, showSQL) - return session -} - -// NoCache ask this session do not retrieve data from cache system and -// get data from database directly. -func (session *Session) NoCache() *Session { - session.statement.UseCache = false - return session -} - -// Join join_operator should be one of INNER, LEFT OUTER, CROSS etc - this will be prepended to JOIN -func (session *Session) Join(joinOperator string, tablename interface{}, condition string, args ...interface{}) *Session { - session.statement.Join(joinOperator, tablename, condition, args...) 
- return session -} - -// GroupBy Generate Group By statement -func (session *Session) GroupBy(keys string) *Session { - session.statement.GroupBy(keys) - return session -} - -// Having Generate Having statement -func (session *Session) Having(conditions string) *Session { - session.statement.Having(conditions) - return session -} - -// DB db return the wrapper of sql.DB -func (session *Session) DB() *core.DB { - return session.db() -} - -func (session *Session) canCache() bool { - if session.statement.RefTable == nil || - session.statement.JoinStr != "" || - session.statement.RawSQL != "" || - !session.statement.UseCache || - session.statement.IsForUpdate || - session.tx != nil || - len(session.statement.SelectStr) > 0 { - return false - } - return true -} - -func (session *Session) doPrepare(db *core.DB, sqlStr string) (stmt *core.Stmt, err error) { - crc := crc32.ChecksumIEEE([]byte(sqlStr)) - // TODO try hash(sqlStr+len(sqlStr)) - var has bool - stmt, has = session.stmtCache[crc] - if !has { - stmt, err = db.PrepareContext(session.ctx, sqlStr) - if err != nil { - return nil, err - } - session.stmtCache[crc] = stmt - } - return -} - -func (session *Session) doPrepareTx(sqlStr string) (stmt *core.Stmt, err error) { - crc := crc32.ChecksumIEEE([]byte(sqlStr)) - // TODO try hash(sqlStr+len(sqlStr)) - var has bool - stmt, has = session.txStmtCache[crc] - if !has { - stmt, err = session.tx.PrepareContext(session.ctx, sqlStr) - if err != nil { - return nil, err - } - session.txStmtCache[crc] = stmt - } - return -} - -func getField(dataStruct *reflect.Value, table *schemas.Table, colName string, idx int) (*schemas.Column, *reflect.Value, error) { - col := table.GetColumnIdx(colName, idx) - if col == nil { - return nil, nil, ErrFieldIsNotExist{colName, table.Name} - } - - fieldValue, err := col.ValueOfV(dataStruct) - if err != nil { - return nil, nil, err - } - if fieldValue == nil { - return nil, nil, ErrFieldIsNotValid{colName, table.Name} - } - if 
!fieldValue.IsValid() || !fieldValue.CanSet() { - return nil, nil, ErrFieldIsNotValid{colName, table.Name} - } - - return col, fieldValue, nil -} - -// Cell cell is a result of one column field -type Cell *interface{} - -func (session *Session) rows2Beans(rows *core.Rows, fields []string, types []*sql.ColumnType, - table *schemas.Table, newElemFunc func([]string) reflect.Value, - sliceValueSetFunc func(*reflect.Value, schemas.PK) error, -) error { - for rows.Next() { - newValue := newElemFunc(fields) - bean := newValue.Interface() - dataStruct := newValue.Elem() - - // handle beforeClosures - scanResults, err := session.row2Slice(rows, fields, types, bean) - if err != nil { - return err - } - pk, err := session.slice2Bean(scanResults, fields, bean, &dataStruct, table) - if err != nil { - return err - } - session.afterProcessors = append(session.afterProcessors, executedProcessor{ - fun: func(*Session, interface{}) error { - return sliceValueSetFunc(&newValue, pk) - }, - session: session, - bean: bean, - }) - } - return rows.Err() -} - -func (session *Session) row2Slice(rows *core.Rows, fields []string, types []*sql.ColumnType, bean interface{}) ([]interface{}, error) { - for _, closure := range session.beforeClosures { - closure(bean) - } - - scanResults := make([]interface{}, len(fields)) - for i := 0; i < len(fields); i++ { - var cell interface{} - scanResults[i] = &cell - } - if err := session.engine.scan(rows, fields, types, scanResults...); err != nil { - return nil, err - } - - executeBeforeSet(bean, fields, scanResults) - - return scanResults, nil -} - -func setJSON(fieldValue *reflect.Value, fieldType reflect.Type, scanResult interface{}) error { - bs, ok := convert.AsBytes(scanResult) - if !ok { - return fmt.Errorf("unsupported database data type: %#v", scanResult) - } - if len(bs) == 0 { - return nil - } - - if fieldType.Kind() == reflect.String { - fieldValue.SetString(string(bs)) - return nil - } - - if fieldValue.CanAddr() { - err := 
json.DefaultJSONHandler.Unmarshal(bs, fieldValue.Addr().Interface()) - if err != nil { - return err - } - } else { - x := reflect.New(fieldType) - err := json.DefaultJSONHandler.Unmarshal(bs, x.Interface()) - if err != nil { - return err - } - fieldValue.Set(x.Elem()) - } - return nil -} - -func asKind(vv reflect.Value, tp reflect.Type) (interface{}, error) { - switch tp.Kind() { - case reflect.Ptr: - return asKind(vv.Elem(), tp.Elem()) - case reflect.Int64: - return vv.Int(), nil - case reflect.Int: - return int(vv.Int()), nil - case reflect.Int32: - return int32(vv.Int()), nil - case reflect.Int16: - return int16(vv.Int()), nil - case reflect.Int8: - return int8(vv.Int()), nil - case reflect.Uint64: - return vv.Uint(), nil - case reflect.Uint: - return uint(vv.Uint()), nil - case reflect.Uint32: - return uint32(vv.Uint()), nil - case reflect.Uint16: - return uint16(vv.Uint()), nil - case reflect.Uint8: - return uint8(vv.Uint()), nil - case reflect.String: - return vv.String(), nil - case reflect.Slice: - if tp.Elem().Kind() == reflect.Uint8 { - v, err := strconv.ParseInt(string(vv.Interface().([]byte)), 10, 64) - if err != nil { - return nil, err - } - return v, nil - } - } - return nil, fmt.Errorf("unsupported primary key type: %v, %v", tp, vv) -} - -var uint8ZeroValue = reflect.ValueOf(uint8(0)) - -func (session *Session) convertBeanField(col *schemas.Column, fieldValue *reflect.Value, - scanResult interface{}, table *schemas.Table, -) error { - v, ok := scanResult.(*interface{}) - if ok { - scanResult = *v - } - if scanResult == nil { - return nil - } - - if fieldValue.CanAddr() { - if structConvert, ok := fieldValue.Addr().Interface().(convert.Conversion); ok { - data, ok := convert.AsBytes(scanResult) - if !ok { - return fmt.Errorf("cannot convert %#v as bytes", scanResult) - } - if data == nil { - return nil - } - return structConvert.FromDB(data) - } - } - - if structConvert, ok := fieldValue.Interface().(convert.Conversion); ok { - data, ok := 
convert.AsBytes(scanResult) - if !ok { - return fmt.Errorf("cannot convert %#v as bytes", scanResult) - } - if data == nil { - return nil - } - - if fieldValue.Kind() == reflect.Ptr && fieldValue.IsNil() { - fieldValue.Set(reflect.New(fieldValue.Type().Elem())) - return fieldValue.Interface().(convert.Conversion).FromDB(data) - } - return structConvert.FromDB(data) - } - - vv := reflect.ValueOf(scanResult) - fieldType := fieldValue.Type() - - if col.IsJSON { - return setJSON(fieldValue, fieldType, scanResult) - } - - switch fieldType.Kind() { - case reflect.Ptr: - var e reflect.Value - if fieldValue.IsNil() { - e = reflect.New(fieldType.Elem()).Elem() - } else { - e = fieldValue.Elem() - } - if err := session.convertBeanField(col, &e, scanResult, table); err != nil { - return err - } - if fieldValue.IsNil() { - fieldValue.Set(e.Addr()) - } - return nil - case reflect.Complex64, reflect.Complex128: - return setJSON(fieldValue, fieldType, scanResult) - case reflect.Slice: - bs, ok := convert.AsBytes(scanResult) - if ok && fieldType.Elem().Kind() == reflect.Uint8 { - if col.SQLType.IsText() { - x := reflect.New(fieldType) - err := json.DefaultJSONHandler.Unmarshal(bs, x.Interface()) - if err != nil { - return err - } - fieldValue.Set(x.Elem()) - } else { - fieldValue.Set(reflect.ValueOf(bs)) - } - return nil - } - case reflect.Array: - bs, ok := convert.AsBytes(scanResult) - if ok && fieldType.Elem().Kind() == reflect.Uint8 { - if col.SQLType.IsText() { - x := reflect.New(fieldType) - err := json.DefaultJSONHandler.Unmarshal(bs, x.Interface()) - if err != nil { - return err - } - fieldValue.Set(x.Elem()) - } else { - if fieldValue.Len() < vv.Len() { - return fmt.Errorf("Set field %s[Array] failed because of data too long", col.Name) - } - for i := 0; i < fieldValue.Len(); i++ { - if i < vv.Len() { - fieldValue.Index(i).Set(vv.Index(i)) - } else { - fieldValue.Index(i).Set(uint8ZeroValue) - } - } - } - return nil - } - case reflect.Struct: - if 
fieldType.ConvertibleTo(schemas.BigFloatType) { - v, err := convert.AsBigFloat(scanResult) - if err != nil { - return err - } - fieldValue.Set(reflect.ValueOf(v).Elem().Convert(fieldType)) - return nil - } - - if fieldType.ConvertibleTo(schemas.TimeType) { - dbTZ := session.engine.DatabaseTZ - if col.TimeZone != nil { - dbTZ = col.TimeZone - } - - t, err := convert.AsTime(scanResult, dbTZ, session.engine.TZLocation) - if err != nil { - return err - } - - fieldValue.Set(reflect.ValueOf(*t).Convert(fieldType)) - return nil - } else if nulVal, ok := fieldValue.Addr().Interface().(sql.Scanner); ok { - err := nulVal.Scan(scanResult) - if err == nil { - return nil - } - session.engine.logger.Errorf("sql.Sanner error: %v", err) - } else if session.statement.UseCascade { - table, err := session.engine.tagParser.ParseWithCache(*fieldValue) - if err != nil { - return err - } - - if len(table.PrimaryKeys) != 1 { - return errors.New("unsupported non or composited primary key cascade") - } - pk := make(schemas.PK, len(table.PrimaryKeys)) - pk[0], err = asKind(vv, reflect.TypeOf(scanResult)) - if err != nil { - return err - } - - if !pk.IsZero() { - // !nashtsai! 
TODO for hasOne relationship, it's preferred to use join query for eager fetch - // however, also need to consider adding a 'lazy' attribute to xorm tag which allow hasOne - // property to be fetched lazily - structInter := reflect.New(fieldValue.Type()) - has, err := session.ID(pk).NoCascade().get(structInter.Interface()) - if err != nil { - return err - } - if has { - fieldValue.Set(structInter.Elem()) - } else { - return errors.New("cascade obj is not exist") - } - } - return nil - } - } // switch fieldType.Kind() - - return convert.AssignValue(fieldValue.Addr(), scanResult) -} - -func (session *Session) slice2Bean(scanResults []interface{}, fields []string, bean interface{}, dataStruct *reflect.Value, table *schemas.Table) (schemas.PK, error) { - defer func() { - executeAfterSet(bean, fields, scanResults) - }() - - buildAfterProcessors(session, bean) - - tempMap := make(map[string]int) - var pk schemas.PK - for i, colName := range fields { - var idx int - lKey := strings.ToLower(colName) - var ok bool - - if idx, ok = tempMap[lKey]; !ok { - idx = 0 - } else { - idx++ - } - tempMap[lKey] = idx - - col, fieldValue, err := getField(dataStruct, table, colName, idx) - if _, ok := err.(ErrFieldIsNotExist); ok { - continue - } else if err != nil { - return nil, err - } - - if fieldValue == nil { - continue - } - - if err := session.convertBeanField(col, fieldValue, scanResults[i], table); err != nil { - return nil, err - } - if col.IsPrimaryKey { - pk = append(pk, scanResults[i]) - } - } - return pk, nil -} - -// saveLastSQL stores executed query information -func (session *Session) saveLastSQL(sql string, args ...interface{}) { - session.lastSQL = sql - session.lastSQLArgs = args -} - -// LastSQL returns last query information -func (session *Session) LastSQL() (string, []interface{}) { - return session.lastSQL, session.lastSQLArgs -} - -// Unscoped always disable struct tag "deleted" -func (session *Session) Unscoped() *Session { - session.statement.SetUnscoped() - 
return session -} - -func (session *Session) incrVersionFieldValue(fieldValue *reflect.Value) { - switch fieldValue.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - fieldValue.SetInt(fieldValue.Int() + 1) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - fieldValue.SetUint(fieldValue.Uint() + 1) - } -} - -// Context sets the context on this session -func (session *Session) Context(ctx context.Context) *Session { - if session.engine.logSessionID && session.ctx != nil { - ctx = context.WithValue(ctx, log.SessionIDKey, session.ctx.Value(log.SessionIDKey)) - ctx = context.WithValue(ctx, log.SessionKey, session.ctx.Value(log.SessionKey)) - ctx = context.WithValue(ctx, log.SessionShowSQLKey, session.ctx.Value(log.SessionShowSQLKey)) - } - - session.ctx = ctx - return session -} - -// PingContext test if database is ok -func (session *Session) PingContext(ctx context.Context) error { - if session.isAutoClose { - defer session.Close() - } - - session.engine.logger.Infof("PING DATABASE %v", session.engine.DriverName()) - return session.DB().PingContext(ctx) -} diff --git a/vendor/xorm.io/xorm/session_cols.go b/vendor/xorm.io/xorm/session_cols.go deleted file mode 100644 index ca3589ab..00000000 --- a/vendor/xorm.io/xorm/session_cols.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2017 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package xorm - -import ( - "reflect" - "strings" - "time" - - "xorm.io/xorm/schemas" -) - -func setColumnInt(bean interface{}, col *schemas.Column, t int64) { - v, err := col.ValueOf(bean) - if err != nil { - return - } - if v.CanSet() { - switch v.Type().Kind() { - case reflect.Int, reflect.Int64, reflect.Int32: - v.SetInt(t) - case reflect.Uint, reflect.Uint64, reflect.Uint32: - v.SetUint(uint64(t)) - } - } -} - -func setColumnTime(bean interface{}, col *schemas.Column, t time.Time) { - v, err := col.ValueOf(bean) - if err != nil { - return - } - if v.CanSet() { - switch v.Type().Kind() { - case reflect.Struct: - v.Set(reflect.ValueOf(t).Convert(v.Type())) - case reflect.Int, reflect.Int64, reflect.Int32: - v.SetInt(t.Unix()) - case reflect.Uint, reflect.Uint64, reflect.Uint32: - v.SetUint(uint64(t.Unix())) - } - } -} - -func getFlagForColumn(m map[string]bool, col *schemas.Column) (val bool, has bool) { - if len(m) == 0 { - return false, false - } - - n := len(col.Name) - - for mk := range m { - if len(mk) != n { - continue - } - if strings.EqualFold(mk, col.Name) { - return m[mk], true - } - } - - return false, false -} - -// Incr provides a query string like "count = count + 1" -func (session *Session) Incr(column string, arg ...interface{}) *Session { - session.statement.Incr(column, arg...) - return session -} - -// Decr provides a query string like "count = count - 1" -func (session *Session) Decr(column string, arg ...interface{}) *Session { - session.statement.Decr(column, arg...) 
- return session -} - -// SetExpr provides a query string like "column = {expression}" -func (session *Session) SetExpr(column string, expression interface{}) *Session { - session.statement.SetExpr(column, expression) - return session -} - -// Select provides some columns to special -func (session *Session) Select(str string) *Session { - session.statement.Select(str) - return session -} - -// Cols provides some columns to special -func (session *Session) Cols(columns ...string) *Session { - session.statement.Cols(columns...) - return session -} - -// AllCols ask all columns -func (session *Session) AllCols() *Session { - session.statement.AllCols() - return session -} - -// MustCols specify some columns must use even if they are empty -func (session *Session) MustCols(columns ...string) *Session { - session.statement.MustCols(columns...) - return session -} - -// UseBool automatically retrieve condition according struct, but -// if struct has bool field, it will ignore them. So use UseBool -// to tell system to do not ignore them. -// If no parameters, it will use all the bool field of struct, or -// it will use parameters's columns -func (session *Session) UseBool(columns ...string) *Session { - session.statement.UseBool(columns...) - return session -} - -// Distinct use for distinct columns. Caution: when you are using cache, -// distinct will not be cached because cache system need id, -// but distinct will not provide id -func (session *Session) Distinct(columns ...string) *Session { - session.statement.Distinct(columns...) - return session -} - -// Omit Only not use the parameters as select or update columns -func (session *Session) Omit(columns ...string) *Session { - session.statement.Omit(columns...) - return session -} - -// Nullable Set null when column is zero-value and nullable for update -func (session *Session) Nullable(columns ...string) *Session { - session.statement.Nullable(columns...) 
- return session -} - -// NoAutoTime means do not automatically give created field and updated field -// the current time on the current session temporarily -func (session *Session) NoAutoTime() *Session { - session.statement.UseAutoTime = false - return session -} diff --git a/vendor/xorm.io/xorm/session_cond.go b/vendor/xorm.io/xorm/session_cond.go deleted file mode 100644 index 25d17148..00000000 --- a/vendor/xorm.io/xorm/session_cond.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2017 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xorm - -import "xorm.io/builder" - -// SQL provides raw sql input parameter. When you have a complex SQL statement -// and cannot use Where, Id, In and etc. Methods to describe, you can use SQL. -func (session *Session) SQL(query interface{}, args ...interface{}) *Session { - session.statement.SQL(query, args...) - return session -} - -// Where provides custom query condition. -func (session *Session) Where(query interface{}, args ...interface{}) *Session { - session.statement.Where(query, args...) - return session -} - -// And provides custom query condition. -func (session *Session) And(query interface{}, args ...interface{}) *Session { - session.statement.And(query, args...) - return session -} - -// Or provides custom query condition. -func (session *Session) Or(query interface{}, args ...interface{}) *Session { - session.statement.Or(query, args...) - return session -} - -// ID provides converting id as a query condition -func (session *Session) ID(id interface{}) *Session { - session.statement.ID(id) - return session -} - -// In provides a query string like "id in (1, 2, 3)" -func (session *Session) In(column string, args ...interface{}) *Session { - session.statement.In(column, args...) 
- return session -} - -// NotIn provides a query string like "id in (1, 2, 3)" -func (session *Session) NotIn(column string, args ...interface{}) *Session { - session.statement.NotIn(column, args...) - return session -} - -// Conds returns session query conditions except auto bean conditions -func (session *Session) Conds() builder.Cond { - return session.statement.Conds() -} diff --git a/vendor/xorm.io/xorm/session_delete.go b/vendor/xorm.io/xorm/session_delete.go deleted file mode 100644 index 322d5a44..00000000 --- a/vendor/xorm.io/xorm/session_delete.go +++ /dev/null @@ -1,257 +0,0 @@ -// Copyright 2016 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xorm - -import ( - "errors" - "fmt" - "strconv" - - "xorm.io/builder" - "xorm.io/xorm/caches" - "xorm.io/xorm/internal/utils" - "xorm.io/xorm/schemas" -) - -var ( - // ErrNeedDeletedCond delete needs less one condition error - ErrNeedDeletedCond = errors.New("Delete action needs at least one condition") - - // ErrNotImplemented not implemented - ErrNotImplemented = errors.New("Not implemented") -) - -func (session *Session) cacheDelete(table *schemas.Table, tableName, sqlStr string, args ...interface{}) error { - if table == nil || - session.tx != nil { - return ErrCacheFailed - } - - for _, filter := range session.engine.dialect.Filters() { - sqlStr = filter.Do(sqlStr) - } - - newsql := session.statement.ConvertIDSQL(sqlStr) - if newsql == "" { - return ErrCacheFailed - } - - cacher := session.engine.cacherMgr.GetCacher(tableName) - pkColumns := table.PKColumns() - ids, err := caches.GetCacheSql(cacher, tableName, newsql, args) - if err != nil { - rows, err := session.queryRows(newsql, args...) 
- if err != nil { - return err - } - defer rows.Close() - - resultsSlice, err := session.engine.ScanStringMaps(rows) - if err != nil { - return err - } - ids = make([]schemas.PK, 0) - if len(resultsSlice) > 0 { - for _, data := range resultsSlice { - var id int64 - var pk schemas.PK = make([]interface{}, 0) - for _, col := range pkColumns { - if v, ok := data[col.Name]; !ok { - return errors.New("no id") - } else if col.SQLType.IsText() { - pk = append(pk, v) - } else if col.SQLType.IsNumeric() { - id, err = strconv.ParseInt(v, 10, 64) - if err != nil { - return err - } - pk = append(pk, id) - } else { - return errors.New("not supported primary key type") - } - } - ids = append(ids, pk) - } - } - } - - for _, id := range ids { - session.engine.logger.Debugf("[cache] delete cache obj: %v, %v", tableName, id) - sid, err := id.ToString() - if err != nil { - return err - } - cacher.DelBean(tableName, sid) - } - session.engine.logger.Debugf("[cache] clear cache table: %v", tableName) - cacher.ClearIds(tableName) - return nil -} - -// Delete records, bean's non-empty fields are conditions -func (session *Session) Delete(beans ...interface{}) (int64, error) { - if session.isAutoClose { - defer session.Close() - } - - if session.statement.LastError != nil { - return 0, session.statement.LastError - } - - var ( - condWriter = builder.NewWriter() - err error - bean interface{} - ) - if len(beans) > 0 { - bean = beans[0] - if err = session.statement.SetRefBean(bean); err != nil { - return 0, err - } - - executeBeforeClosures(session, bean) - - if processor, ok := interface{}(bean).(BeforeDeleteProcessor); ok { - processor.BeforeDelete() - } - - if err = session.statement.MergeConds(bean); err != nil { - return 0, err - } - } - - if err = session.statement.Conds().WriteTo(session.statement.QuoteReplacer(condWriter)); err != nil { - return 0, err - } - - pLimitN := session.statement.LimitN - if condWriter.Len() == 0 && (pLimitN == nil || *pLimitN == 0) { - return 0, 
ErrNeedDeletedCond - } - - tableNameNoQuote := session.statement.TableName() - tableName := session.engine.Quote(tableNameNoQuote) - table := session.statement.RefTable - deleteSQLWriter := builder.NewWriter() - fmt.Fprintf(deleteSQLWriter, "DELETE FROM %v", tableName) - if condWriter.Len() > 0 { - fmt.Fprintf(deleteSQLWriter, " WHERE %v", condWriter.String()) - deleteSQLWriter.Append(condWriter.Args()...) - } - - orderSQLWriter := builder.NewWriter() - if err := session.statement.WriteOrderBy(orderSQLWriter); err != nil { - return 0, err - } - - if pLimitN != nil && *pLimitN > 0 { - limitNValue := *pLimitN - if _, err := fmt.Fprintf(orderSQLWriter, " LIMIT %d", limitNValue); err != nil { - return 0, err - } - } - - orderCondWriter := builder.NewWriter() - if orderSQLWriter.Len() > 0 { - switch session.engine.dialect.URI().DBType { - case schemas.POSTGRES: - if condWriter.Len() > 0 { - fmt.Fprintf(orderCondWriter, " AND ") - } else { - fmt.Fprintf(orderCondWriter, " WHERE ") - } - fmt.Fprintf(orderCondWriter, "ctid IN (SELECT ctid FROM %s%s)", tableName, orderSQLWriter.String()) - orderCondWriter.Append(orderSQLWriter.Args()...) - case schemas.SQLITE: - if condWriter.Len() > 0 { - fmt.Fprintf(orderCondWriter, " AND ") - } else { - fmt.Fprintf(orderCondWriter, " WHERE ") - } - fmt.Fprintf(orderCondWriter, "rowid IN (SELECT rowid FROM %s%s)", tableName, orderSQLWriter.String()) - // TODO: how to handle delete limit on mssql? - case schemas.MSSQL: - return 0, ErrNotImplemented - default: - fmt.Fprint(orderCondWriter, orderSQLWriter.String()) - orderCondWriter.Append(orderSQLWriter.Args()...) - } - } - - realSQLWriter := builder.NewWriter() - argsForCache := make([]interface{}, 0, len(deleteSQLWriter.Args())*2) - copy(argsForCache, deleteSQLWriter.Args()) - argsForCache = append(deleteSQLWriter.Args(), argsForCache...) 
- if session.statement.GetUnscoped() || table == nil || table.DeletedColumn() == nil { // tag "deleted" is disabled - if err := utils.WriteBuilder(realSQLWriter, deleteSQLWriter, orderCondWriter); err != nil { - return 0, err - } - } else { - deletedColumn := table.DeletedColumn() - if _, err := fmt.Fprintf(realSQLWriter, "UPDATE %v SET %v = ? WHERE %v", - session.engine.Quote(session.statement.TableName()), - session.engine.Quote(deletedColumn.Name), - condWriter.String()); err != nil { - return 0, err - } - val, t, err := session.engine.nowTime(deletedColumn) - if err != nil { - return 0, err - } - realSQLWriter.Append(val) - realSQLWriter.Append(condWriter.Args()...) - - if err := utils.WriteBuilder(realSQLWriter, orderCondWriter); err != nil { - return 0, err - } - - colName := deletedColumn.Name - session.afterClosures = append(session.afterClosures, func(bean interface{}) { - col := table.GetColumn(colName) - setColumnTime(bean, col, t) - }) - } - - if cacher := session.engine.GetCacher(tableNameNoQuote); cacher != nil && session.statement.UseCache { - _ = session.cacheDelete(table, tableNameNoQuote, deleteSQLWriter.String(), argsForCache...) - } - - session.statement.RefTable = table - res, err := session.exec(realSQLWriter.String(), realSQLWriter.Args()...) - if err != nil { - return 0, err - } - - if bean != nil { - // handle after delete processors - if session.isAutoCommit { - for _, closure := range session.afterClosures { - closure(bean) - } - if processor, ok := interface{}(bean).(AfterDeleteProcessor); ok { - processor.AfterDelete() - } - } else { - lenAfterClosures := len(session.afterClosures) - if lenAfterClosures > 0 && len(beans) > 0 { - if value, has := session.afterDeleteBeans[beans[0]]; has && value != nil { - *value = append(*value, session.afterClosures...) 
- } else { - afterClosures := make([]func(interface{}), lenAfterClosures) - copy(afterClosures, session.afterClosures) - session.afterDeleteBeans[bean] = &afterClosures - } - } else { - if _, ok := interface{}(bean).(AfterDeleteProcessor); ok { - session.afterDeleteBeans[bean] = nil - } - } - } - } - cleanupProcessorsClosures(&session.afterClosures) - // -- - - return res.RowsAffected() -} diff --git a/vendor/xorm.io/xorm/session_exist.go b/vendor/xorm.io/xorm/session_exist.go deleted file mode 100644 index b5e4a655..00000000 --- a/vendor/xorm.io/xorm/session_exist.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2017 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xorm - -// Exist returns true if the record exist otherwise return false -func (session *Session) Exist(bean ...interface{}) (bool, error) { - if session.isAutoClose { - defer session.Close() - } - - if session.statement.LastError != nil { - return false, session.statement.LastError - } - - sqlStr, args, err := session.statement.GenExistSQL(bean...) - if err != nil { - return false, err - } - - rows, err := session.queryRows(sqlStr, args...) - if err != nil { - return false, err - } - defer rows.Close() - - if rows.Next() { - return true, nil - } - return false, rows.Err() -} diff --git a/vendor/xorm.io/xorm/session_find.go b/vendor/xorm.io/xorm/session_find.go deleted file mode 100644 index 2270454b..00000000 --- a/vendor/xorm.io/xorm/session_find.go +++ /dev/null @@ -1,483 +0,0 @@ -// Copyright 2016 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package xorm - -import ( - "errors" - "reflect" - - "xorm.io/builder" - "xorm.io/xorm/caches" - "xorm.io/xorm/convert" - "xorm.io/xorm/internal/statements" - "xorm.io/xorm/internal/utils" - "xorm.io/xorm/schemas" -) - -const ( - tpStruct = iota - tpNonStruct -) - -// Find retrieve records from table, condiBeans's non-empty fields -// are conditions. beans could be []Struct, []*Struct, map[int64]Struct -// map[int64]*Struct -func (session *Session) Find(rowsSlicePtr interface{}, condiBean ...interface{}) error { - if session.isAutoClose { - defer session.Close() - } - return session.find(rowsSlicePtr, condiBean...) -} - -// FindAndCount find the results and also return the counts -func (session *Session) FindAndCount(rowsSlicePtr interface{}, condiBean ...interface{}) (int64, error) { - if session.isAutoClose { - defer session.Close() - } - - session.autoResetStatement = false - err := session.find(rowsSlicePtr, condiBean...) - if err != nil { - return 0, err - } - - sliceValue := reflect.Indirect(reflect.ValueOf(rowsSlicePtr)) - if sliceValue.Kind() != reflect.Slice && sliceValue.Kind() != reflect.Map { - return 0, errors.New("needs a pointer to a slice or a map") - } - - sliceElementType := sliceValue.Type().Elem() - if sliceElementType.Kind() == reflect.Ptr { - sliceElementType = sliceElementType.Elem() - } - session.autoResetStatement = true - - if session.statement.SelectStr != "" { - session.statement.SelectStr = "" - } - if len(session.statement.ColumnMap) > 0 && !session.statement.IsDistinct { - session.statement.ColumnMap = []string{} - } - session.statement.ResetOrderBy() - if session.statement.LimitN != nil { - session.statement.LimitN = nil - } - if session.statement.Start > 0 { - session.statement.Start = 0 - } - - // session has stored the conditions so we use `unscoped` to avoid duplicated condition. 
- if sliceElementType.Kind() == reflect.Struct { - return session.Unscoped().Count(reflect.New(sliceElementType).Interface()) - } - - return session.Unscoped().Count() -} - -func (session *Session) find(rowsSlicePtr interface{}, condiBean ...interface{}) error { - defer session.resetStatement() - if session.statement.LastError != nil { - return session.statement.LastError - } - - sliceValue := reflect.Indirect(reflect.ValueOf(rowsSlicePtr)) - isSlice := sliceValue.Kind() == reflect.Slice - isMap := sliceValue.Kind() == reflect.Map - if !isSlice && !isMap { - return errors.New("needs a pointer to a slice or a map") - } - - sliceElementType := sliceValue.Type().Elem() - - tp := tpStruct - if session.statement.RefTable == nil { - if sliceElementType.Kind() == reflect.Ptr { - if sliceElementType.Elem().Kind() == reflect.Struct { - pv := reflect.New(sliceElementType.Elem()) - if err := session.statement.SetRefValue(pv); err != nil { - return err - } - } else { - tp = tpNonStruct - } - } else if sliceElementType.Kind() == reflect.Struct { - pv := reflect.New(sliceElementType) - if err := session.statement.SetRefValue(pv); err != nil { - return err - } - } else { - tp = tpNonStruct - } - } - - var ( - table = session.statement.RefTable - addedTableName = (len(session.statement.JoinStr) > 0) - autoCond builder.Cond - ) - if tp == tpStruct { - if !session.statement.NoAutoCondition && len(condiBean) > 0 { - condTable, err := session.engine.tagParser.Parse(reflect.ValueOf(condiBean[0])) - if err != nil { - return err - } - autoCond, err = session.statement.BuildConds(condTable, condiBean[0], true, true, false, true, addedTableName) - if err != nil { - return err - } - } else { - if col := table.DeletedColumn(); col != nil && !session.statement.GetUnscoped() { // tag "deleted" is enabled - autoCond = session.statement.CondDeleted(col) - } - } - } - - // if it's a map with Cols but primary key not in column list, we still need the primary key - if isMap && 
!session.statement.ColumnMap.IsEmpty() { - for _, k := range session.statement.RefTable.PrimaryKeys { - session.statement.ColumnMap.Add(k) - } - } - - sqlStr, args, err := session.statement.GenFindSQL(autoCond) - if err != nil { - return err - } - - if session.statement.ColumnMap.IsEmpty() && session.canCache() { - if cacher := session.engine.GetCacher(session.statement.TableName()); cacher != nil && - !session.statement.IsDistinct && - !session.statement.GetUnscoped() { - err = session.cacheFind(sliceElementType, sqlStr, rowsSlicePtr, args...) - if err != ErrCacheFailed { - return err - } - session.engine.logger.Warnf("Cache Find Failed") - } - } - - return session.noCacheFind(table, sliceValue, sqlStr, args...) -} - -func (session *Session) noCacheFind(table *schemas.Table, containerValue reflect.Value, sqlStr string, args ...interface{}) error { - elemType := containerValue.Type().Elem() - var isPointer bool - if elemType.Kind() == reflect.Ptr { - isPointer = true - elemType = elemType.Elem() - } - if elemType.Kind() == reflect.Ptr { - return errors.New("pointer to pointer is not supported") - } - - rows, err := session.queryRows(sqlStr, args...) 
- if err != nil { - return err - } - defer rows.Close() - - fields, err := rows.Columns() - if err != nil { - return err - } - - types, err := rows.ColumnTypes() - if err != nil { - return err - } - - newElemFunc := func(fields []string) reflect.Value { - return utils.New(elemType, len(fields), len(fields)) - } - - var containerValueSetFunc func(*reflect.Value, schemas.PK) error - - if containerValue.Kind() == reflect.Slice { - containerValueSetFunc = func(newValue *reflect.Value, pk schemas.PK) error { - if isPointer { - containerValue.Set(reflect.Append(containerValue, newValue.Elem().Addr())) - } else { - containerValue.Set(reflect.Append(containerValue, newValue.Elem())) - } - return nil - } - } else { - keyType := containerValue.Type().Key() - if len(table.PrimaryKeys) == 0 { - return errors.New("don't support multiple primary key's map has non-slice key type") - } - if len(table.PrimaryKeys) > 1 && keyType.Kind() != reflect.Slice { - return errors.New("don't support multiple primary key's map has non-slice key type") - } - - containerValueSetFunc = func(newValue *reflect.Value, pk schemas.PK) error { - keyValue := reflect.New(keyType) - cols := table.PKColumns() - if len(cols) == 1 { - if err := convert.AssignValue(keyValue, pk[0]); err != nil { - return err - } - } else { - keyValue.Set(reflect.ValueOf(&pk)) - } - - if isPointer { - containerValue.SetMapIndex(keyValue.Elem(), newValue.Elem().Addr()) - } else { - containerValue.SetMapIndex(keyValue.Elem(), newValue.Elem()) - } - return nil - } - } - - if elemType.Kind() == reflect.Struct { - newValue := newElemFunc(fields) - tb, err := session.engine.tagParser.ParseWithCache(newValue) - if err != nil { - return err - } - err = session.rows2Beans(rows, fields, types, tb, newElemFunc, containerValueSetFunc) - rows.Close() - if err != nil { - return err - } - return session.executeProcessors() - } - - for rows.Next() { - newValue := newElemFunc(fields) - bean := newValue.Interface() - - switch elemType.Kind() { 
- case reflect.Slice: - err = session.getSlice(rows, types, fields, bean) - case reflect.Map: - err = session.getMap(rows, types, fields, bean) - default: - err = rows.Scan(bean) - } - if err != nil { - return err - } - - if err := containerValueSetFunc(&newValue, nil); err != nil { - return err - } - } - return rows.Err() -} - -func (session *Session) cacheFind(t reflect.Type, sqlStr string, rowsSlicePtr interface{}, args ...interface{}) (err error) { - if !session.canCache() || - utils.IndexNoCase(sqlStr, "having") != -1 || - utils.IndexNoCase(sqlStr, "group by") != -1 { - return ErrCacheFailed - } - - tableName := session.statement.TableName() - cacher := session.engine.cacherMgr.GetCacher(tableName) - if cacher == nil { - return nil - } - - for _, filter := range session.engine.dialect.Filters() { - sqlStr = filter.Do(sqlStr) - } - - newsql := session.statement.ConvertIDSQL(sqlStr) - if newsql == "" { - return ErrCacheFailed - } - - table := session.statement.RefTable - ids, err := caches.GetCacheSql(cacher, tableName, newsql, args) - if err != nil { - rows, err := session.queryRows(newsql, args...) 
- if err != nil { - return err - } - defer rows.Close() - - var i int - ids = make([]schemas.PK, 0) - for rows.Next() { - i++ - if i > 500 { - session.engine.logger.Debugf("[cacheFind] ids length > 500, no cache") - return ErrCacheFailed - } - res := make([]string, len(table.PrimaryKeys)) - err = rows.ScanSlice(&res) - if err != nil { - return err - } - var pk schemas.PK = make([]interface{}, len(table.PrimaryKeys)) - for i, col := range table.PKColumns() { - pk[i], err = col.ConvertID(res[i]) - if err != nil { - return err - } - } - - ids = append(ids, pk) - } - if rows.Err() != nil { - return rows.Err() - } - - session.engine.logger.Debugf("[cache] cache sql: %v, %v, %v, %v, %v", ids, tableName, sqlStr, newsql, args) - err = caches.PutCacheSql(cacher, ids, tableName, newsql, args) - if err != nil { - return err - } - } else { - session.engine.logger.Debugf("[cache] cache hit sql: %v, %v, %v, %v", tableName, sqlStr, newsql, args) - } - - sliceValue := reflect.Indirect(reflect.ValueOf(rowsSlicePtr)) - - ididxes := make(map[string]int) - var ides []schemas.PK - temps := make([]interface{}, len(ids)) - - for idx, id := range ids { - sid, err := id.ToString() - if err != nil { - return err - } - bean := cacher.GetBean(tableName, sid) - - // fix issue #894 - isHit := func() (ht bool) { - if bean == nil { - ht = false - return - } - ckb := reflect.ValueOf(bean).Elem().Type() - ht = ckb == t - if !ht && t.Kind() == reflect.Ptr { - ht = t.Elem() == ckb - } - return - } - if !isHit() { - ides = append(ides, id) - ididxes[sid] = idx - } else { - session.engine.logger.Debugf("[cache] cache hit bean: %v, %v, %v", tableName, id, bean) - - pk, err := table.IDOfV(reflect.ValueOf(bean)) - if err != nil { - return err - } - - xid, err := pk.ToString() - if err != nil { - return err - } - - if sid != xid { - session.engine.logger.Errorf("[cache] error cache: %v, %v, %v", xid, sid, bean) - return ErrCacheFailed - } - temps[idx] = bean - } - } - - if len(ides) > 0 { - slices := 
reflect.New(reflect.SliceOf(t)) - beans := slices.Interface() - - statement := session.statement - session.statement = statements.NewStatement( - session.engine.dialect, - session.engine.tagParser, - session.engine.DatabaseTZ, - ) - if len(table.PrimaryKeys) == 1 { - ff := make([]interface{}, 0, len(ides)) - for _, ie := range ides { - ff = append(ff, ie[0]) - } - - session.In("`"+table.PrimaryKeys[0]+"`", ff...) - } else { - for _, ie := range ides { - cond := builder.NewCond() - for i, name := range table.PrimaryKeys { - cond = cond.And(builder.Eq{"`" + name + "`": ie[i]}) - } - session.Or(cond) - } - } - - err = session.NoCache().Table(tableName).find(beans) - if err != nil { - return err - } - session.statement = statement - - vs := reflect.Indirect(reflect.ValueOf(beans)) - for i := 0; i < vs.Len(); i++ { - rv := vs.Index(i) - if rv.Kind() != reflect.Ptr { - rv = rv.Addr() - } - id, err := table.IDOfV(rv) - if err != nil { - return err - } - sid, err := id.ToString() - if err != nil { - return err - } - - bean := rv.Interface() - temps[ididxes[sid]] = bean - session.engine.logger.Debugf("[cache] cache bean: %v, %v, %v, %v", tableName, id, bean, temps) - cacher.PutBean(tableName, sid, bean) - } - } - - for j := 0; j < len(temps); j++ { - bean := temps[j] - if bean == nil { - session.engine.logger.Warnf("[cache] cache no hit: %v, %v, %v", tableName, ids[j], temps) - // return errors.New("cache error") // !nashtsai! 
no need to return error, but continue instead - continue - } - if sliceValue.Kind() == reflect.Slice { - if t.Kind() == reflect.Ptr { - sliceValue.Set(reflect.Append(sliceValue, reflect.ValueOf(bean))) - } else { - sliceValue.Set(reflect.Append(sliceValue, reflect.Indirect(reflect.ValueOf(bean)))) - } - } else if sliceValue.Kind() == reflect.Map { - key := ids[j] - keyType := sliceValue.Type().Key() - keyValue := reflect.New(keyType) - var ikey interface{} - if len(key) == 1 { - if err := convert.AssignValue(keyValue, key[0]); err != nil { - return err - } - ikey = keyValue.Elem().Interface() - } else { - if keyType.Kind() != reflect.Slice { - return errors.New("table have multiple primary keys, key is not schemas.PK or slice") - } - ikey = key - } - - if t.Kind() == reflect.Ptr { - sliceValue.SetMapIndex(reflect.ValueOf(ikey), reflect.ValueOf(bean)) - } else { - sliceValue.SetMapIndex(reflect.ValueOf(ikey), reflect.Indirect(reflect.ValueOf(bean))) - } - } - } - - return nil -} diff --git a/vendor/xorm.io/xorm/session_get.go b/vendor/xorm.io/xorm/session_get.go deleted file mode 100644 index 9bb92a8b..00000000 --- a/vendor/xorm.io/xorm/session_get.go +++ /dev/null @@ -1,366 +0,0 @@ -// Copyright 2016 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xorm - -import ( - "database/sql" - "errors" - "fmt" - "math/big" - "reflect" - "strconv" - "time" - - "xorm.io/xorm/caches" - "xorm.io/xorm/convert" - "xorm.io/xorm/core" - "xorm.io/xorm/internal/utils" - "xorm.io/xorm/schemas" -) - -var ( - // ErrObjectIsNil return error of object is nil - ErrObjectIsNil = errors.New("object should not be nil") -) - -// Get retrieve one record from database, bean's non-empty fields -// will be as conditions -func (session *Session) Get(beans ...interface{}) (bool, error) { - if session.isAutoClose { - defer session.Close() - } - return session.get(beans...) 
-} - -func isPtrOfTime(v interface{}) bool { - if _, ok := v.(*time.Time); ok { - return true - } - - el := reflect.ValueOf(v).Elem() - if el.Kind() != reflect.Struct { - return false - } - - return el.Type().ConvertibleTo(schemas.TimeType) -} - -func (session *Session) get(beans ...interface{}) (bool, error) { - defer session.resetStatement() - - if session.statement.LastError != nil { - return false, session.statement.LastError - } - if len(beans) == 0 { - return false, errors.New("needs at least one parameter for get") - } - - beanValue := reflect.ValueOf(beans[0]) - if beanValue.Kind() != reflect.Ptr { - return false, errors.New("needs a pointer to a value") - } else if beanValue.Elem().Kind() == reflect.Ptr { - return false, errors.New("a pointer to a pointer is not allowed") - } else if beanValue.IsNil() { - return false, ErrObjectIsNil - } - - var isStruct = beanValue.Elem().Kind() == reflect.Struct && !isPtrOfTime(beans[0]) - if isStruct { - if err := session.statement.SetRefBean(beans[0]); err != nil { - return false, err - } - } - - var sqlStr string - var args []interface{} - var err error - - if session.statement.RawSQL == "" { - if len(session.statement.TableName()) == 0 { - return false, ErrTableNotFound - } - session.statement.Limit(1) - sqlStr, args, err = session.statement.GenGetSQL(beans[0]) - if err != nil { - return false, err - } - } else { - sqlStr = session.statement.GenRawSQL() - args = session.statement.RawParams - } - - table := session.statement.RefTable - - if session.statement.ColumnMap.IsEmpty() && session.canCache() && isStruct { - if cacher := session.engine.GetCacher(session.statement.TableName()); cacher != nil && - !session.statement.GetUnscoped() { - has, err := session.cacheGet(beans[0], sqlStr, args...) 
- if err != ErrCacheFailed { - return has, err - } - } - } - - context := session.statement.Context - if context != nil && isStruct { - res := context.Get(fmt.Sprintf("%v-%v", sqlStr, args)) - if res != nil { - session.engine.logger.Debugf("hit context cache: %s", sqlStr) - - structValue := reflect.Indirect(reflect.ValueOf(beans[0])) - structValue.Set(reflect.Indirect(reflect.ValueOf(res))) - session.lastSQL = "" - session.lastSQLArgs = nil - return true, nil - } - } - - has, err := session.nocacheGet(beanValue.Elem().Kind(), table, beans, sqlStr, args...) - if err != nil || !has { - return has, err - } - - if context != nil && isStruct { - context.Put(fmt.Sprintf("%v-%v", sqlStr, args), beans[0]) - } - - return true, nil -} - -func isScannableStruct(bean interface{}, typeLen int) bool { - switch bean.(type) { - case *time.Time: - return false - case sql.Scanner: - return false - case convert.Conversion: - return typeLen > 1 - case *big.Float: - return false - } - return true -} - -func (session *Session) nocacheGet(beanKind reflect.Kind, table *schemas.Table, beans []interface{}, sqlStr string, args ...interface{}) (bool, error) { - rows, err := session.queryRows(sqlStr, args...) - if err != nil { - return false, err - } - defer rows.Close() - - if !rows.Next() { - return false, rows.Err() - } - - // WARN: Alougth rows return true, but we may also return error. 
- types, err := rows.ColumnTypes() - if err != nil { - return true, err - } - fields, err := rows.Columns() - if err != nil { - return true, err - } - - if err := session.scan(rows, table, beanKind, beans, types, fields); err != nil { - return true, err - } - rows.Close() - - return true, session.executeProcessors() -} - -func (session *Session) scan(rows *core.Rows, table *schemas.Table, firstBeanKind reflect.Kind, beans []interface{}, types []*sql.ColumnType, fields []string) error { - if len(beans) == 1 { - bean := beans[0] - switch firstBeanKind { - case reflect.Struct: - if !isScannableStruct(bean, len(types)) { - break - } - scanResults, err := session.row2Slice(rows, fields, types, bean) - if err != nil { - return err - } - - dataStruct := utils.ReflectValue(bean) - _, err = session.slice2Bean(scanResults, fields, bean, &dataStruct, table) - return err - case reflect.Slice: - return session.getSlice(rows, types, fields, bean) - case reflect.Map: - return session.getMap(rows, types, fields, bean) - } - } - - if len(beans) != len(types) { - return fmt.Errorf("expected columns %d, but only %d variables", len(types), len(beans)) - } - - return session.engine.scan(rows, fields, types, beans...) 
-} - -func (session *Session) getSlice(rows *core.Rows, types []*sql.ColumnType, fields []string, bean interface{}) error { - switch t := bean.(type) { - case *[]string: - res, err := session.engine.scanStringInterface(rows, fields, types) - if err != nil { - return err - } - - var needAppend = len(*t) == 0 // both support slice is empty or has been initlized - for i, r := range res { - if needAppend { - *t = append(*t, r.(*sql.NullString).String) - } else { - (*t)[i] = r.(*sql.NullString).String - } - } - return nil - case *[]interface{}: - scanResults, err := session.engine.scanInterfaces(rows, fields, types) - if err != nil { - return err - } - var needAppend = len(*t) == 0 - for ii := range fields { - s, err := convert.Interface2Interface(session.engine.DatabaseTZ, scanResults[ii]) - if err != nil { - return err - } - if needAppend { - *t = append(*t, s) - } else { - (*t)[ii] = s - } - } - return nil - default: - return fmt.Errorf("unspoorted slice type: %t", t) - } -} - -func (session *Session) getMap(rows *core.Rows, types []*sql.ColumnType, fields []string, bean interface{}) error { - switch t := bean.(type) { - case *map[string]string: - scanResults, err := session.engine.scanStringInterface(rows, fields, types) - if err != nil { - return err - } - for ii, key := range fields { - (*t)[key] = scanResults[ii].(*sql.NullString).String - } - return nil - case *map[string]interface{}: - scanResults, err := session.engine.scanInterfaces(rows, fields, types) - if err != nil { - return err - } - for ii, key := range fields { - s, err := convert.Interface2Interface(session.engine.DatabaseTZ, scanResults[ii]) - if err != nil { - return err - } - (*t)[key] = s - } - return nil - default: - return fmt.Errorf("unspoorted map type: %t", t) - } -} - -func (session *Session) cacheGet(bean interface{}, sqlStr string, args ...interface{}) (has bool, err error) { - // if has no reftable, then don't use cache currently - if !session.canCache() { - return false, ErrCacheFailed 
- } - - for _, filter := range session.engine.dialect.Filters() { - sqlStr = filter.Do(sqlStr) - } - newsql := session.statement.ConvertIDSQL(sqlStr) - if newsql == "" { - return false, ErrCacheFailed - } - - tableName := session.statement.TableName() - cacher := session.engine.cacherMgr.GetCacher(tableName) - - session.engine.logger.Debugf("[cache] Get SQL: %s, %v", newsql, args) - table := session.statement.RefTable - ids, err := caches.GetCacheSql(cacher, tableName, newsql, args) - if err != nil { - var res = make([]string, len(table.PrimaryKeys)) - rows, err := session.NoCache().queryRows(newsql, args...) - if err != nil { - return false, err - } - defer rows.Close() - - if rows.Next() { - err = rows.ScanSlice(&res) - if err != nil { - return true, err - } - } else { - if rows.Err() != nil { - return false, rows.Err() - } - return false, ErrCacheFailed - } - - var pk schemas.PK = make([]interface{}, len(table.PrimaryKeys)) - for i, col := range table.PKColumns() { - if col.SQLType.IsText() { - pk[i] = res[i] - } else if col.SQLType.IsNumeric() { - n, err := strconv.ParseInt(res[i], 10, 64) - if err != nil { - return false, err - } - pk[i] = n - } else { - return false, errors.New("unsupported") - } - } - - ids = []schemas.PK{pk} - session.engine.logger.Debugf("[cache] cache ids: %s, %v", newsql, ids) - err = caches.PutCacheSql(cacher, ids, tableName, newsql, args) - if err != nil { - return false, err - } - } else { - session.engine.logger.Debugf("[cache] cache hit: %s, %v", newsql, ids) - } - - if len(ids) > 0 { - structValue := reflect.Indirect(reflect.ValueOf(bean)) - id := ids[0] - session.engine.logger.Debugf("[cache] get bean: %s, %v", tableName, id) - sid, err := id.ToString() - if err != nil { - return false, err - } - cacheBean := cacher.GetBean(tableName, sid) - if cacheBean == nil { - cacheBean = bean - has, err = session.nocacheGet(reflect.Struct, table, []interface{}{cacheBean}, sqlStr, args...) 
- if err != nil || !has { - return has, err - } - - session.engine.logger.Debugf("[cache] cache bean: %s, %v, %v", tableName, id, cacheBean) - cacher.PutBean(tableName, sid, cacheBean) - } else { - session.engine.logger.Debugf("[cache] cache hit: %s, %v, %v", tableName, id, cacheBean) - has = true - } - structValue.Set(reflect.Indirect(reflect.ValueOf(cacheBean))) - - return has, nil - } - return false, nil -} diff --git a/vendor/xorm.io/xorm/session_insert.go b/vendor/xorm.io/xorm/session_insert.go deleted file mode 100644 index fc025613..00000000 --- a/vendor/xorm.io/xorm/session_insert.go +++ /dev/null @@ -1,709 +0,0 @@ -// Copyright 2016 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xorm - -import ( - "errors" - "fmt" - "reflect" - "sort" - "strings" - "time" - - "xorm.io/xorm/convert" - "xorm.io/xorm/dialects" - "xorm.io/xorm/internal/utils" - "xorm.io/xorm/schemas" -) - -// ErrNoElementsOnSlice represents an error there is no element when insert -var ErrNoElementsOnSlice = errors.New("no element on slice when insert") - -// Insert insert one or more beans -func (session *Session) Insert(beans ...interface{}) (int64, error) { - var affected int64 - var err error - - if session.isAutoClose { - defer session.Close() - } - - session.autoResetStatement = false - defer func() { - session.autoResetStatement = true - session.resetStatement() - }() - - for _, bean := range beans { - var cnt int64 - var err error - switch v := bean.(type) { - case map[string]interface{}: - cnt, err = session.insertMapInterface(v) - case []map[string]interface{}: - cnt, err = session.insertMultipleMapInterface(v) - case map[string]string: - cnt, err = session.insertMapString(v) - case []map[string]string: - cnt, err = session.insertMultipleMapString(v) - default: - sliceValue := reflect.Indirect(reflect.ValueOf(bean)) - if sliceValue.Kind() == reflect.Slice { - cnt, err = 
session.insertMultipleStruct(bean) - } else { - cnt, err = session.insertStruct(bean) - } - } - if err != nil { - return affected, err - } - affected += cnt - } - - return affected, err -} - -func (session *Session) insertMultipleStruct(rowsSlicePtr interface{}) (int64, error) { - sliceValue := reflect.Indirect(reflect.ValueOf(rowsSlicePtr)) - if sliceValue.Kind() != reflect.Slice { - return 0, errors.New("needs a pointer to a slice") - } - - if sliceValue.Len() <= 0 { - return 0, ErrNoElementsOnSlice - } - - if err := session.statement.SetRefBean(sliceValue.Index(0).Interface()); err != nil { - return 0, err - } - - tableName := session.statement.TableName() - if len(tableName) == 0 { - return 0, ErrTableNotFound - } - - var ( - table = session.statement.RefTable - size = sliceValue.Len() - colNames []string - colMultiPlaces []string - args []interface{} - ) - - for i := 0; i < size; i++ { - v := sliceValue.Index(i) - var vv reflect.Value - switch v.Kind() { - case reflect.Interface: - vv = reflect.Indirect(v.Elem()) - default: - vv = reflect.Indirect(v) - } - elemValue := v.Interface() - var colPlaces []string - - // handle BeforeInsertProcessor - // !nashtsai! does user expect it's same slice to passed closure when using Before()/After() when insert multi?? 
- for _, closure := range session.beforeClosures { - closure(elemValue) - } - - if processor, ok := interface{}(elemValue).(BeforeInsertProcessor); ok { - processor.BeforeInsert() - } - // -- - - for _, col := range table.Columns() { - ptrFieldValue, err := col.ValueOfV(&vv) - if err != nil { - return 0, err - } - fieldValue := *ptrFieldValue - if col.IsAutoIncrement && utils.IsZero(fieldValue.Interface()) { - if session.engine.dialect.Features().AutoincrMode == dialects.SequenceAutoincrMode { - if i == 0 { - colNames = append(colNames, col.Name) - } - colPlaces = append(colPlaces, utils.SeqName(tableName)+".nextval") - } - continue - } - if col.MapType == schemas.ONLYFROMDB { - continue - } - if col.IsDeleted { - continue - } - if session.statement.OmitColumnMap.Contain(col.Name) { - continue - } - if len(session.statement.ColumnMap) > 0 && !session.statement.ColumnMap.Contain(col.Name) { - continue - } - // !satorunooshie! set fieldValue as nil when column is nullable and zero-value - if _, ok := getFlagForColumn(session.statement.NullableMap, col); ok { - if col.Nullable && utils.IsValueZero(fieldValue) { - var nilValue *int - fieldValue = reflect.ValueOf(nilValue) - } - } - if (col.IsCreated || col.IsUpdated) && session.statement.UseAutoTime { - val, t, err := session.engine.nowTime(col) - if err != nil { - return 0, err - } - args = append(args, val) - - var colName = col.Name - session.afterClosures = append(session.afterClosures, func(bean interface{}) { - col := table.GetColumn(colName) - setColumnTime(bean, col, t) - }) - } else if col.IsVersion && session.statement.CheckVersion { - args = append(args, 1) - var colName = col.Name - session.afterClosures = append(session.afterClosures, func(bean interface{}) { - col := table.GetColumn(colName) - setColumnInt(bean, col, 1) - }) - } else { - arg, err := session.statement.Value2Interface(col, fieldValue) - if err != nil { - return 0, err - } - args = append(args, arg) - } - - if i == 0 { - colNames = 
append(colNames, col.Name) - } - colPlaces = append(colPlaces, "?") - } - - colMultiPlaces = append(colMultiPlaces, strings.Join(colPlaces, ", ")) - } - cleanupProcessorsClosures(&session.beforeClosures) - - quoter := session.engine.dialect.Quoter() - var sql string - colStr := quoter.Join(colNames, ",") - if session.engine.dialect.URI().DBType == schemas.ORACLE { - temp := fmt.Sprintf(") INTO %s (%v) VALUES (", - quoter.Quote(tableName), - colStr) - sql = fmt.Sprintf("INSERT ALL INTO %s (%v) VALUES (%v) SELECT 1 FROM DUAL", - quoter.Quote(tableName), - colStr, - strings.Join(colMultiPlaces, temp)) - } else { - sql = fmt.Sprintf("INSERT INTO %s (%v) VALUES (%v)", - quoter.Quote(tableName), - colStr, - strings.Join(colMultiPlaces, "),(")) - } - res, err := session.exec(sql, args...) - if err != nil { - return 0, err - } - - _ = session.cacheInsert(tableName) - - lenAfterClosures := len(session.afterClosures) - for i := 0; i < size; i++ { - elemValue := reflect.Indirect(sliceValue.Index(i)).Addr().Interface() - - // handle AfterInsertProcessor - if session.isAutoCommit { - // !nashtsai! does user expect it's same slice to passed closure when using Before()/After() when insert multi?? - for _, closure := range session.afterClosures { - closure(elemValue) - } - if processor, ok := elemValue.(AfterInsertProcessor); ok { - processor.AfterInsert() - } - } else { - if lenAfterClosures > 0 { - if value, has := session.afterInsertBeans[elemValue]; has && value != nil { - *value = append(*value, session.afterClosures...) 
- } else { - afterClosures := make([]func(interface{}), lenAfterClosures) - copy(afterClosures, session.afterClosures) - session.afterInsertBeans[elemValue] = &afterClosures - } - } else { - if _, ok := elemValue.(AfterInsertProcessor); ok { - session.afterInsertBeans[elemValue] = nil - } - } - } - } - - cleanupProcessorsClosures(&session.afterClosures) - return res.RowsAffected() -} - -// InsertMulti insert multiple records -func (session *Session) InsertMulti(rowsSlicePtr interface{}) (int64, error) { - if session.isAutoClose { - defer session.Close() - } - - sliceValue := reflect.Indirect(reflect.ValueOf(rowsSlicePtr)) - if sliceValue.Kind() != reflect.Slice { - return 0, ErrPtrSliceType - } - - return session.insertMultipleStruct(rowsSlicePtr) -} - -func (session *Session) insertStruct(bean interface{}) (int64, error) { - if err := session.statement.SetRefBean(bean); err != nil { - return 0, err - } - if len(session.statement.TableName()) == 0 { - return 0, ErrTableNotFound - } - - // handle BeforeInsertProcessor - for _, closure := range session.beforeClosures { - closure(bean) - } - cleanupProcessorsClosures(&session.beforeClosures) // cleanup after used - - if processor, ok := interface{}(bean).(BeforeInsertProcessor); ok { - processor.BeforeInsert() - } - - var tableName = session.statement.TableName() - table := session.statement.RefTable - - colNames, args, err := session.genInsertColumns(bean) - if err != nil { - return 0, err - } - - sqlStr, args, err := session.statement.GenInsertSQL(colNames, args) - if err != nil { - return 0, err - } - sqlStr = session.engine.dialect.Quoter().Replace(sqlStr) - - handleAfterInsertProcessorFunc := func(bean interface{}) { - if session.isAutoCommit { - for _, closure := range session.afterClosures { - closure(bean) - } - if processor, ok := interface{}(bean).(AfterInsertProcessor); ok { - processor.AfterInsert() - } - } else { - lenAfterClosures := len(session.afterClosures) - if lenAfterClosures > 0 { - if value, has 
:= session.afterInsertBeans[bean]; has && value != nil { - *value = append(*value, session.afterClosures...) - } else { - afterClosures := make([]func(interface{}), lenAfterClosures) - copy(afterClosures, session.afterClosures) - session.afterInsertBeans[bean] = &afterClosures - } - } else { - if _, ok := interface{}(bean).(AfterInsertProcessor); ok { - session.afterInsertBeans[bean] = nil - } - } - } - cleanupProcessorsClosures(&session.afterClosures) // cleanup after used - } - - // if there is auto increment column and driver don't support return it - if len(table.AutoIncrement) > 0 && !session.engine.driver.Features().SupportReturnInsertedID { - var sql string - var newArgs []interface{} - var needCommit bool - var id int64 - if session.engine.dialect.URI().DBType == schemas.ORACLE || session.engine.dialect.URI().DBType == schemas.DAMENG { - if session.isAutoCommit { // if it's not in transaction - if err := session.Begin(); err != nil { - return 0, err - } - needCommit = true - } - _, err := session.exec(sqlStr, args...) 
- if err != nil { - return 0, err - } - i := utils.IndexSlice(colNames, table.AutoIncrement) - if i > -1 { - id, err = convert.AsInt64(args[i]) - if err != nil { - return 0, err - } - } else { - sql = fmt.Sprintf("select %s.currval from dual", utils.SeqName(tableName)) - } - } else { - sql = sqlStr - newArgs = args - } - - if id == 0 { - err := session.queryRow(sql, newArgs...).Scan(&id) - if err != nil { - return 0, err - } - if needCommit { - if err := session.Commit(); err != nil { - return 0, err - } - } - if id == 0 { - return 0, errors.New("insert successfully but not returned id") - } - } - - defer handleAfterInsertProcessorFunc(bean) - - _ = session.cacheInsert(tableName) - - if table.Version != "" && session.statement.CheckVersion { - verValue, err := table.VersionColumn().ValueOf(bean) - if err != nil { - session.engine.logger.Errorf("%v", err) - } else if verValue.IsValid() && verValue.CanSet() { - session.incrVersionFieldValue(verValue) - } - } - - aiValue, err := table.AutoIncrColumn().ValueOf(bean) - if err != nil { - session.engine.logger.Errorf("%v", err) - } - - if aiValue == nil || !aiValue.IsValid() || !aiValue.CanSet() { - return 1, nil - } - - return 1, convert.AssignValue(*aiValue, id) - } - - res, err := session.exec(sqlStr, args...) 
- if err != nil { - return 0, err - } - - defer handleAfterInsertProcessorFunc(bean) - - _ = session.cacheInsert(tableName) - - if table.Version != "" && session.statement.CheckVersion { - verValue, err := table.VersionColumn().ValueOf(bean) - if err != nil { - session.engine.logger.Errorf("%v", err) - } else if verValue.IsValid() && verValue.CanSet() { - session.incrVersionFieldValue(verValue) - } - } - - if table.AutoIncrement == "" { - return res.RowsAffected() - } - - var id int64 - id, err = res.LastInsertId() - if err != nil || id <= 0 { - return res.RowsAffected() - } - - aiValue, err := table.AutoIncrColumn().ValueOf(bean) - if err != nil { - session.engine.logger.Errorf("%v", err) - } - - if aiValue == nil || !aiValue.IsValid() || !aiValue.CanSet() { - return res.RowsAffected() - } - - if err := convert.AssignValue(*aiValue, id); err != nil { - return 0, err - } - - return res.RowsAffected() -} - -// InsertOne insert only one struct into database as a record. -// The in parameter bean must a struct or a point to struct. 
The return -// parameter is inserted and error -// Deprecated: Please use Insert directly -func (session *Session) InsertOne(bean interface{}) (int64, error) { - if session.isAutoClose { - defer session.Close() - } - - return session.insertStruct(bean) -} - -func (session *Session) cacheInsert(table string) error { - if !session.statement.UseCache { - return nil - } - cacher := session.engine.cacherMgr.GetCacher(table) - if cacher == nil { - return nil - } - session.engine.logger.Debugf("[cache] clear SQL: %v", table) - cacher.ClearIds(table) - return nil -} - -// genInsertColumns generates insert needed columns -func (session *Session) genInsertColumns(bean interface{}) ([]string, []interface{}, error) { - table := session.statement.RefTable - colNames := make([]string, 0, len(table.ColumnsSeq())) - args := make([]interface{}, 0, len(table.ColumnsSeq())) - - for _, col := range table.Columns() { - if col.MapType == schemas.ONLYFROMDB { - continue - } - if session.statement.OmitColumnMap.Contain(col.Name) { - continue - } - if len(session.statement.ColumnMap) > 0 && !session.statement.ColumnMap.Contain(col.Name) { - continue - } - if session.statement.IncrColumns.IsColExist(col.Name) { - continue - } else if session.statement.DecrColumns.IsColExist(col.Name) { - continue - } else if session.statement.ExprColumns.IsColExist(col.Name) { - continue - } - - if col.IsDeleted { - arg, err := dialects.FormatColumnTime(session.engine.dialect, session.engine.DatabaseTZ, col, time.Time{}) - if err != nil { - return nil, nil, err - } - args = append(args, arg) - colNames = append(colNames, col.Name) - continue - } - - fieldValuePtr, err := col.ValueOf(bean) - if err != nil { - return nil, nil, err - } - fieldValue := *fieldValuePtr - - if col.IsAutoIncrement && utils.IsValueZero(fieldValue) { - continue - } - - // !evalphobia! 
set fieldValue as nil when column is nullable and zero-value - if _, ok := getFlagForColumn(session.statement.NullableMap, col); ok { - if col.Nullable && utils.IsValueZero(fieldValue) { - var nilValue *int - fieldValue = reflect.ValueOf(nilValue) - } - } - - if (col.IsCreated || col.IsUpdated) && session.statement.UseAutoTime /*&& isZero(fieldValue.Interface())*/ { - // if time is non-empty, then set to auto time - val, t, err := session.engine.nowTime(col) - if err != nil { - return nil, nil, err - } - args = append(args, val) - - var colName = col.Name - session.afterClosures = append(session.afterClosures, func(bean interface{}) { - col := table.GetColumn(colName) - setColumnTime(bean, col, t) - }) - } else if col.IsVersion && session.statement.CheckVersion { - args = append(args, 1) - } else { - arg, err := session.statement.Value2Interface(col, fieldValue) - if err != nil { - return colNames, args, err - } - args = append(args, arg) - } - - colNames = append(colNames, col.Name) - } - return colNames, args, nil -} - -func (session *Session) insertMapInterface(m map[string]interface{}) (int64, error) { - if len(m) == 0 { - return 0, ErrParamsType - } - - tableName := session.statement.TableName() - if len(tableName) == 0 { - return 0, ErrTableNotFound - } - - var columns = make([]string, 0, len(m)) - exprs := session.statement.ExprColumns - for k := range m { - if !exprs.IsColExist(k) { - columns = append(columns, k) - } - } - sort.Strings(columns) - - var args = make([]interface{}, 0, len(m)) - for _, colName := range columns { - args = append(args, m[colName]) - } - - return session.insertMap(columns, args) -} - -func (session *Session) insertMultipleMapInterface(maps []map[string]interface{}) (int64, error) { - if len(maps) == 0 { - return 0, ErrNoElementsOnSlice - } - - tableName := session.statement.TableName() - if len(tableName) == 0 { - return 0, ErrTableNotFound - } - - var columns = make([]string, 0, len(maps[0])) - exprs := 
session.statement.ExprColumns - for k := range maps[0] { - if !exprs.IsColExist(k) { - columns = append(columns, k) - } - } - sort.Strings(columns) - - var argss = make([][]interface{}, 0, len(maps)) - for _, m := range maps { - var args = make([]interface{}, 0, len(m)) - for _, colName := range columns { - args = append(args, m[colName]) - } - argss = append(argss, args) - } - - return session.insertMultipleMap(columns, argss) -} - -func (session *Session) insertMapString(m map[string]string) (int64, error) { - if len(m) == 0 { - return 0, ErrParamsType - } - - tableName := session.statement.TableName() - if len(tableName) == 0 { - return 0, ErrTableNotFound - } - - var columns = make([]string, 0, len(m)) - exprs := session.statement.ExprColumns - for k := range m { - if !exprs.IsColExist(k) { - columns = append(columns, k) - } - } - - sort.Strings(columns) - - var args = make([]interface{}, 0, len(m)) - for _, colName := range columns { - args = append(args, m[colName]) - } - - return session.insertMap(columns, args) -} - -func (session *Session) insertMultipleMapString(maps []map[string]string) (int64, error) { - if len(maps) == 0 { - return 0, ErrNoElementsOnSlice - } - - tableName := session.statement.TableName() - if len(tableName) == 0 { - return 0, ErrTableNotFound - } - - var columns = make([]string, 0, len(maps[0])) - exprs := session.statement.ExprColumns - for k := range maps[0] { - if !exprs.IsColExist(k) { - columns = append(columns, k) - } - } - sort.Strings(columns) - - var argss = make([][]interface{}, 0, len(maps)) - for _, m := range maps { - var args = make([]interface{}, 0, len(m)) - for _, colName := range columns { - args = append(args, m[colName]) - } - argss = append(argss, args) - } - - return session.insertMultipleMap(columns, argss) -} - -func (session *Session) insertMap(columns []string, args []interface{}) (int64, error) { - tableName := session.statement.TableName() - if len(tableName) == 0 { - return 0, ErrTableNotFound - } - - sql, 
args, err := session.statement.GenInsertMapSQL(columns, args) - if err != nil { - return 0, err - } - sql = session.engine.dialect.Quoter().Replace(sql) - - if err := session.cacheInsert(tableName); err != nil { - return 0, err - } - - res, err := session.exec(sql, args...) - if err != nil { - return 0, err - } - affected, err := res.RowsAffected() - if err != nil { - return 0, err - } - return affected, nil -} - -func (session *Session) insertMultipleMap(columns []string, argss [][]interface{}) (int64, error) { - tableName := session.statement.TableName() - if len(tableName) == 0 { - return 0, ErrTableNotFound - } - - sql, args, err := session.statement.GenInsertMultipleMapSQL(columns, argss) - if err != nil { - return 0, err - } - sql = session.engine.dialect.Quoter().Replace(sql) - - if err := session.cacheInsert(tableName); err != nil { - return 0, err - } - - res, err := session.exec(sql, args...) - if err != nil { - return 0, err - } - affected, err := res.RowsAffected() - if err != nil { - return 0, err - } - return affected, nil -} diff --git a/vendor/xorm.io/xorm/session_iterate.go b/vendor/xorm.io/xorm/session_iterate.go deleted file mode 100644 index afb9a7cc..00000000 --- a/vendor/xorm.io/xorm/session_iterate.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2016 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xorm - -import ( - "reflect" - - "xorm.io/xorm/internal/utils" -) - -// IterFunc only use by Iterate -type IterFunc func(idx int, bean interface{}) error - -// Rows return sql.Rows compatible Rows obj, as a forward Iterator object for iterating record by record, bean's non-empty fields -// are conditions. -func (session *Session) Rows(bean interface{}) (*Rows, error) { - return newRows(session, bean) -} - -// Iterate record by record handle records from table, condiBeans's non-empty fields -// are conditions. 
beans could be []Struct, []*Struct, map[int64]Struct -// map[int64]*Struct -func (session *Session) Iterate(bean interface{}, fun IterFunc) error { - if session.isAutoClose { - defer session.Close() - } - - if session.statement.LastError != nil { - return session.statement.LastError - } - - if session.statement.BufferSize > 0 { - return session.bufferIterate(bean, fun) - } - - rows, err := session.Rows(bean) - if err != nil { - return err - } - defer rows.Close() - - i := 0 - for rows.Next() { - b := reflect.New(rows.beanType).Interface() - err = rows.Scan(b) - if err != nil { - return err - } - err = fun(i, b) - if err != nil { - return err - } - i++ - } - return rows.Err() -} - -// BufferSize sets the buffersize for iterate -func (session *Session) BufferSize(size int) *Session { - session.statement.BufferSize = size - return session -} - -func (session *Session) bufferIterate(bean interface{}, fun IterFunc) error { - var bufferSize = session.statement.BufferSize - var pLimitN = session.statement.LimitN - if pLimitN != nil && bufferSize > *pLimitN { - bufferSize = *pLimitN - } - var start = session.statement.Start - v := utils.ReflectValue(bean) - sliceType := reflect.SliceOf(v.Type()) - var idx = 0 - session.autoResetStatement = false - defer func() { - session.autoResetStatement = true - }() - - for bufferSize > 0 { - slice := reflect.New(sliceType) - if err := session.NoCache().Limit(bufferSize, start).find(slice.Interface(), bean); err != nil { - return err - } - - for i := 0; i < slice.Elem().Len(); i++ { - if err := fun(idx, slice.Elem().Index(i).Addr().Interface()); err != nil { - return err - } - idx++ - } - - if bufferSize > slice.Elem().Len() { - break - } - - start += slice.Elem().Len() - if pLimitN != nil && start+bufferSize > *pLimitN { - bufferSize = *pLimitN - start - } - } - - return nil -} diff --git a/vendor/xorm.io/xorm/session_raw.go b/vendor/xorm.io/xorm/session_raw.go deleted file mode 100644 index add584d0..00000000 --- 
a/vendor/xorm.io/xorm/session_raw.go +++ /dev/null @@ -1,197 +0,0 @@ -// Copyright 2016 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xorm - -import ( - "database/sql" - "strings" - - "xorm.io/xorm/core" -) - -func (session *Session) queryPreprocess(sqlStr *string, paramStr ...interface{}) { - for _, filter := range session.engine.dialect.Filters() { - *sqlStr = filter.Do(*sqlStr) - } - - session.lastSQL = *sqlStr - session.lastSQLArgs = paramStr -} - -func (session *Session) queryRows(sqlStr string, args ...interface{}) (*core.Rows, error) { - defer session.resetStatement() - if session.statement.LastError != nil { - return nil, session.statement.LastError - } - - session.queryPreprocess(&sqlStr, args...) - - session.lastSQL = sqlStr - session.lastSQLArgs = args - - if session.isAutoCommit { - var db *core.DB - if session.sessionType == groupSession && strings.EqualFold(strings.TrimSpace(sqlStr)[:6], "select") && !session.statement.IsForUpdate { - db = session.engine.engineGroup.Slave().DB() - } else { - db = session.DB() - } - - if session.prepareStmt { - // don't clear stmt since session will cache them - stmt, err := session.doPrepare(db, sqlStr) - if err != nil { - return nil, err - } - - return stmt.QueryContext(session.ctx, args...) - } - - return db.QueryContext(session.ctx, sqlStr, args...) - } - - if session.prepareStmt { - stmt, err := session.doPrepareTx(sqlStr) - if err != nil { - return nil, err - } - - return stmt.QueryContext(session.ctx, args...) - } - - return session.tx.QueryContext(session.ctx, sqlStr, args...) 
-} - -func (session *Session) queryRow(sqlStr string, args ...interface{}) *core.Row { - return core.NewRow(session.queryRows(sqlStr, args...)) -} - -// Query runs a raw sql and return records as []map[string][]byte -func (session *Session) Query(sqlOrArgs ...interface{}) ([]map[string][]byte, error) { - if session.isAutoClose { - defer session.Close() - } - - sqlStr, args, err := session.statement.GenQuerySQL(sqlOrArgs...) - if err != nil { - return nil, err - } - - rows, err := session.queryRows(sqlStr, args...) - if err != nil { - return nil, err - } - defer rows.Close() - - return session.engine.scanByteMaps(rows) -} - -// QueryString runs a raw sql and return records as []map[string]string -func (session *Session) QueryString(sqlOrArgs ...interface{}) ([]map[string]string, error) { - if session.isAutoClose { - defer session.Close() - } - - sqlStr, args, err := session.statement.GenQuerySQL(sqlOrArgs...) - if err != nil { - return nil, err - } - - rows, err := session.queryRows(sqlStr, args...) - if err != nil { - return nil, err - } - defer rows.Close() - - return session.engine.ScanStringMaps(rows) -} - -// QuerySliceString runs a raw sql and return records as [][]string -func (session *Session) QuerySliceString(sqlOrArgs ...interface{}) ([][]string, error) { - if session.isAutoClose { - defer session.Close() - } - - sqlStr, args, err := session.statement.GenQuerySQL(sqlOrArgs...) - if err != nil { - return nil, err - } - - rows, err := session.queryRows(sqlStr, args...) - if err != nil { - return nil, err - } - defer rows.Close() - - return session.engine.ScanStringSlices(rows) -} - -// QueryInterface runs a raw sql and return records as []map[string]interface{} -func (session *Session) QueryInterface(sqlOrArgs ...interface{}) ([]map[string]interface{}, error) { - if session.isAutoClose { - defer session.Close() - } - - sqlStr, args, err := session.statement.GenQuerySQL(sqlOrArgs...) 
- if err != nil { - return nil, err - } - - rows, err := session.queryRows(sqlStr, args...) - if err != nil { - return nil, err - } - defer rows.Close() - - return session.engine.ScanInterfaceMaps(rows) -} - -func (session *Session) exec(sqlStr string, args ...interface{}) (sql.Result, error) { - defer session.resetStatement() - - session.queryPreprocess(&sqlStr, args...) - - session.lastSQL = sqlStr - session.lastSQLArgs = args - - if !session.isAutoCommit { - if session.prepareStmt { - stmt, err := session.doPrepareTx(sqlStr) - if err != nil { - return nil, err - } - return stmt.ExecContext(session.ctx, args...) - } - return session.tx.ExecContext(session.ctx, sqlStr, args...) - } - - if session.prepareStmt { - stmt, err := session.doPrepare(session.DB(), sqlStr) - if err != nil { - return nil, err - } - return stmt.ExecContext(session.ctx, args...) - } - - return session.DB().ExecContext(session.ctx, sqlStr, args...) -} - -// Exec raw sql -func (session *Session) Exec(sqlOrArgs ...interface{}) (sql.Result, error) { - if session.isAutoClose { - defer session.Close() - } - - if len(sqlOrArgs) == 0 { - return nil, ErrUnSupportedType - } - - sqlStr, args, err := session.statement.ConvertSQLOrArgs(sqlOrArgs...) - if err != nil { - return nil, err - } - - return session.exec(sqlStr, args...) -} diff --git a/vendor/xorm.io/xorm/session_schema.go b/vendor/xorm.io/xorm/session_schema.go deleted file mode 100644 index e66c3b42..00000000 --- a/vendor/xorm.io/xorm/session_schema.go +++ /dev/null @@ -1,536 +0,0 @@ -// Copyright 2016 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package xorm - -import ( - "bufio" - "context" - "database/sql" - "fmt" - "io" - "os" - "strings" - - "xorm.io/xorm/dialects" - "xorm.io/xorm/internal/utils" - "xorm.io/xorm/schemas" -) - -// Ping test if database is ok -func (session *Session) Ping() error { - if session.isAutoClose { - defer session.Close() - } - - session.engine.logger.Infof("PING DATABASE %v", session.engine.DriverName()) - return session.DB().PingContext(session.ctx) -} - -// CreateTable create a table according a bean -func (session *Session) CreateTable(bean interface{}) error { - if session.isAutoClose { - defer session.Close() - } - - return session.createTable(bean) -} - -func (session *Session) createTable(bean interface{}) error { - if err := session.statement.SetRefBean(bean); err != nil { - return err - } - - session.statement.RefTable.StoreEngine = session.statement.StoreEngine - session.statement.RefTable.Charset = session.statement.Charset - tableName := session.statement.TableName() - refTable := session.statement.RefTable - if refTable.AutoIncrement != "" && session.engine.dialect.Features().AutoincrMode == dialects.SequenceAutoincrMode { - sqlStr, err := session.engine.dialect.CreateSequenceSQL(context.Background(), session.engine.db, utils.SeqName(tableName)) - if err != nil { - return err - } - if _, err := session.exec(sqlStr); err != nil { - return err - } - } - - sqlStr, _, err := session.engine.dialect.CreateTableSQL(context.Background(), session.engine.db, refTable, tableName) - if err != nil { - return err - } - if _, err := session.exec(sqlStr); err != nil { - return err - } - - return nil -} - -// CreateIndexes create indexes -func (session *Session) CreateIndexes(bean interface{}) error { - if session.isAutoClose { - defer session.Close() - } - - return session.createIndexes(bean) -} - -func (session *Session) createIndexes(bean interface{}) error { - if err := session.statement.SetRefBean(bean); err != nil { - return err - } - - sqls := 
session.statement.GenIndexSQL() - for _, sqlStr := range sqls { - _, err := session.exec(sqlStr) - if err != nil { - return err - } - } - return nil -} - -// CreateUniques create uniques -func (session *Session) CreateUniques(bean interface{}) error { - if session.isAutoClose { - defer session.Close() - } - return session.createUniques(bean) -} - -func (session *Session) createUniques(bean interface{}) error { - if err := session.statement.SetRefBean(bean); err != nil { - return err - } - - sqls := session.statement.GenUniqueSQL() - for _, sqlStr := range sqls { - _, err := session.exec(sqlStr) - if err != nil { - return err - } - } - return nil -} - -// DropIndexes drop indexes -func (session *Session) DropIndexes(bean interface{}) error { - if session.isAutoClose { - defer session.Close() - } - - return session.dropIndexes(bean) -} - -func (session *Session) dropIndexes(bean interface{}) error { - if err := session.statement.SetRefBean(bean); err != nil { - return err - } - - sqls := session.statement.GenDelIndexSQL() - for _, sqlStr := range sqls { - _, err := session.exec(sqlStr) - if err != nil { - return err - } - } - return nil -} - -// DropTable drop table will drop table if exist, if drop failed, it will return error -func (session *Session) DropTable(beanOrTableName interface{}) error { - if session.isAutoClose { - defer session.Close() - } - - return session.dropTable(beanOrTableName) -} - -func (session *Session) dropTable(beanOrTableName interface{}) error { - tableName := session.engine.TableName(beanOrTableName) - sqlStr, checkIfExist := session.engine.dialect.DropTableSQL(session.engine.TableName(tableName, true)) - if !checkIfExist { - exist, err := session.engine.dialect.IsTableExist(session.getQueryer(), session.ctx, tableName) - if err != nil { - return err - } - checkIfExist = exist - } - - if !checkIfExist { - return nil - } - if _, err := session.exec(sqlStr); err != nil { - return err - } - - if session.engine.dialect.Features().AutoincrMode 
== dialects.IncrAutoincrMode { - return nil - } - - var seqName = utils.SeqName(tableName) - exist, err := session.engine.dialect.IsSequenceExist(session.ctx, session.getQueryer(), seqName) - if err != nil { - return err - } - if !exist { - return nil - } - - sqlStr, err = session.engine.dialect.DropSequenceSQL(seqName) - if err != nil { - return err - } - _, err = session.exec(sqlStr) - return err -} - -// IsTableExist if a table is exist -func (session *Session) IsTableExist(beanOrTableName interface{}) (bool, error) { - if session.isAutoClose { - defer session.Close() - } - - tableName := session.engine.TableName(beanOrTableName) - - return session.isTableExist(tableName) -} - -func (session *Session) isTableExist(tableName string) (bool, error) { - return session.engine.dialect.IsTableExist(session.getQueryer(), session.ctx, tableName) -} - -// IsTableEmpty if table have any records -func (session *Session) IsTableEmpty(bean interface{}) (bool, error) { - if session.isAutoClose { - defer session.Close() - } - return session.isTableEmpty(session.engine.TableName(bean)) -} - -func (session *Session) isTableEmpty(tableName string) (bool, error) { - var total int64 - sqlStr := fmt.Sprintf("select count(*) from %s", session.engine.Quote(session.engine.TableName(tableName, true))) - err := session.queryRow(sqlStr).Scan(&total) - if err != nil { - if err == sql.ErrNoRows { - err = nil - } - return true, err - } - - return total == 0, nil -} - -func (session *Session) addColumn(colName string) error { - col := session.statement.RefTable.GetColumn(colName) - sql := session.engine.dialect.AddColumnSQL(session.statement.TableName(), col) - _, err := session.exec(sql) - return err -} - -func (session *Session) addIndex(tableName, idxName string) error { - index := session.statement.RefTable.Indexes[idxName] - sqlStr := session.engine.dialect.CreateIndexSQL(tableName, index) - _, err := session.exec(sqlStr) - return err -} - -func (session *Session) addUnique(tableName, 
uqeName string) error { - index := session.statement.RefTable.Indexes[uqeName] - sqlStr := session.engine.dialect.CreateIndexSQL(tableName, index) - _, err := session.exec(sqlStr) - return err -} - -// Sync2 synchronize structs to database tables -// Depricated -func (session *Session) Sync2(beans ...interface{}) error { - return session.Sync(beans...) -} - -// Sync synchronize structs to database tables -func (session *Session) Sync(beans ...interface{}) error { - engine := session.engine - - if session.isAutoClose { - session.isAutoClose = false - defer session.Close() - } - - tables, err := engine.dialect.GetTables(session.getQueryer(), session.ctx) - if err != nil { - return err - } - - session.autoResetStatement = false - defer func() { - session.autoResetStatement = true - session.resetStatement() - }() - - for _, bean := range beans { - v := utils.ReflectValue(bean) - table, err := engine.tagParser.ParseWithCache(v) - if err != nil { - return err - } - var tbName string - if len(session.statement.AltTableName) > 0 { - tbName = session.statement.AltTableName - } else { - tbName = engine.TableName(bean) - } - tbNameWithSchema := engine.tbNameWithSchema(tbName) - - var oriTable *schemas.Table - for _, tb := range tables { - if strings.EqualFold(engine.tbNameWithSchema(tb.Name), engine.tbNameWithSchema(tbName)) { - oriTable = tb - break - } - } - - // this is a new table - if oriTable == nil { - err = session.StoreEngine(session.statement.StoreEngine).createTable(bean) - if err != nil { - return err - } - - err = session.createUniques(bean) - if err != nil { - return err - } - - err = session.createIndexes(bean) - if err != nil { - return err - } - continue - } - - // this will modify an old table - if err = engine.loadTableInfo(oriTable); err != nil { - return err - } - - // check columns - for _, col := range table.Columns() { - var oriCol *schemas.Column - for _, col2 := range oriTable.Columns() { - if strings.EqualFold(col.Name, col2.Name) { - oriCol = col2 
- break - } - } - - // column is not exist on table - if oriCol == nil { - session.statement.RefTable = table - session.statement.SetTableName(tbNameWithSchema) - if err = session.addColumn(col.Name); err != nil { - return err - } - continue - } - - err = nil - expectedType := engine.dialect.SQLType(col) - curType := engine.dialect.SQLType(oriCol) - if expectedType != curType { - if expectedType == schemas.Text && - strings.HasPrefix(curType, schemas.Varchar) { - // currently only support mysql & postgres - if engine.dialect.URI().DBType == schemas.MYSQL || - engine.dialect.URI().DBType == schemas.POSTGRES { - engine.logger.Infof("Table %s column %s change type from %s to %s\n", - tbNameWithSchema, col.Name, curType, expectedType) - _, err = session.exec(engine.dialect.ModifyColumnSQL(tbNameWithSchema, col)) - } else { - engine.logger.Warnf("Table %s column %s db type is %s, struct type is %s\n", - tbNameWithSchema, col.Name, curType, expectedType) - } - } else if strings.HasPrefix(curType, schemas.Varchar) && strings.HasPrefix(expectedType, schemas.Varchar) { - if engine.dialect.URI().DBType == schemas.MYSQL { - if oriCol.Length < col.Length { - engine.logger.Infof("Table %s column %s change type from varchar(%d) to varchar(%d)\n", - tbNameWithSchema, col.Name, oriCol.Length, col.Length) - _, err = session.exec(engine.dialect.ModifyColumnSQL(tbNameWithSchema, col)) - } - } - } else { - if !(strings.HasPrefix(curType, expectedType) && curType[len(expectedType)] == '(') { - if !strings.EqualFold(schemas.SQLTypeName(curType), engine.dialect.Alias(schemas.SQLTypeName(expectedType))) { - engine.logger.Warnf("Table %s column %s db type is %s, struct type is %s", - tbNameWithSchema, col.Name, curType, expectedType) - } - } - } - } else if expectedType == schemas.Varchar { - if engine.dialect.URI().DBType == schemas.MYSQL { - if oriCol.Length < col.Length { - engine.logger.Infof("Table %s column %s change type from varchar(%d) to varchar(%d)\n", - tbNameWithSchema, 
col.Name, oriCol.Length, col.Length) - _, err = session.exec(engine.dialect.ModifyColumnSQL(tbNameWithSchema, col)) - } - } - } else if col.Comment != oriCol.Comment { - _, err = session.exec(engine.dialect.ModifyColumnSQL(tbNameWithSchema, col)) - } - - if col.Default != oriCol.Default { - switch { - case col.IsAutoIncrement: // For autoincrement column, don't check default - case (col.SQLType.Name == schemas.Bool || col.SQLType.Name == schemas.Boolean) && - ((strings.EqualFold(col.Default, "true") && oriCol.Default == "1") || - (strings.EqualFold(col.Default, "false") && oriCol.Default == "0")): - default: - engine.logger.Warnf("Table %s Column %s db default is %s, struct default is %s", - tbName, col.Name, oriCol.Default, col.Default) - } - } - if col.Nullable != oriCol.Nullable { - engine.logger.Warnf("Table %s Column %s db nullable is %v, struct nullable is %v", - tbName, col.Name, oriCol.Nullable, col.Nullable) - } - - if err != nil { - return err - } - } - - var foundIndexNames = make(map[string]bool) - var addedNames = make(map[string]*schemas.Index) - - for name, index := range table.Indexes { - var oriIndex *schemas.Index - for name2, index2 := range oriTable.Indexes { - if index.Equal(index2) { - oriIndex = index2 - foundIndexNames[name2] = true - break - } - } - - if oriIndex != nil { - if oriIndex.Type != index.Type { - sql := engine.dialect.DropIndexSQL(tbNameWithSchema, oriIndex) - _, err = session.exec(sql) - if err != nil { - return err - } - oriIndex = nil - } - } - - if oriIndex == nil { - addedNames[name] = index - } - } - - for name2, index2 := range oriTable.Indexes { - if _, ok := foundIndexNames[name2]; !ok { - sql := engine.dialect.DropIndexSQL(tbNameWithSchema, index2) - _, err = session.exec(sql) - if err != nil { - return err - } - } - } - - for name, index := range addedNames { - if index.Type == schemas.UniqueType { - session.statement.RefTable = table - session.statement.SetTableName(tbNameWithSchema) - err = 
session.addUnique(tbNameWithSchema, name) - } else if index.Type == schemas.IndexType { - session.statement.RefTable = table - session.statement.SetTableName(tbNameWithSchema) - err = session.addIndex(tbNameWithSchema, name) - } - if err != nil { - return err - } - } - - // check all the columns which removed from struct fields but left on database tables. - for _, colName := range oriTable.ColumnsSeq() { - if table.GetColumn(colName) == nil { - engine.logger.Warnf("Table %s has column %s but struct has not related field", engine.TableName(oriTable.Name, true), colName) - } - } - } - - return nil -} - -// ImportFile SQL DDL file -func (session *Session) ImportFile(ddlPath string) ([]sql.Result, error) { - file, err := os.Open(ddlPath) - if err != nil { - return nil, err - } - defer file.Close() - return session.Import(file) -} - -// Import SQL DDL from io.Reader -func (session *Session) Import(r io.Reader) ([]sql.Result, error) { - var ( - results []sql.Result - lastError error - inSingleQuote bool - startComment bool - ) - - scanner := bufio.NewScanner(r) - semiColSpliter := func(data []byte, atEOF bool) (advance int, token []byte, err error) { - if atEOF && len(data) == 0 { - return 0, nil, nil - } - var oriInSingleQuote = inSingleQuote - for i, b := range data { - if startComment { - if b == '\n' { - startComment = false - } - } else { - if !inSingleQuote && i > 0 && data[i-1] == '-' && data[i] == '-' { - startComment = true - continue - } - - if b == '\'' { - inSingleQuote = !inSingleQuote - } - if !inSingleQuote && b == ';' { - return i + 1, data[0:i], nil - } - } - } - // If we're at EOF, we have a final, non-terminated line. Return it. - if atEOF { - return len(data), data, nil - } - inSingleQuote = oriInSingleQuote - // Request more data. 
- return 0, nil, nil - } - - scanner.Split(semiColSpliter) - - for scanner.Scan() { - query := strings.Trim(scanner.Text(), " \t\n\r") - if len(query) > 0 { - result, err := session.Exec(query) - if err != nil { - return nil, err - } - results = append(results, result) - } - } - - return results, lastError -} diff --git a/vendor/xorm.io/xorm/session_stats.go b/vendor/xorm.io/xorm/session_stats.go deleted file mode 100644 index 5d0da5e9..00000000 --- a/vendor/xorm.io/xorm/session_stats.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2016 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xorm - -import ( - "database/sql" - "errors" - "reflect" -) - -// Count counts the records. bean's non-empty fields -// are conditions. -func (session *Session) Count(bean ...interface{}) (int64, error) { - if session.isAutoClose { - defer session.Close() - } - - sqlStr, args, err := session.statement.GenCountSQL(bean...) - if err != nil { - return 0, err - } - - var total int64 - err = session.queryRow(sqlStr, args...).Scan(&total) - if err == sql.ErrNoRows || err == nil { - return total, nil - } - - return 0, err -} - -// sum call sum some column. bean's non-empty fields are conditions. -func (session *Session) sum(res interface{}, bean interface{}, columnNames ...string) error { - if session.isAutoClose { - defer session.Close() - } - - v := reflect.ValueOf(res) - if v.Kind() != reflect.Ptr { - return errors.New("need a pointer to a variable") - } - - sqlStr, args, err := session.statement.GenSumSQL(bean, columnNames...) - if err != nil { - return err - } - - if v.Elem().Kind() == reflect.Slice { - err = session.queryRow(sqlStr, args...).ScanSlice(res) - } else { - err = session.queryRow(sqlStr, args...).Scan(res) - } - if err == sql.ErrNoRows || err == nil { - return nil - } - return err -} - -// Sum call sum some column. bean's non-empty fields are conditions. 
-func (session *Session) Sum(bean interface{}, columnName string) (res float64, err error) { - return res, session.sum(&res, bean, columnName) -} - -// SumInt call sum some column. bean's non-empty fields are conditions. -func (session *Session) SumInt(bean interface{}, columnName string) (res int64, err error) { - return res, session.sum(&res, bean, columnName) -} - -// Sums call sum some columns. bean's non-empty fields are conditions. -func (session *Session) Sums(bean interface{}, columnNames ...string) ([]float64, error) { - var res = make([]float64, len(columnNames)) - return res, session.sum(&res, bean, columnNames...) -} - -// SumsInt sum specify columns and return as []int64 instead of []float64 -func (session *Session) SumsInt(bean interface{}, columnNames ...string) ([]int64, error) { - var res = make([]int64, len(columnNames)) - return res, session.sum(&res, bean, columnNames...) -} diff --git a/vendor/xorm.io/xorm/session_tx.go b/vendor/xorm.io/xorm/session_tx.go deleted file mode 100644 index 4fa56891..00000000 --- a/vendor/xorm.io/xorm/session_tx.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2016 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package xorm - -// Begin a transaction -func (session *Session) Begin() error { - if session.isAutoCommit { - tx, err := session.DB().BeginTx(session.ctx, nil) - if err != nil { - return err - } - session.isAutoCommit = false - session.isCommitedOrRollbacked = false - session.tx = tx - - session.saveLastSQL("BEGIN TRANSACTION") - } - return nil -} - -// Rollback When using transaction, you can rollback if any error -func (session *Session) Rollback() error { - if !session.isAutoCommit && !session.isCommitedOrRollbacked { - session.saveLastSQL("ROLL BACK") - session.isCommitedOrRollbacked = true - session.isAutoCommit = true - - return session.tx.Rollback() - } - return nil -} - -// Commit When using transaction, Commit will commit all operations. -func (session *Session) Commit() error { - if !session.isAutoCommit && !session.isCommitedOrRollbacked { - session.saveLastSQL("COMMIT") - session.isCommitedOrRollbacked = true - session.isAutoCommit = true - - if err := session.tx.Commit(); err != nil { - return err - } - - // handle processors after tx committed - closureCallFunc := func(closuresPtr *[]func(interface{}), bean interface{}) { - if closuresPtr != nil { - for _, closure := range *closuresPtr { - closure(bean) - } - } - } - - for bean, closuresPtr := range session.afterInsertBeans { - closureCallFunc(closuresPtr, bean) - - if processor, ok := interface{}(bean).(AfterInsertProcessor); ok { - processor.AfterInsert() - } - } - for bean, closuresPtr := range session.afterUpdateBeans { - closureCallFunc(closuresPtr, bean) - - if processor, ok := interface{}(bean).(AfterUpdateProcessor); ok { - processor.AfterUpdate() - } - } - for bean, closuresPtr := range session.afterDeleteBeans { - closureCallFunc(closuresPtr, bean) - - if processor, ok := interface{}(bean).(AfterDeleteProcessor); ok { - processor.AfterDelete() - } - } - cleanUpFunc := func(slices *map[interface{}]*[]func(interface{})) { - if len(*slices) > 0 { - *slices = 
make(map[interface{}]*[]func(interface{})) - } - } - cleanUpFunc(&session.afterInsertBeans) - cleanUpFunc(&session.afterUpdateBeans) - cleanUpFunc(&session.afterDeleteBeans) - } - return nil -} - -// IsInTx if current session is in a transaction -func (session *Session) IsInTx() bool { - return !session.isAutoCommit -} diff --git a/vendor/xorm.io/xorm/session_update.go b/vendor/xorm.io/xorm/session_update.go deleted file mode 100644 index 76f311d6..00000000 --- a/vendor/xorm.io/xorm/session_update.go +++ /dev/null @@ -1,553 +0,0 @@ -// Copyright 2016 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xorm - -import ( - "errors" - "fmt" - "reflect" - "strconv" - "strings" - - "xorm.io/builder" - "xorm.io/xorm/caches" - "xorm.io/xorm/internal/utils" - "xorm.io/xorm/schemas" -) - -// enumerated all errors -var ( - ErrNoColumnsTobeUpdated = errors.New("no columns found to be updated") -) - -//revive:disable -func (session *Session) cacheUpdate(table *schemas.Table, tableName, sqlStr string, args ...interface{}) error { - if table == nil || - session.tx != nil { - return ErrCacheFailed - } - - oldhead, newsql := session.statement.ConvertUpdateSQL(sqlStr) - if newsql == "" { - return ErrCacheFailed - } - for _, filter := range session.engine.dialect.Filters() { - newsql = filter.Do(newsql) - } - session.engine.logger.Debugf("[cache] new sql: %v, %v", oldhead, newsql) - - var nStart int - if len(args) > 0 { - if strings.Contains(sqlStr, "?") { - nStart = strings.Count(oldhead, "?") - } else { - // only for pq, TODO: if any other databse? 
- nStart = strings.Count(oldhead, "$") - } - } - - cacher := session.engine.GetCacher(tableName) - session.engine.logger.Debugf("[cache] get cache sql: %v, %v", newsql, args[nStart:]) - ids, err := caches.GetCacheSql(cacher, tableName, newsql, args[nStart:]) - if err != nil { - rows, err := session.NoCache().queryRows(newsql, args[nStart:]...) - if err != nil { - return err - } - defer rows.Close() - - ids = make([]schemas.PK, 0) - for rows.Next() { - res := make([]string, len(table.PrimaryKeys)) - err = rows.ScanSlice(&res) - if err != nil { - return err - } - var pk schemas.PK = make([]interface{}, len(table.PrimaryKeys)) - for i, col := range table.PKColumns() { - if col.SQLType.IsNumeric() { - n, err := strconv.ParseInt(res[i], 10, 64) - if err != nil { - return err - } - pk[i] = n - } else if col.SQLType.IsText() { - pk[i] = res[i] - } else { - return errors.New("not supported") - } - } - - ids = append(ids, pk) - } - if rows.Err() != nil { - return rows.Err() - } - session.engine.logger.Debugf("[cache] find updated id: %v", ids) - } /*else { - session.engine.LogDebug("[xorm:cacheUpdate] del cached sql:", tableName, newsql, args) - cacher.DelIds(tableName, genSqlKey(newsql, args)) - }*/ - - for _, id := range ids { - sid, err := id.ToString() - if err != nil { - return err - } - if bean := cacher.GetBean(tableName, sid); bean != nil { - sqls := utils.SplitNNoCase(sqlStr, "where", 2) - if len(sqls) == 0 || len(sqls) > 2 { - return ErrCacheFailed - } - - sqls = utils.SplitNNoCase(sqls[0], "set", 2) - if len(sqls) != 2 { - return ErrCacheFailed - } - kvs := strings.Split(strings.TrimSpace(sqls[1]), ",") - - for idx, kv := range kvs { - sps := strings.SplitN(kv, "=", 2) - sps2 := strings.Split(sps[0], ".") - colName := sps2[len(sps2)-1] - colName = session.engine.dialect.Quoter().Trim(colName) - colName = schemas.CommonQuoter.Trim(colName) - - if col := table.GetColumn(colName); col != nil { - fieldValue, err := col.ValueOf(bean) - if err != nil { - 
session.engine.logger.Errorf("%v", err) - } else { - session.engine.logger.Debugf("[cache] set bean field: %v, %v, %v", bean, colName, fieldValue.Interface()) - if col.IsVersion && session.statement.CheckVersion { - session.incrVersionFieldValue(fieldValue) - } else { - fieldValue.Set(reflect.ValueOf(args[idx])) - } - } - } else { - session.engine.logger.Errorf("[cache] ERROR: column %v is not table %v's", - colName, table.Name) - } - } - - session.engine.logger.Debugf("[cache] update cache: %v, %v, %v", tableName, id, bean) - cacher.PutBean(tableName, sid, bean) - } - } - session.engine.logger.Debugf("[cache] clear cached table sql: %v", tableName) - cacher.ClearIds(tableName) - return nil -} - -// Update records, bean's non-empty fields are updated contents, -// condiBean' non-empty filds are conditions -// CAUTION: -// 1.bool will defaultly be updated content nor conditions -// You should call UseBool if you have bool to use. -// 2.float32 & float64 may be not inexact as conditions -func (session *Session) Update(bean interface{}, condiBean ...interface{}) (int64, error) { - if session.isAutoClose { - defer session.Close() - } - - defer session.resetStatement() - - if session.statement.LastError != nil { - return 0, session.statement.LastError - } - - v := utils.ReflectValue(bean) - t := v.Type() - - var colNames []string - var args []interface{} - - // handle before update processors - for _, closure := range session.beforeClosures { - closure(bean) - } - cleanupProcessorsClosures(&session.beforeClosures) // cleanup after used - if processor, ok := interface{}(bean).(BeforeUpdateProcessor); ok { - processor.BeforeUpdate() - } - // -- - - var err error - isMap := t.Kind() == reflect.Map - isStruct := t.Kind() == reflect.Struct - if isStruct { - if err := session.statement.SetRefBean(bean); err != nil { - return 0, err - } - - if len(session.statement.TableName()) == 0 { - return 0, ErrTableNotFound - } - - if session.statement.ColumnStr() == "" { - colNames, 
args, err = session.statement.BuildUpdates(v, false, false, - false, false, true) - } else { - colNames, args, err = session.genUpdateColumns(bean) - } - if err != nil { - return 0, err - } - } else if isMap { - colNames = make([]string, 0) - args = make([]interface{}, 0) - bValue := reflect.Indirect(reflect.ValueOf(bean)) - - for _, v := range bValue.MapKeys() { - colNames = append(colNames, session.engine.Quote(v.String())+" = ?") - args = append(args, bValue.MapIndex(v).Interface()) - } - } else { - return 0, ErrParamsType - } - - table := session.statement.RefTable - - if session.statement.UseAutoTime && table != nil && table.Updated != "" { - if !session.statement.ColumnMap.Contain(table.Updated) && - !session.statement.OmitColumnMap.Contain(table.Updated) { - colNames = append(colNames, session.engine.Quote(table.Updated)+" = ?") - col := table.UpdatedColumn() - val, t, err := session.engine.nowTime(col) - if err != nil { - return 0, err - } - if session.engine.dialect.URI().DBType == schemas.ORACLE { - args = append(args, t) - } else { - args = append(args, val) - } - - colName := col.Name - if isStruct { - session.afterClosures = append(session.afterClosures, func(bean interface{}) { - col := table.GetColumn(colName) - setColumnTime(bean, col, t) - }) - } - } - } - - // for update action to like "column = column + ?" - incColumns := session.statement.IncrColumns - for _, expr := range incColumns { - colNames = append(colNames, session.engine.Quote(expr.ColName)+" = "+session.engine.Quote(expr.ColName)+" + ?") - args = append(args, expr.Arg) - } - // for update action to like "column = column - ?" 
- decColumns := session.statement.DecrColumns - for _, expr := range decColumns { - colNames = append(colNames, session.engine.Quote(expr.ColName)+" = "+session.engine.Quote(expr.ColName)+" - ?") - args = append(args, expr.Arg) - } - // for update action to like "column = expression" - exprColumns := session.statement.ExprColumns - for _, expr := range exprColumns { - switch tp := expr.Arg.(type) { - case string: - if len(tp) == 0 { - tp = "''" - } - colNames = append(colNames, session.engine.Quote(expr.ColName)+"="+tp) - case *builder.Builder: - subQuery, subArgs, err := builder.ToSQL(tp) - if err != nil { - return 0, err - } - subQuery = session.statement.ReplaceQuote(subQuery) - colNames = append(colNames, session.engine.Quote(expr.ColName)+"=("+subQuery+")") - args = append(args, subArgs...) - default: - colNames = append(colNames, session.engine.Quote(expr.ColName)+"=?") - args = append(args, expr.Arg) - } - } - - if err = session.statement.ProcessIDParam(); err != nil { - return 0, err - } - - var autoCond builder.Cond - if !session.statement.NoAutoCondition { - condBeanIsStruct := false - if len(condiBean) > 0 { - if c, ok := condiBean[0].(map[string]interface{}); ok { - eq := make(builder.Eq) - for k, v := range c { - eq[session.engine.Quote(k)] = v - } - autoCond = builder.Eq(eq) - } else { - ct := reflect.TypeOf(condiBean[0]) - k := ct.Kind() - if k == reflect.Ptr { - k = ct.Elem().Kind() - } - if k == reflect.Struct { - condTable, err := session.engine.TableInfo(condiBean[0]) - if err != nil { - return 0, err - } - - autoCond, err = session.statement.BuildConds(condTable, condiBean[0], true, true, false, true, false) - if err != nil { - return 0, err - } - condBeanIsStruct = true - } else { - return 0, ErrConditionType - } - } - } - - if !condBeanIsStruct && table != nil { - if col := table.DeletedColumn(); col != nil && !session.statement.GetUnscoped() { // tag "deleted" is enabled - autoCond1 := session.statement.CondDeleted(col) - - if autoCond == nil 
{ - autoCond = autoCond1 - } else { - autoCond = autoCond.And(autoCond1) - } - } - } - } - - st := session.statement - - var ( - cond = session.statement.Conds().And(autoCond) - doIncVer = isStruct && (table != nil && table.Version != "" && session.statement.CheckVersion) - verValue *reflect.Value - ) - if doIncVer { - verValue, err = table.VersionColumn().ValueOf(bean) - if err != nil { - return 0, err - } - - if verValue != nil { - cond = cond.And(builder.Eq{session.engine.Quote(table.Version): verValue.Interface()}) - colNames = append(colNames, session.engine.Quote(table.Version)+" = "+session.engine.Quote(table.Version)+" + 1") - } - } - - if len(colNames) == 0 { - return 0, ErrNoColumnsTobeUpdated - } - - whereWriter := builder.NewWriter() - if cond.IsValid() { - fmt.Fprint(whereWriter, "WHERE ") - } - if err := cond.WriteTo(st.QuoteReplacer(whereWriter)); err != nil { - return 0, err - } - if err := st.WriteOrderBy(whereWriter); err != nil { - return 0, err - } - - tableName := session.statement.TableName() - // TODO: Oracle support needed - var top string - if st.LimitN != nil { - limitValue := *st.LimitN - switch session.engine.dialect.URI().DBType { - case schemas.MYSQL: - fmt.Fprintf(whereWriter, " LIMIT %d", limitValue) - case schemas.SQLITE: - fmt.Fprintf(whereWriter, " LIMIT %d", limitValue) - - cond = cond.And(builder.Expr(fmt.Sprintf("rowid IN (SELECT rowid FROM %v %v)", - session.engine.Quote(tableName), whereWriter.String()), whereWriter.Args()...)) - - whereWriter = builder.NewWriter() - fmt.Fprint(whereWriter, "WHERE ") - if err := cond.WriteTo(st.QuoteReplacer(whereWriter)); err != nil { - return 0, err - } - case schemas.POSTGRES: - fmt.Fprintf(whereWriter, " LIMIT %d", limitValue) - - cond = cond.And(builder.Expr(fmt.Sprintf("CTID IN (SELECT CTID FROM %v %v)", - session.engine.Quote(tableName), whereWriter.String()), whereWriter.Args()...)) - - whereWriter = builder.NewWriter() - fmt.Fprint(whereWriter, "WHERE ") - if err := 
cond.WriteTo(st.QuoteReplacer(whereWriter)); err != nil { - return 0, err - } - case schemas.MSSQL: - if st.HasOrderBy() && table != nil && len(table.PrimaryKeys) == 1 { - cond = builder.Expr(fmt.Sprintf("%s IN (SELECT TOP (%d) %s FROM %v%v)", - table.PrimaryKeys[0], limitValue, table.PrimaryKeys[0], - session.engine.Quote(tableName), whereWriter.String()), whereWriter.Args()...) - - whereWriter = builder.NewWriter() - fmt.Fprint(whereWriter, "WHERE ") - if err := cond.WriteTo(whereWriter); err != nil { - return 0, err - } - } else { - top = fmt.Sprintf("TOP (%d) ", limitValue) - } - } - } - - tableAlias := session.engine.Quote(tableName) - var fromSQL string - if session.statement.TableAlias != "" { - switch session.engine.dialect.URI().DBType { - case schemas.MSSQL: - fromSQL = fmt.Sprintf("FROM %s %s ", tableAlias, session.statement.TableAlias) - tableAlias = session.statement.TableAlias - default: - tableAlias = fmt.Sprintf("%s AS %s", tableAlias, session.statement.TableAlias) - } - } - - updateWriter := builder.NewWriter() - if _, err := fmt.Fprintf(updateWriter, "UPDATE %v%v SET %v %v", - top, - tableAlias, - strings.Join(colNames, ", "), - fromSQL); err != nil { - return 0, err - } - if err := utils.WriteBuilder(updateWriter, whereWriter); err != nil { - return 0, err - } - - res, err := session.exec(updateWriter.String(), append(args, updateWriter.Args()...)...) - if err != nil { - return 0, err - } else if doIncVer { - if verValue != nil && verValue.IsValid() && verValue.CanSet() { - session.incrVersionFieldValue(verValue) - } - } - - if cacher := session.engine.GetCacher(tableName); cacher != nil && session.statement.UseCache { - // session.cacheUpdate(table, tableName, sqlStr, args...) 
- session.engine.logger.Debugf("[cache] clear table: %v", tableName) - cacher.ClearIds(tableName) - cacher.ClearBeans(tableName) - } - - // handle after update processors - if session.isAutoCommit { - for _, closure := range session.afterClosures { - closure(bean) - } - if processor, ok := interface{}(bean).(AfterUpdateProcessor); ok { - session.engine.logger.Debugf("[event] %v has after update processor", tableName) - processor.AfterUpdate() - } - } else { - lenAfterClosures := len(session.afterClosures) - if lenAfterClosures > 0 { - if value, has := session.afterUpdateBeans[bean]; has && value != nil { - *value = append(*value, session.afterClosures...) - } else { - afterClosures := make([]func(interface{}), lenAfterClosures) - copy(afterClosures, session.afterClosures) - // FIXME: if bean is a map type, it will panic because map cannot be as map key - session.afterUpdateBeans[bean] = &afterClosures - } - } else { - if _, ok := interface{}(bean).(AfterUpdateProcessor); ok { - session.afterUpdateBeans[bean] = nil - } - } - } - cleanupProcessorsClosures(&session.afterClosures) // cleanup after used - // -- - - return res.RowsAffected() -} - -func (session *Session) genUpdateColumns(bean interface{}) ([]string, []interface{}, error) { - table := session.statement.RefTable - colNames := make([]string, 0, len(table.ColumnsSeq())) - args := make([]interface{}, 0, len(table.ColumnsSeq())) - - for _, col := range table.Columns() { - if !col.IsVersion && !col.IsCreated && !col.IsUpdated { - if session.statement.OmitColumnMap.Contain(col.Name) { - continue - } - } - if col.MapType == schemas.ONLYFROMDB { - continue - } - - fieldValuePtr, err := col.ValueOf(bean) - if err != nil { - return nil, nil, err - } - fieldValue := *fieldValuePtr - - if col.IsAutoIncrement && utils.IsValueZero(fieldValue) { - continue - } - - if (col.IsDeleted && !session.statement.GetUnscoped()) || col.IsCreated { - continue - } - - // if only update specify columns - if 
len(session.statement.ColumnMap) > 0 && !session.statement.ColumnMap.Contain(col.Name) { - continue - } - - if session.statement.IncrColumns.IsColExist(col.Name) { - continue - } else if session.statement.DecrColumns.IsColExist(col.Name) { - continue - } else if session.statement.ExprColumns.IsColExist(col.Name) { - continue - } - - // !evalphobia! set fieldValue as nil when column is nullable and zero-value - if _, ok := getFlagForColumn(session.statement.NullableMap, col); ok { - if col.Nullable && utils.IsValueZero(fieldValue) { - var nilValue *int - fieldValue = reflect.ValueOf(nilValue) - } - } - - if col.IsUpdated && session.statement.UseAutoTime /*&& isZero(fieldValue.Interface())*/ { - // if time is non-empty, then set to auto time - val, t, err := session.engine.nowTime(col) - if err != nil { - return nil, nil, err - } - args = append(args, val) - - colName := col.Name - session.afterClosures = append(session.afterClosures, func(bean interface{}) { - col := table.GetColumn(colName) - setColumnTime(bean, col, t) - }) - } else if col.IsVersion && session.statement.CheckVersion { - args = append(args, 1) - } else { - arg, err := session.statement.Value2Interface(col, fieldValue) - if err != nil { - return colNames, args, err - } - args = append(args, arg) - } - - colNames = append(colNames, session.engine.Quote(col.Name)+" = ?") - } - return colNames, args, nil -} diff --git a/vendor/xorm.io/xorm/tags/parser.go b/vendor/xorm.io/xorm/tags/parser.go deleted file mode 100644 index 028f8d0b..00000000 --- a/vendor/xorm.io/xorm/tags/parser.go +++ /dev/null @@ -1,379 +0,0 @@ -// Copyright 2020 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package tags - -import ( - "encoding/gob" - "errors" - "fmt" - "reflect" - "strings" - "sync" - "time" - "unicode" - - "xorm.io/xorm/caches" - "xorm.io/xorm/convert" - "xorm.io/xorm/dialects" - "xorm.io/xorm/names" - "xorm.io/xorm/schemas" -) - -// ErrUnsupportedType represents an unsupported type error -var ErrUnsupportedType = errors.New("unsupported type") - -// TableIndices is an interface that describes structs that provide additional index information above that which is automatically parsed -type TableIndices interface { - TableIndices() []*schemas.Index -} - -var tpTableIndices = reflect.TypeOf((*TableIndices)(nil)).Elem() - -// Parser represents a parser for xorm tag -type Parser struct { - identifier string - dialect dialects.Dialect - columnMapper names.Mapper - tableMapper names.Mapper - handlers map[string]Handler - cacherMgr *caches.Manager - tableCache sync.Map // map[reflect.Type]*schemas.Table -} - -// NewParser creates a tag parser -func NewParser(identifier string, dialect dialects.Dialect, tableMapper, columnMapper names.Mapper, cacherMgr *caches.Manager) *Parser { - return &Parser{ - identifier: identifier, - dialect: dialect, - tableMapper: tableMapper, - columnMapper: columnMapper, - handlers: defaultTagHandlers, - cacherMgr: cacherMgr, - } -} - -// GetTableMapper returns table mapper -func (parser *Parser) GetTableMapper() names.Mapper { - return parser.tableMapper -} - -// SetTableMapper sets table mapper -func (parser *Parser) SetTableMapper(mapper names.Mapper) { - parser.ClearCaches() - parser.tableMapper = mapper -} - -// GetColumnMapper returns column mapper -func (parser *Parser) GetColumnMapper() names.Mapper { - return parser.columnMapper -} - -// SetColumnMapper sets column mapper -func (parser *Parser) SetColumnMapper(mapper names.Mapper) { - parser.ClearCaches() - parser.columnMapper = mapper -} - -// SetIdentifier sets tag identifier -func (parser *Parser) SetIdentifier(identifier string) { - parser.ClearCaches() - 
parser.identifier = identifier -} - -// ParseWithCache parse a struct with cache -func (parser *Parser) ParseWithCache(v reflect.Value) (*schemas.Table, error) { - t := v.Type() - tableI, ok := parser.tableCache.Load(t) - if ok { - return tableI.(*schemas.Table), nil - } - - table, err := parser.Parse(v) - if err != nil { - return nil, err - } - - parser.tableCache.Store(t, table) - - if parser.cacherMgr.GetDefaultCacher() != nil { - if v.CanAddr() { - gob.Register(v.Addr().Interface()) - } else { - gob.Register(v.Interface()) - } - } - - return table, nil -} - -// ClearCacheTable removes the database mapper of a type from the cache -func (parser *Parser) ClearCacheTable(t reflect.Type) { - parser.tableCache.Delete(t) -} - -// ClearCaches removes all the cached table information parsed by structs -func (parser *Parser) ClearCaches() { - parser.tableCache = sync.Map{} -} - -func addIndex(indexName string, table *schemas.Table, col *schemas.Column, indexType int) { - if index, ok := table.Indexes[indexName]; ok { - index.AddColumn(col.Name) - col.Indexes[index.Name] = indexType - } else { - index := schemas.NewIndex(indexName, indexType) - index.AddColumn(col.Name) - table.AddIndex(index) - col.Indexes[index.Name] = indexType - } -} - -// ErrIgnoreField represents an error to ignore field -var ErrIgnoreField = errors.New("field will be ignored") - -func (parser *Parser) getSQLTypeByType(t reflect.Type) (schemas.SQLType, error) { - if t.Kind() == reflect.Ptr { - t = t.Elem() - } - if t.Kind() == reflect.Struct { - v, ok := parser.tableCache.Load(t) - if ok { - pkCols := v.(*schemas.Table).PKColumns() - if len(pkCols) == 1 { - return pkCols[0].SQLType, nil - } - if len(pkCols) > 1 { - return schemas.SQLType{}, fmt.Errorf("unsupported mulitiple primary key on cascade") - } - } - } - return schemas.Type2SQLType(t), nil -} - -func (parser *Parser) parseFieldWithNoTag(fieldIndex int, field reflect.StructField, fieldValue reflect.Value) (*schemas.Column, error) { - var 
sqlType schemas.SQLType - if fieldValue.CanAddr() { - if _, ok := fieldValue.Addr().Interface().(convert.Conversion); ok { - sqlType = schemas.SQLType{Name: schemas.Text} - } - } - if _, ok := fieldValue.Interface().(convert.Conversion); ok { - sqlType = schemas.SQLType{Name: schemas.Text} - } else { - var err error - sqlType, err = parser.getSQLTypeByType(field.Type) - if err != nil { - return nil, err - } - } - col := schemas.NewColumn(parser.columnMapper.Obj2Table(field.Name), - field.Name, sqlType, sqlType.DefaultLength, - sqlType.DefaultLength2, true) - col.FieldIndex = []int{fieldIndex} - - if field.Type.Kind() == reflect.Int64 && (strings.ToUpper(col.FieldName) == "ID" || strings.HasSuffix(strings.ToUpper(col.FieldName), ".ID")) { - col.IsAutoIncrement = true - col.IsPrimaryKey = true - col.Nullable = false - } - return col, nil -} - -func (parser *Parser) parseFieldWithTags(table *schemas.Table, fieldIndex int, field reflect.StructField, fieldValue reflect.Value, tags []tag) (*schemas.Column, error) { - col := &schemas.Column{ - FieldName: field.Name, - FieldIndex: []int{fieldIndex}, - Nullable: true, - IsPrimaryKey: false, - IsAutoIncrement: false, - MapType: schemas.TWOSIDES, - Indexes: make(map[string]int), - DefaultIsEmpty: true, - } - - ctx := Context{ - table: table, - col: col, - fieldValue: fieldValue, - indexNames: make(map[string]int), - parser: parser, - } - - for j, tag := range tags { - if ctx.ignoreNext { - ctx.ignoreNext = false - continue - } - - ctx.tag = tag - ctx.tagUname = strings.ToUpper(tag.name) - - if j > 0 { - ctx.preTag = strings.ToUpper(tags[j-1].name) - } - if j < len(tags)-1 { - ctx.nextTag = tags[j+1].name - } else { - ctx.nextTag = "" - } - - if h, ok := parser.handlers[ctx.tagUname]; ok { - if err := h(&ctx); err != nil { - return nil, err - } - } else { - if strings.HasPrefix(ctx.tag.name, "'") && strings.HasSuffix(ctx.tag.name, "'") { - col.Name = ctx.tag.name[1 : len(ctx.tag.name)-1] - } else { - col.Name = ctx.tag.name - 
} - } - - if ctx.hasCacheTag { - if parser.cacherMgr.GetDefaultCacher() != nil { - parser.cacherMgr.SetCacher(table.Name, parser.cacherMgr.GetDefaultCacher()) - } else { - parser.cacherMgr.SetCacher(table.Name, caches.NewLRUCacher2(caches.NewMemoryStore(), time.Hour, 10000)) - } - } - if ctx.hasNoCacheTag { - parser.cacherMgr.SetCacher(table.Name, nil) - } - } - - if col.SQLType.Name == "" { - var err error - col.SQLType, err = parser.getSQLTypeByType(field.Type) - if err != nil { - return nil, err - } - } - if ctx.isUnsigned && col.SQLType.IsNumeric() && !strings.HasPrefix(col.SQLType.Name, "UNSIGNED") { - col.SQLType.Name = "UNSIGNED " + col.SQLType.Name - } - - parser.dialect.SQLType(col) - if col.Length == 0 { - col.Length = col.SQLType.DefaultLength - } - if col.Length2 == 0 { - col.Length2 = col.SQLType.DefaultLength2 - } - if col.Name == "" { - col.Name = parser.columnMapper.Obj2Table(field.Name) - } - - if ctx.isUnique { - ctx.indexNames[col.Name] = schemas.UniqueType - } else if ctx.isIndex { - ctx.indexNames[col.Name] = schemas.IndexType - } - - for indexName, indexType := range ctx.indexNames { - addIndex(indexName, table, col, indexType) - } - - return col, nil -} - -func (parser *Parser) parseField(table *schemas.Table, fieldIndex int, field reflect.StructField, fieldValue reflect.Value) (*schemas.Column, error) { - if isNotTitle(field.Name) { - return nil, ErrIgnoreField - } - - var ( - tag = field.Tag - ormTagStr = strings.TrimSpace(tag.Get(parser.identifier)) - ) - if ormTagStr == "-" { - return nil, ErrIgnoreField - } - if ormTagStr == "" { - return parser.parseFieldWithNoTag(fieldIndex, field, fieldValue) - } - tags, err := splitTag(ormTagStr) - if err != nil { - return nil, err - } - return parser.parseFieldWithTags(table, fieldIndex, field, fieldValue, tags) -} - -func isNotTitle(n string) bool { - for _, c := range n { - return unicode.IsLower(c) - } - return true -} - -// Parse parses a struct as a table information -func (parser *Parser) 
Parse(v reflect.Value) (*schemas.Table, error) { - t := v.Type() - if t.Kind() == reflect.Ptr { - t = t.Elem() - v = v.Elem() - } - if t.Kind() != reflect.Struct { - return nil, ErrUnsupportedType - } - - table := schemas.NewEmptyTable() - table.Type = t - table.Name = names.GetTableName(parser.tableMapper, v) - table.Comment = names.GetTableComment(v) - - for i := 0; i < t.NumField(); i++ { - col, err := parser.parseField(table, i, t.Field(i), v.Field(i)) - if err == ErrIgnoreField { - continue - } else if err != nil { - return nil, err - } - - table.AddColumn(col) - } // end for - - indices := tableIndices(v) - for _, index := range indices { - // Override old information - if oldIndex, ok := table.Indexes[index.Name]; ok { - for _, colName := range oldIndex.Cols { - col := table.GetColumn(colName) - if col == nil { - return nil, ErrUnsupportedType - } - delete(col.Indexes, index.Name) - } - } - table.AddIndex(index) - for _, colName := range index.Cols { - col := table.GetColumn(colName) - if col == nil { - return nil, ErrUnsupportedType - } - col.Indexes[index.Name] = index.Type - } - } - - return table, nil -} - -func tableIndices(v reflect.Value) []*schemas.Index { - if v.Type().Implements(tpTableIndices) { - return v.Interface().(TableIndices).TableIndices() - } - - if v.Kind() == reflect.Ptr { - v = v.Elem() - if v.Type().Implements(tpTableIndices) { - return v.Interface().(TableIndices).TableIndices() - } - } else if v.CanAddr() { - v1 := v.Addr() - if v1.Type().Implements(tpTableIndices) { - return v1.Interface().(TableIndices).TableIndices() - } - } - return nil -} diff --git a/vendor/xorm.io/xorm/tags/tag.go b/vendor/xorm.io/xorm/tags/tag.go deleted file mode 100644 index 55f5f4cf..00000000 --- a/vendor/xorm.io/xorm/tags/tag.go +++ /dev/null @@ -1,398 +0,0 @@ -// Copyright 2017 The Xorm Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package tags - -import ( - "fmt" - "reflect" - "strconv" - "strings" - "time" - - "xorm.io/xorm/schemas" -) - -type tag struct { - name string - params []string -} - -func splitTag(tagStr string) ([]tag, error) { - tagStr = strings.TrimSpace(tagStr) - var ( - inQuote bool - inBigQuote bool - lastIdx int - curTag tag - paramStart int - tags []tag - ) - for i, t := range tagStr { - switch t { - case '\'': - inQuote = !inQuote - case ' ': - if !inQuote && !inBigQuote { - if lastIdx < i { - if curTag.name == "" { - curTag.name = tagStr[lastIdx:i] - } - tags = append(tags, curTag) - lastIdx = i + 1 - curTag = tag{} - } else if lastIdx == i { - lastIdx = i + 1 - } - } else if inBigQuote && !inQuote { - paramStart = i + 1 - } - case ',': - if !inQuote && !inBigQuote { - return nil, fmt.Errorf("comma[%d] of %s should be in quote or big quote", i, tagStr) - } - if !inQuote && inBigQuote { - curTag.params = append(curTag.params, strings.TrimSpace(tagStr[paramStart:i])) - paramStart = i + 1 - } - case '(': - inBigQuote = true - if !inQuote { - curTag.name = tagStr[lastIdx:i] - paramStart = i + 1 - } - case ')': - inBigQuote = false - if !inQuote { - curTag.params = append(curTag.params, tagStr[paramStart:i]) - } - } - } - if lastIdx < len(tagStr) { - if curTag.name == "" { - curTag.name = tagStr[lastIdx:] - } - tags = append(tags, curTag) - } - return tags, nil -} - -// Context represents a context for xorm tag parse. 
-type Context struct { - tag - tagUname string - preTag, nextTag string - table *schemas.Table - col *schemas.Column - fieldValue reflect.Value - isIndex bool - isUnique bool - indexNames map[string]int - parser *Parser - hasCacheTag bool - hasNoCacheTag bool - ignoreNext bool - isUnsigned bool -} - -// Handler describes tag handler for XORM -type Handler func(ctx *Context) error - -// defaultTagHandlers enumerates all the default tag handler -var defaultTagHandlers = map[string]Handler{ - "-": IgnoreHandler, - "<-": OnlyFromDBTagHandler, - "->": OnlyToDBTagHandler, - "PK": PKTagHandler, - "NULL": NULLTagHandler, - "NOT": NotTagHandler, - "AUTOINCR": AutoIncrTagHandler, - "DEFAULT": DefaultTagHandler, - "CREATED": CreatedTagHandler, - "UPDATED": UpdatedTagHandler, - "DELETED": DeletedTagHandler, - "VERSION": VersionTagHandler, - "UTC": UTCTagHandler, - "LOCAL": LocalTagHandler, - "NOTNULL": NotNullTagHandler, - "INDEX": IndexTagHandler, - "UNIQUE": UniqueTagHandler, - "CACHE": CacheTagHandler, - "NOCACHE": NoCacheTagHandler, - "COMMENT": CommentTagHandler, - "EXTENDS": ExtendsTagHandler, - "UNSIGNED": UnsignedTagHandler, -} - -func init() { - for k := range schemas.SqlTypes { - defaultTagHandlers[k] = SQLTypeTagHandler - } -} - -// NotTagHandler describes ignored tag handler -func NotTagHandler(ctx *Context) error { - return nil -} - -// IgnoreHandler represetns the field should be ignored -func IgnoreHandler(ctx *Context) error { - return ErrIgnoreField -} - -// OnlyFromDBTagHandler describes mapping direction tag handler -func OnlyFromDBTagHandler(ctx *Context) error { - ctx.col.MapType = schemas.ONLYFROMDB - return nil -} - -// OnlyToDBTagHandler describes mapping direction tag handler -func OnlyToDBTagHandler(ctx *Context) error { - ctx.col.MapType = schemas.ONLYTODB - return nil -} - -// PKTagHandler describes primary key tag handler -func PKTagHandler(ctx *Context) error { - ctx.col.IsPrimaryKey = true - ctx.col.Nullable = false - return nil -} - -// 
NULLTagHandler describes null tag handler -func NULLTagHandler(ctx *Context) error { - ctx.col.Nullable = (strings.ToUpper(ctx.preTag) != "NOT") - return nil -} - -// NotNullTagHandler describes notnull tag handler -func NotNullTagHandler(ctx *Context) error { - ctx.col.Nullable = false - return nil -} - -// AutoIncrTagHandler describes autoincr tag handler -func AutoIncrTagHandler(ctx *Context) error { - ctx.col.IsAutoIncrement = true - ctx.col.Nullable = false - /* - if len(ctx.params) > 0 { - autoStartInt, err := strconv.Atoi(ctx.params[0]) - if err != nil { - return err - } - ctx.col.AutoIncrStart = autoStartInt - } else { - ctx.col.AutoIncrStart = 1 - } - */ - return nil -} - -// DefaultTagHandler describes default tag handler -func DefaultTagHandler(ctx *Context) error { - if len(ctx.params) > 0 { - ctx.col.Default = ctx.params[0] - } else { - ctx.col.Default = ctx.nextTag - ctx.ignoreNext = true - } - ctx.col.DefaultIsEmpty = false - return nil -} - -// CreatedTagHandler describes created tag handler -func CreatedTagHandler(ctx *Context) error { - ctx.col.IsCreated = true - return nil -} - -// VersionTagHandler describes version tag handler -func VersionTagHandler(ctx *Context) error { - ctx.col.IsVersion = true - ctx.col.Default = "1" - return nil -} - -// UTCTagHandler describes utc tag handler -func UTCTagHandler(ctx *Context) error { - ctx.col.TimeZone = time.UTC - return nil -} - -// LocalTagHandler describes local tag handler -func LocalTagHandler(ctx *Context) error { - if len(ctx.params) == 0 { - ctx.col.TimeZone = time.Local - } else { - var err error - ctx.col.TimeZone, err = time.LoadLocation(ctx.params[0]) - if err != nil { - return err - } - } - return nil -} - -// UpdatedTagHandler describes updated tag handler -func UpdatedTagHandler(ctx *Context) error { - ctx.col.IsUpdated = true - return nil -} - -// DeletedTagHandler describes deleted tag handler -func DeletedTagHandler(ctx *Context) error { - ctx.col.IsDeleted = true - ctx.col.Nullable = 
true - return nil -} - -// IndexTagHandler describes index tag handler -func IndexTagHandler(ctx *Context) error { - if len(ctx.params) > 0 { - ctx.indexNames[ctx.params[0]] = schemas.IndexType - } else { - ctx.isIndex = true - } - return nil -} - -// UniqueTagHandler describes unique tag handler -func UniqueTagHandler(ctx *Context) error { - if len(ctx.params) > 0 { - ctx.indexNames[ctx.params[0]] = schemas.UniqueType - } else { - ctx.isUnique = true - } - return nil -} - -// UnsignedTagHandler represents the column is unsigned -func UnsignedTagHandler(ctx *Context) error { - ctx.isUnsigned = true - return nil -} - -// CommentTagHandler add comment to column -func CommentTagHandler(ctx *Context) error { - if len(ctx.params) > 0 { - ctx.col.Comment = strings.Trim(ctx.params[0], "' ") - } - return nil -} - -// SQLTypeTagHandler describes SQL Type tag handler -func SQLTypeTagHandler(ctx *Context) error { - ctx.col.SQLType = schemas.SQLType{Name: ctx.tagUname} - if ctx.tagUname == "JSON" { - ctx.col.IsJSON = true - } - if len(ctx.params) == 0 { - return nil - } - - switch ctx.tagUname { - case schemas.Enum: - ctx.col.EnumOptions = make(map[string]int) - for k, v := range ctx.params { - v = strings.TrimSpace(v) - v = strings.Trim(v, "'") - ctx.col.EnumOptions[v] = k - } - case schemas.Set: - ctx.col.SetOptions = make(map[string]int) - for k, v := range ctx.params { - v = strings.TrimSpace(v) - v = strings.Trim(v, "'") - ctx.col.SetOptions[v] = k - } - default: - var err error - if len(ctx.params) == 2 { - ctx.col.Length, err = strconv.ParseInt(ctx.params[0], 10, 64) - if err != nil { - return err - } - ctx.col.Length2, err = strconv.ParseInt(ctx.params[1], 10, 64) - if err != nil { - return err - } - } else if len(ctx.params) == 1 { - ctx.col.Length, err = strconv.ParseInt(ctx.params[0], 10, 64) - if err != nil { - return err - } - } - } - return nil -} - -// ExtendsTagHandler describes extends tag handler -func ExtendsTagHandler(ctx *Context) error { - fieldValue := 
ctx.fieldValue - isPtr := false - switch fieldValue.Kind() { - case reflect.Ptr: - f := fieldValue.Type().Elem() - if f.Kind() == reflect.Struct { - fieldPtr := fieldValue - fieldValue = fieldValue.Elem() - if !fieldValue.IsValid() || fieldPtr.IsNil() { - fieldValue = reflect.New(f).Elem() - } - } - isPtr = true - fallthrough - case reflect.Struct: - parentTable, err := ctx.parser.Parse(fieldValue) - if err != nil { - return err - } - for _, col := range parentTable.Columns() { - col.FieldName = fmt.Sprintf("%v.%v", ctx.col.FieldName, col.FieldName) - col.FieldIndex = append(ctx.col.FieldIndex, col.FieldIndex...) - - tagPrefix := ctx.col.FieldName - if len(ctx.params) > 0 { - col.Nullable = isPtr - tagPrefix = strings.Trim(ctx.params[0], "'") - if col.IsPrimaryKey { - col.Name = ctx.col.FieldName - col.IsPrimaryKey = false - } else { - col.Name = fmt.Sprintf("%v%v", tagPrefix, col.Name) - } - } - - if col.Nullable { - col.IsAutoIncrement = false - col.IsPrimaryKey = false - } - - ctx.table.AddColumn(col) - for indexName, indexType := range col.Indexes { - addIndex(indexName, ctx.table, col, indexType) - } - } - default: - // TODO: warning - } - return ErrIgnoreField -} - -// CacheTagHandler describes cache tag handler -func CacheTagHandler(ctx *Context) error { - if !ctx.hasCacheTag { - ctx.hasCacheTag = true - } - return nil -} - -// NoCacheTagHandler describes nocache tag handler -func NoCacheTagHandler(ctx *Context) error { - if !ctx.hasNoCacheTag { - ctx.hasNoCacheTag = true - } - return nil -}