From 75a1a8508385e07e692e8af875ef814818fb486b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=8E=E5=85=89=E6=98=A5?= Date: Sun, 26 Mar 2023 11:24:43 +0800 Subject: [PATCH] - update vendor --- go.mod | 10 + go.sum | 60 +- vendor/github.com/fatih/color/LICENSE.md | 20 + vendor/github.com/fatih/color/README.md | 176 + vendor/github.com/fatih/color/color.go | 616 +++ .../github.com/fatih/color/color_windows.go | 19 + vendor/github.com/fatih/color/doc.go | 134 + .../fsnotify/fsnotify/.editorconfig | 12 + .../fsnotify/fsnotify/.gitattributes | 1 + .../github.com/fsnotify/fsnotify/.gitignore | 6 + vendor/github.com/fsnotify/fsnotify/.mailmap | 2 + .../github.com/fsnotify/fsnotify/CHANGELOG.md | 470 +++ .../fsnotify/fsnotify/CONTRIBUTING.md | 26 + vendor/github.com/fsnotify/fsnotify/LICENSE | 25 + vendor/github.com/fsnotify/fsnotify/README.md | 161 + .../fsnotify/fsnotify/backend_fen.go | 162 + .../fsnotify/fsnotify/backend_inotify.go | 459 +++ .../fsnotify/fsnotify/backend_kqueue.go | 707 ++++ .../fsnotify/fsnotify/backend_other.go | 66 + .../fsnotify/fsnotify/backend_windows.go | 746 ++++ .../github.com/fsnotify/fsnotify/fsnotify.go | 81 + vendor/github.com/fsnotify/fsnotify/mkdoc.zsh | 208 + .../fsnotify/fsnotify/system_bsd.go | 8 + .../fsnotify/fsnotify/system_darwin.go | 9 + vendor/github.com/go-co-op/gocron/.gitignore | 19 + .../github.com/go-co-op/gocron/.golangci.yaml | 49 + .../go-co-op/gocron/CODE_OF_CONDUCT.md | 73 + .../go-co-op/gocron/CONTRIBUTING.md | 40 + vendor/github.com/go-co-op/gocron/LICENSE | 21 + vendor/github.com/go-co-op/gocron/Makefile | 12 + vendor/github.com/go-co-op/gocron/README.md | 132 + vendor/github.com/go-co-op/gocron/SECURITY.md | 15 + vendor/github.com/go-co-op/gocron/executor.go | 127 + vendor/github.com/go-co-op/gocron/gocron.go | 129 + vendor/github.com/go-co-op/gocron/job.go | 480 +++ .../github.com/go-co-op/gocron/scheduler.go | 1339 +++++++ .../github.com/go-co-op/gocron/timeHelper.go | 33 + 
vendor/github.com/go-logr/logr/.golangci.yaml | 29 + vendor/github.com/go-logr/logr/CHANGELOG.md | 6 + .../github.com/go-logr/logr/CONTRIBUTING.md | 17 + vendor/github.com/go-logr/logr/LICENSE | 201 + vendor/github.com/go-logr/logr/README.md | 282 ++ vendor/github.com/go-logr/logr/discard.go | 54 + vendor/github.com/go-logr/logr/funcr/funcr.go | 787 ++++ vendor/github.com/go-logr/logr/logr.go | 510 +++ vendor/github.com/go-logr/stdr/LICENSE | 201 + vendor/github.com/go-logr/stdr/README.md | 6 + vendor/github.com/go-logr/stdr/stdr.go | 170 + vendor/github.com/gogf/gf/v2/LICENSE | 21 + .../gogf/gf/v2/container/garray/garray.go | 8 + .../gf/v2/container/garray/garray_func.go | 69 + .../v2/container/garray/garray_normal_any.go | 837 ++++ .../v2/container/garray/garray_normal_int.go | 813 ++++ .../v2/container/garray/garray_normal_str.go | 826 ++++ .../v2/container/garray/garray_sorted_any.go | 815 ++++ .../v2/container/garray/garray_sorted_int.go | 760 ++++ .../v2/container/garray/garray_sorted_str.go | 773 ++++ .../gogf/gf/v2/container/glist/glist.go | 572 +++ .../gogf/gf/v2/container/gmap/gmap.go | 45 + .../container/gmap/gmap_hash_any_any_map.go | 537 +++ .../container/gmap/gmap_hash_int_any_map.go | 538 +++ .../container/gmap/gmap_hash_int_int_map.go | 508 +++ .../container/gmap/gmap_hash_int_str_map.go | 508 +++ .../container/gmap/gmap_hash_str_any_map.go | 524 +++ .../container/gmap/gmap_hash_str_int_map.go | 512 +++ .../container/gmap/gmap_hash_str_str_map.go | 501 +++ .../gf/v2/container/gmap/gmap_list_map.go | 612 +++ .../gf/v2/container/gmap/gmap_tree_map.go | 30 + .../gogf/gf/v2/container/gpool/gpool.go | 188 + .../gogf/gf/v2/container/gqueue/gqueue.go | 147 + .../gogf/gf/v2/container/gset/gset_any_set.go | 526 +++ .../gogf/gf/v2/container/gset/gset_int_set.go | 489 +++ .../gogf/gf/v2/container/gset/gset_str_set.go | 519 +++ .../gogf/gf/v2/container/gtree/gtree.go | 10 + .../gf/v2/container/gtree/gtree_avltree.go | 816 ++++ 
.../gogf/gf/v2/container/gtree/gtree_btree.go | 979 +++++ .../v2/container/gtree/gtree_redblacktree.go | 991 +++++ .../gogf/gf/v2/container/gtype/gtype.go | 14 + .../gogf/gf/v2/container/gtype/gtype_bool.go | 106 + .../gogf/gf/v2/container/gtype/gtype_byte.go | 85 + .../gogf/gf/v2/container/gtype/gtype_bytes.go | 96 + .../gf/v2/container/gtype/gtype_float32.go | 97 + .../gf/v2/container/gtype/gtype_float64.go | 97 + .../gogf/gf/v2/container/gtype/gtype_int.go | 85 + .../gogf/gf/v2/container/gtype/gtype_int32.go | 85 + .../gogf/gf/v2/container/gtype/gtype_int64.go | 85 + .../gf/v2/container/gtype/gtype_interface.go | 82 + .../gf/v2/container/gtype/gtype_string.go | 80 + .../gogf/gf/v2/container/gtype/gtype_uint.go | 85 + .../gf/v2/container/gtype/gtype_uint32.go | 85 + .../gf/v2/container/gtype/gtype_uint64.go | 85 + .../gogf/gf/v2/container/gvar/gvar.go | 205 + .../gogf/gf/v2/container/gvar/gvar_is.go | 51 + .../gogf/gf/v2/container/gvar/gvar_list.go | 25 + .../gogf/gf/v2/container/gvar/gvar_map.go | 91 + .../gogf/gf/v2/container/gvar/gvar_scan.go | 19 + .../gogf/gf/v2/container/gvar/gvar_slice.go | 77 + .../gogf/gf/v2/container/gvar/gvar_struct.go | 23 + .../gogf/gf/v2/container/gvar/gvar_vars.go | 131 + .../gogf/gf/v2/database/gredis/gredis.go | 78 + .../gf/v2/database/gredis/gredis_adapter.go | 78 + .../gf/v2/database/gredis/gredis_config.go | 134 + .../gf/v2/database/gredis/gredis_instance.go | 44 + .../gf/v2/database/gredis/gredis_redis.go | 137 + .../gredis/gredis_redis_group_generic.go | 62 + .../gredis/gredis_redis_group_hash.go | 32 + .../gredis/gredis_redis_group_list.go | 43 + .../gredis/gredis_redis_group_pubsub.go | 40 + .../gredis/gredis_redis_group_script.go | 30 + .../database/gredis/gredis_redis_group_set.go | 33 + .../gredis/gredis_redis_group_sorted_set.go | 85 + .../gredis/gredis_redis_group_string.go | 63 + .../gogf/gf/v2/debug/gdebug/gdebug.go | 8 + .../gogf/gf/v2/debug/gdebug/gdebug_caller.go | 196 + .../gogf/gf/v2/debug/gdebug/gdebug_grid.go 
| 29 + .../gogf/gf/v2/debug/gdebug/gdebug_stack.go | 77 + .../gogf/gf/v2/debug/gdebug/gdebug_version.go | 58 + .../gogf/gf/v2/encoding/gbinary/gbinary.go | 134 + .../gogf/gf/v2/encoding/gbinary/gbinary_be.go | 287 ++ .../gf/v2/encoding/gbinary/gbinary_bit.go | 74 + .../gf/v2/encoding/gbinary/gbinary_func.go | 7 + .../gogf/gf/v2/encoding/gbinary/gbinary_le.go | 287 ++ .../gf/v2/encoding/gcompress/gcompress.go | 8 + .../v2/encoding/gcompress/gcompress_gzip.go | 135 + .../gf/v2/encoding/gcompress/gcompress_zip.go | 280 ++ .../v2/encoding/gcompress/gcompress_zlib.go | 59 + .../gogf/gf/v2/encoding/ghash/ghash.go | 8 + .../gogf/gf/v2/encoding/ghash/ghash_ap.go | 33 + .../gogf/gf/v2/encoding/ghash/ghash_bkdr.go | 31 + .../gogf/gf/v2/encoding/ghash/ghash_djb.go | 25 + .../gogf/gf/v2/encoding/ghash/ghash_elf.go | 39 + .../gogf/gf/v2/encoding/ghash/ghash_jshash.go | 25 + .../gogf/gf/v2/encoding/ghash/ghash_pjw.go | 45 + .../gogf/gf/v2/encoding/ghash/ghash_rs.go | 35 + .../gogf/gf/v2/encoding/ghash/ghash_sdbm.go | 27 + .../gogf/gf/v2/errors/gcode/gcode.go | 70 + .../gogf/gf/v2/errors/gcode/gcode_local.go | 43 + .../gogf/gf/v2/errors/gerror/gerror.go | 79 + .../gogf/gf/v2/errors/gerror/gerror_api.go | 110 + .../gf/v2/errors/gerror/gerror_api_code.go | 139 + .../gf/v2/errors/gerror/gerror_api_option.go | 31 + .../gf/v2/errors/gerror/gerror_api_stack.go | 118 + .../gogf/gf/v2/errors/gerror/gerror_error.go | 146 + .../gf/v2/errors/gerror/gerror_error_code.go | 31 + .../v2/errors/gerror/gerror_error_format.go | 40 + .../gf/v2/errors/gerror/gerror_error_json.go | 13 + .../gf/v2/errors/gerror/gerror_error_stack.go | 171 + .../gogf/gf/v2/internal/command/command.go | 135 + .../gogf/gf/v2/internal/consts/consts.go | 21 + .../gogf/gf/v2/internal/deepcopy/deepcopy.go | 136 + .../gogf/gf/v2/internal/empty/empty.go | 224 ++ .../gogf/gf/v2/internal/intlog/intlog.go | 125 + .../gogf/gf/v2/internal/json/json.go | 85 + .../gf/v2/internal/reflection/reflection.go | 94 + 
.../gogf/gf/v2/internal/rwmutex/rwmutex.go | 77 + .../gogf/gf/v2/internal/tracing/tracing.go | 49 + .../gogf/gf/v2/internal/utils/utils.go | 8 + .../gogf/gf/v2/internal/utils/utils_array.go | 26 + .../gogf/gf/v2/internal/utils/utils_debug.go | 42 + .../gogf/gf/v2/internal/utils/utils_io.go | 66 + .../gogf/gf/v2/internal/utils/utils_is.go | 100 + .../gogf/gf/v2/internal/utils/utils_list.go | 37 + .../gogf/gf/v2/internal/utils/utils_map.go | 37 + .../gogf/gf/v2/internal/utils/utils_str.go | 171 + .../github.com/gogf/gf/v2/net/gipv4/gipv4.go | 60 + .../gogf/gf/v2/net/gipv4/gipv4_ip.go | 145 + .../gogf/gf/v2/net/gipv4/gipv4_lookup.go | 52 + .../gogf/gf/v2/net/gipv4/gipv4_mac.go | 43 + .../gogf/gf/v2/net/gtrace/gtrace.go | 180 + .../gogf/gf/v2/net/gtrace/gtrace_baggage.go | 75 + .../gogf/gf/v2/net/gtrace/gtrace_carrier.go | 62 + .../gogf/gf/v2/net/gtrace/gtrace_span.go | 26 + .../gogf/gf/v2/net/gtrace/gtrace_tracer.go | 28 + .../net/gtrace/internal/provider/provider.go | 33 + .../internal/provider/provider_idgenerator.go | 33 + .../github.com/gogf/gf/v2/os/gcache/gcache.go | 240 ++ .../gogf/gf/v2/os/gcache/gcache_adapter.go | 142 + .../gf/v2/os/gcache/gcache_adapter_memory.go | 476 +++ .../os/gcache/gcache_adapter_memory_data.go | 206 + .../gcache_adapter_memory_expire_sets.go | 52 + .../gcache_adapter_memory_expire_times.go | 41 + .../os/gcache/gcache_adapter_memory_item.go | 19 + .../v2/os/gcache/gcache_adapter_memory_lru.go | 100 + .../gf/v2/os/gcache/gcache_adapter_redis.go | 438 +++ .../gogf/gf/v2/os/gcache/gcache_cache.go | 70 + .../gogf/gf/v2/os/gcache/gcache_cache_must.go | 113 + .../github.com/gogf/gf/v2/os/gcron/gcron.go | 122 + .../gogf/gf/v2/os/gcron/gcron_cron.go | 221 ++ .../gogf/gf/v2/os/gcron/gcron_entry.go | 195 + .../gogf/gf/v2/os/gcron/gcron_schedule.go | 412 ++ .../gogf/gf/v2/os/gcron/gcron_schedule_fix.go | 47 + vendor/github.com/gogf/gf/v2/os/gctx/gctx.go | 82 + .../github.com/gogf/gf/v2/os/gfile/gfile.go | 458 +++ 
.../gogf/gf/v2/os/gfile/gfile_cache.go | 87 + .../gogf/gf/v2/os/gfile/gfile_contents.go | 214 ++ .../gogf/gf/v2/os/gfile/gfile_copy.go | 139 + .../gogf/gf/v2/os/gfile/gfile_home.go | 82 + .../gogf/gf/v2/os/gfile/gfile_replace.go | 58 + .../gogf/gf/v2/os/gfile/gfile_scan.go | 184 + .../gogf/gf/v2/os/gfile/gfile_search.go | 58 + .../gogf/gf/v2/os/gfile/gfile_size.go | 131 + .../gogf/gf/v2/os/gfile/gfile_sort.go | 40 + .../gogf/gf/v2/os/gfile/gfile_source.go | 91 + .../gogf/gf/v2/os/gfile/gfile_time.go | 39 + .../github.com/gogf/gf/v2/os/gfpool/gfpool.go | 41 + .../gogf/gf/v2/os/gfpool/gfpool_file.go | 77 + .../gogf/gf/v2/os/gfpool/gfpool_pool.go | 122 + .../gogf/gf/v2/os/gfsnotify/gfsnotify.go | 170 + .../gf/v2/os/gfsnotify/gfsnotify_event.go | 37 + .../gf/v2/os/gfsnotify/gfsnotify_filefunc.go | 134 + .../gf/v2/os/gfsnotify/gfsnotify_watcher.go | 198 + .../v2/os/gfsnotify/gfsnotify_watcher_loop.go | 181 + vendor/github.com/gogf/gf/v2/os/glog/glog.go | 75 + .../github.com/gogf/gf/v2/os/glog/glog_api.go | 109 + .../gogf/gf/v2/os/glog/glog_chaining.go | 98 + .../gogf/gf/v2/os/glog/glog_config.go | 161 + .../gogf/gf/v2/os/glog/glog_instance.go | 31 + .../gogf/gf/v2/os/glog/glog_logger.go | 411 ++ .../gogf/gf/v2/os/glog/glog_logger_api.go | 146 + .../gf/v2/os/glog/glog_logger_chaining.go | 223 ++ .../gogf/gf/v2/os/glog/glog_logger_color.go | 53 + .../gogf/gf/v2/os/glog/glog_logger_config.go | 286 ++ .../gogf/gf/v2/os/glog/glog_logger_handler.go | 142 + .../gf/v2/os/glog/glog_logger_handler_json.go | 48 + .../gogf/gf/v2/os/glog/glog_logger_level.go | 111 + .../gogf/gf/v2/os/glog/glog_logger_rotate.go | 302 ++ .../gogf/gf/v2/os/glog/glog_logger_writer.go | 19 + .../github.com/gogf/gf/v2/os/gmlock/gmlock.go | 89 + .../gogf/gf/v2/os/gmlock/gmlock_locker.go | 133 + .../github.com/gogf/gf/v2/os/gmutex/gmutex.go | 224 ++ .../github.com/gogf/gf/v2/os/grpool/grpool.go | 193 + .../gogf/gf/v2/os/grpool/grpool_supervisor.go | 30 + .../gogf/gf/v2/os/gstructs/gstructs.go | 62 + 
.../gogf/gf/v2/os/gstructs/gstructs_field.go | 232 ++ .../gf/v2/os/gstructs/gstructs_field_tag.go | 90 + .../gogf/gf/v2/os/gstructs/gstructs_tag.go | 225 ++ .../gogf/gf/v2/os/gstructs/gstructs_type.go | 75 + .../github.com/gogf/gf/v2/os/gtime/gtime.go | 452 +++ .../gogf/gf/v2/os/gtime/gtime_format.go | 280 ++ .../gogf/gf/v2/os/gtime/gtime_sql.go | 28 + .../gogf/gf/v2/os/gtime/gtime_time.go | 518 +++ .../gogf/gf/v2/os/gtime/gtime_time_wrapper.go | 29 + .../gogf/gf/v2/os/gtime/gtime_time_zone.go | 120 + .../github.com/gogf/gf/v2/os/gtimer/gtimer.go | 160 + .../gogf/gf/v2/os/gtimer/gtimer_entry.go | 146 + .../gogf/gf/v2/os/gtimer/gtimer_exit.go | 15 + .../gogf/gf/v2/os/gtimer/gtimer_queue.go | 84 + .../gogf/gf/v2/os/gtimer/gtimer_queue_heap.go | 42 + .../gogf/gf/v2/os/gtimer/gtimer_timer.go | 198 + .../gogf/gf/v2/os/gtimer/gtimer_timer_loop.go | 67 + .../gogf/gf/v2/text/gregex/gregex.go | 149 + .../gogf/gf/v2/text/gregex/gregex_cache.go | 50 + .../github.com/gogf/gf/v2/text/gstr/gstr.go | 17 + .../gogf/gf/v2/text/gstr/gstr_array.go | 31 + .../gogf/gf/v2/text/gstr/gstr_case.go | 184 + .../gogf/gf/v2/text/gstr/gstr_compare.go | 21 + .../gogf/gf/v2/text/gstr/gstr_contain.go | 24 + .../gogf/gf/v2/text/gstr/gstr_convert.go | 265 ++ .../gogf/gf/v2/text/gstr/gstr_count.go | 63 + .../gogf/gf/v2/text/gstr/gstr_create.go | 14 + .../gogf/gf/v2/text/gstr/gstr_domain.go | 56 + .../gogf/gf/v2/text/gstr/gstr_is.go | 14 + .../gogf/gf/v2/text/gstr/gstr_length.go | 14 + .../gogf/gf/v2/text/gstr/gstr_parse.go | 181 + .../gogf/gf/v2/text/gstr/gstr_pos.go | 140 + .../gogf/gf/v2/text/gstr/gstr_replace.go | 94 + .../gogf/gf/v2/text/gstr/gstr_similar.go | 158 + .../gogf/gf/v2/text/gstr/gstr_slashes.go | 54 + .../gogf/gf/v2/text/gstr/gstr_split_join.go | 83 + .../gogf/gf/v2/text/gstr/gstr_sub.go | 199 + .../gogf/gf/v2/text/gstr/gstr_trim.go | 114 + .../gogf/gf/v2/text/gstr/gstr_upper_lower.go | 54 + .../gogf/gf/v2/text/gstr/gstr_version.go | 189 + .../github.com/gogf/gf/v2/util/gconv/gconv.go 
| 286 ++ .../gogf/gf/v2/util/gconv/gconv_convert.go | 284 ++ .../gogf/gf/v2/util/gconv/gconv_float.go | 55 + .../gogf/gf/v2/util/gconv/gconv_int.go | 136 + .../gogf/gf/v2/util/gconv/gconv_interface.go | 112 + .../gogf/gf/v2/util/gconv/gconv_map.go | 512 +++ .../gogf/gf/v2/util/gconv/gconv_maps.go | 119 + .../gogf/gf/v2/util/gconv/gconv_maptomap.go | 147 + .../gogf/gf/v2/util/gconv/gconv_maptomaps.go | 141 + .../gogf/gf/v2/util/gconv/gconv_ptr.go | 96 + .../gogf/gf/v2/util/gconv/gconv_scan.go | 525 +++ .../gogf/gf/v2/util/gconv/gconv_slice_any.go | 130 + .../gf/v2/util/gconv/gconv_slice_float.go | 282 ++ .../gogf/gf/v2/util/gconv/gconv_slice_int.go | 416 ++ .../gogf/gf/v2/util/gconv/gconv_slice_str.go | 144 + .../gogf/gf/v2/util/gconv/gconv_slice_uint.go | 436 +++ .../gogf/gf/v2/util/gconv/gconv_struct.go | 620 +++ .../gogf/gf/v2/util/gconv/gconv_structs.go | 172 + .../gogf/gf/v2/util/gconv/gconv_time.go | 84 + .../gogf/gf/v2/util/gconv/gconv_uint.go | 119 + .../gogf/gf/v2/util/gconv/gconv_unsafe.go | 23 + .../github.com/gogf/gf/v2/util/grand/grand.go | 195 + .../gogf/gf/v2/util/grand/grand_buffer.go | 53 + .../github.com/gogf/gf/v2/util/gtag/gtag.go | 48 + .../gogf/gf/v2/util/gtag/gtag_func.go | 65 + .../github.com/gogf/gf/v2/util/gutil/gutil.go | 159 + .../gogf/gf/v2/util/gutil/gutil_comparator.go | 127 + .../gogf/gf/v2/util/gutil/gutil_copy.go | 20 + .../gogf/gf/v2/util/gutil/gutil_default.go | 27 + .../gogf/gf/v2/util/gutil/gutil_dump.go | 471 +++ .../gogf/gf/v2/util/gutil/gutil_list.go | 140 + .../gogf/gf/v2/util/gutil/gutil_map.go | 115 + .../gogf/gf/v2/util/gutil/gutil_reflect.go | 26 + .../gogf/gf/v2/util/gutil/gutil_slice.go | 118 + .../gogf/gf/v2/util/gutil/gutil_struct.go | 38 + vendor/github.com/mattn/go-colorable/LICENSE | 21 + .../github.com/mattn/go-colorable/README.md | 48 + .../mattn/go-colorable/colorable_appengine.go | 38 + .../mattn/go-colorable/colorable_others.go | 38 + .../mattn/go-colorable/colorable_windows.go | 1047 +++++ 
.../mattn/go-colorable/noncolorable.go | 57 + .../go.opentelemetry.io/otel/.gitattributes | 3 + vendor/go.opentelemetry.io/otel/.gitignore | 21 + vendor/go.opentelemetry.io/otel/.gitmodules | 3 + vendor/go.opentelemetry.io/otel/.golangci.yml | 244 ++ vendor/go.opentelemetry.io/otel/.lycheeignore | 6 + .../otel/.markdownlint.yaml | 29 + vendor/go.opentelemetry.io/otel/CHANGELOG.md | 2369 ++++++++++++ vendor/go.opentelemetry.io/otel/CODEOWNERS | 17 + .../go.opentelemetry.io/otel/CONTRIBUTING.md | 526 +++ vendor/go.opentelemetry.io/otel/LICENSE | 201 + vendor/go.opentelemetry.io/otel/Makefile | 227 ++ vendor/go.opentelemetry.io/otel/README.md | 114 + vendor/go.opentelemetry.io/otel/RELEASING.md | 127 + vendor/go.opentelemetry.io/otel/VERSIONING.md | 224 ++ .../go.opentelemetry.io/otel/attribute/doc.go | 16 + .../otel/attribute/encoder.go | 146 + .../otel/attribute/iterator.go | 161 + .../go.opentelemetry.io/otel/attribute/key.go | 134 + .../go.opentelemetry.io/otel/attribute/kv.go | 86 + .../go.opentelemetry.io/otel/attribute/set.go | 424 +++ .../otel/attribute/type_string.go | 31 + .../otel/attribute/value.go | 270 ++ .../otel/baggage/baggage.go | 570 +++ .../otel/baggage/context.go | 39 + .../go.opentelemetry.io/otel/baggage/doc.go | 20 + .../go.opentelemetry.io/otel/codes/codes.go | 116 + vendor/go.opentelemetry.io/otel/codes/doc.go | 21 + vendor/go.opentelemetry.io/otel/doc.go | 34 + .../go.opentelemetry.io/otel/error_handler.go | 38 + vendor/go.opentelemetry.io/otel/handler.go | 96 + .../otel/internal/attribute/attribute.go | 111 + .../otel/internal/baggage/baggage.go | 43 + .../otel/internal/baggage/context.go | 92 + .../otel/internal/global/internal_logging.go | 63 + .../otel/internal/global/propagator.go | 82 + .../otel/internal/global/state.go | 115 + .../otel/internal/global/trace.go | 192 + .../otel/internal/rawhelpers.go | 55 + .../otel/internal_logging.go | 26 + .../go.opentelemetry.io/otel/propagation.go | 31 + .../otel/propagation/baggage.go | 58 + 
.../otel/propagation/doc.go | 24 + .../otel/propagation/propagation.go | 153 + .../otel/propagation/trace_context.go | 159 + vendor/go.opentelemetry.io/otel/sdk/LICENSE | 201 + .../otel/sdk/instrumentation/doc.go | 24 + .../otel/sdk/instrumentation/library.go | 19 + .../otel/sdk/instrumentation/scope.go | 26 + .../otel/sdk/internal/env/env.go | 177 + .../otel/sdk/internal/internal.go | 37 + .../otel/sdk/resource/auto.go | 72 + .../otel/sdk/resource/builtin.go | 108 + .../otel/sdk/resource/config.go | 201 + .../otel/sdk/resource/container.go | 100 + .../otel/sdk/resource/doc.go | 28 + .../otel/sdk/resource/env.go | 108 + .../otel/sdk/resource/os.go | 97 + .../otel/sdk/resource/os_release_darwin.go | 102 + .../otel/sdk/resource/os_release_unix.go | 154 + .../otel/sdk/resource/os_unix.go | 90 + .../otel/sdk/resource/os_unsupported.go | 34 + .../otel/sdk/resource/os_windows.go | 101 + .../otel/sdk/resource/process.go | 180 + .../otel/sdk/resource/resource.go | 282 ++ .../otel/sdk/trace/batch_span_processor.go | 432 +++ .../go.opentelemetry.io/otel/sdk/trace/doc.go | 21 + .../otel/sdk/trace/event.go | 37 + .../otel/sdk/trace/evictedqueue.go | 44 + .../otel/sdk/trace/id_generator.go | 77 + .../otel/sdk/trace/link.go | 34 + .../otel/sdk/trace/provider.go | 461 +++ .../otel/sdk/trace/sampler_env.go | 108 + .../otel/sdk/trace/sampling.go | 293 ++ .../otel/sdk/trace/simple_span_processor.go | 128 + .../otel/sdk/trace/snapshot.go | 144 + .../otel/sdk/trace/span.go | 828 ++++ .../otel/sdk/trace/span_exporter.go | 47 + .../otel/sdk/trace/span_limits.go | 125 + .../otel/sdk/trace/span_processor.go | 72 + .../otel/sdk/trace/tracer.go | 161 + .../otel/semconv/internal/http.go | 336 ++ .../otel/semconv/v1.17.0/doc.go | 20 + .../otel/semconv/v1.17.0/event.go | 199 + .../otel/semconv/v1.17.0/exception.go | 20 + .../otel/semconv/v1.17.0/http.go | 21 + .../otel/semconv/v1.17.0/resource.go | 2010 ++++++++++ .../otel/semconv/v1.17.0/schema.go | 20 + .../otel/semconv/v1.17.0/trace.go | 
3375 +++++++++++++++++ .../otel/semconv/v1.4.0/doc.go | 20 + .../otel/semconv/v1.4.0/exception.go | 20 + .../otel/semconv/v1.4.0/http.go | 114 + .../otel/semconv/v1.4.0/resource.go | 906 +++++ .../otel/semconv/v1.4.0/schema.go | 20 + .../otel/semconv/v1.4.0/trace.go | 1378 +++++++ vendor/go.opentelemetry.io/otel/trace.go | 47 + vendor/go.opentelemetry.io/otel/trace/LICENSE | 201 + .../go.opentelemetry.io/otel/trace/config.go | 333 ++ .../go.opentelemetry.io/otel/trace/context.go | 61 + vendor/go.opentelemetry.io/otel/trace/doc.go | 66 + .../otel/trace/nonrecording.go | 27 + vendor/go.opentelemetry.io/otel/trace/noop.go | 89 + .../go.opentelemetry.io/otel/trace/trace.go | 551 +++ .../otel/trace/tracestate.go | 212 ++ vendor/go.opentelemetry.io/otel/version.go | 20 + vendor/go.opentelemetry.io/otel/versions.yaml | 57 + .../golang.org/x/sync/semaphore/semaphore.go | 136 + .../golang.org/x/sys/windows/registry/key.go | 206 + .../x/sys/windows/registry/mksyscall.go | 10 + .../x/sys/windows/registry/syscall.go | 33 + .../x/sys/windows/registry/value.go | 387 ++ .../sys/windows/registry/zsyscall_windows.go | 117 + vendor/modules.txt | 95 + 426 files changed, 75961 insertions(+), 4 deletions(-) create mode 100644 vendor/github.com/fatih/color/LICENSE.md create mode 100644 vendor/github.com/fatih/color/README.md create mode 100644 vendor/github.com/fatih/color/color.go create mode 100644 vendor/github.com/fatih/color/color_windows.go create mode 100644 vendor/github.com/fatih/color/doc.go create mode 100644 vendor/github.com/fsnotify/fsnotify/.editorconfig create mode 100644 vendor/github.com/fsnotify/fsnotify/.gitattributes create mode 100644 vendor/github.com/fsnotify/fsnotify/.gitignore create mode 100644 vendor/github.com/fsnotify/fsnotify/.mailmap create mode 100644 vendor/github.com/fsnotify/fsnotify/CHANGELOG.md create mode 100644 vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md create mode 100644 vendor/github.com/fsnotify/fsnotify/LICENSE create mode 100644 
vendor/github.com/fsnotify/fsnotify/README.md create mode 100644 vendor/github.com/fsnotify/fsnotify/backend_fen.go create mode 100644 vendor/github.com/fsnotify/fsnotify/backend_inotify.go create mode 100644 vendor/github.com/fsnotify/fsnotify/backend_kqueue.go create mode 100644 vendor/github.com/fsnotify/fsnotify/backend_other.go create mode 100644 vendor/github.com/fsnotify/fsnotify/backend_windows.go create mode 100644 vendor/github.com/fsnotify/fsnotify/fsnotify.go create mode 100644 vendor/github.com/fsnotify/fsnotify/mkdoc.zsh create mode 100644 vendor/github.com/fsnotify/fsnotify/system_bsd.go create mode 100644 vendor/github.com/fsnotify/fsnotify/system_darwin.go create mode 100644 vendor/github.com/go-co-op/gocron/.gitignore create mode 100644 vendor/github.com/go-co-op/gocron/.golangci.yaml create mode 100644 vendor/github.com/go-co-op/gocron/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/go-co-op/gocron/CONTRIBUTING.md create mode 100644 vendor/github.com/go-co-op/gocron/LICENSE create mode 100644 vendor/github.com/go-co-op/gocron/Makefile create mode 100644 vendor/github.com/go-co-op/gocron/README.md create mode 100644 vendor/github.com/go-co-op/gocron/SECURITY.md create mode 100644 vendor/github.com/go-co-op/gocron/executor.go create mode 100644 vendor/github.com/go-co-op/gocron/gocron.go create mode 100644 vendor/github.com/go-co-op/gocron/job.go create mode 100644 vendor/github.com/go-co-op/gocron/scheduler.go create mode 100644 vendor/github.com/go-co-op/gocron/timeHelper.go create mode 100644 vendor/github.com/go-logr/logr/.golangci.yaml create mode 100644 vendor/github.com/go-logr/logr/CHANGELOG.md create mode 100644 vendor/github.com/go-logr/logr/CONTRIBUTING.md create mode 100644 vendor/github.com/go-logr/logr/LICENSE create mode 100644 vendor/github.com/go-logr/logr/README.md create mode 100644 vendor/github.com/go-logr/logr/discard.go create mode 100644 vendor/github.com/go-logr/logr/funcr/funcr.go create mode 100644 
vendor/github.com/go-logr/logr/logr.go create mode 100644 vendor/github.com/go-logr/stdr/LICENSE create mode 100644 vendor/github.com/go-logr/stdr/README.md create mode 100644 vendor/github.com/go-logr/stdr/stdr.go create mode 100644 vendor/github.com/gogf/gf/v2/LICENSE create mode 100644 vendor/github.com/gogf/gf/v2/container/garray/garray.go create mode 100644 vendor/github.com/gogf/gf/v2/container/garray/garray_func.go create mode 100644 vendor/github.com/gogf/gf/v2/container/garray/garray_normal_any.go create mode 100644 vendor/github.com/gogf/gf/v2/container/garray/garray_normal_int.go create mode 100644 vendor/github.com/gogf/gf/v2/container/garray/garray_normal_str.go create mode 100644 vendor/github.com/gogf/gf/v2/container/garray/garray_sorted_any.go create mode 100644 vendor/github.com/gogf/gf/v2/container/garray/garray_sorted_int.go create mode 100644 vendor/github.com/gogf/gf/v2/container/garray/garray_sorted_str.go create mode 100644 vendor/github.com/gogf/gf/v2/container/glist/glist.go create mode 100644 vendor/github.com/gogf/gf/v2/container/gmap/gmap.go create mode 100644 vendor/github.com/gogf/gf/v2/container/gmap/gmap_hash_any_any_map.go create mode 100644 vendor/github.com/gogf/gf/v2/container/gmap/gmap_hash_int_any_map.go create mode 100644 vendor/github.com/gogf/gf/v2/container/gmap/gmap_hash_int_int_map.go create mode 100644 vendor/github.com/gogf/gf/v2/container/gmap/gmap_hash_int_str_map.go create mode 100644 vendor/github.com/gogf/gf/v2/container/gmap/gmap_hash_str_any_map.go create mode 100644 vendor/github.com/gogf/gf/v2/container/gmap/gmap_hash_str_int_map.go create mode 100644 vendor/github.com/gogf/gf/v2/container/gmap/gmap_hash_str_str_map.go create mode 100644 vendor/github.com/gogf/gf/v2/container/gmap/gmap_list_map.go create mode 100644 vendor/github.com/gogf/gf/v2/container/gmap/gmap_tree_map.go create mode 100644 vendor/github.com/gogf/gf/v2/container/gpool/gpool.go create mode 100644 
vendor/github.com/gogf/gf/v2/container/gqueue/gqueue.go create mode 100644 vendor/github.com/gogf/gf/v2/container/gset/gset_any_set.go create mode 100644 vendor/github.com/gogf/gf/v2/container/gset/gset_int_set.go create mode 100644 vendor/github.com/gogf/gf/v2/container/gset/gset_str_set.go create mode 100644 vendor/github.com/gogf/gf/v2/container/gtree/gtree.go create mode 100644 vendor/github.com/gogf/gf/v2/container/gtree/gtree_avltree.go create mode 100644 vendor/github.com/gogf/gf/v2/container/gtree/gtree_btree.go create mode 100644 vendor/github.com/gogf/gf/v2/container/gtree/gtree_redblacktree.go create mode 100644 vendor/github.com/gogf/gf/v2/container/gtype/gtype.go create mode 100644 vendor/github.com/gogf/gf/v2/container/gtype/gtype_bool.go create mode 100644 vendor/github.com/gogf/gf/v2/container/gtype/gtype_byte.go create mode 100644 vendor/github.com/gogf/gf/v2/container/gtype/gtype_bytes.go create mode 100644 vendor/github.com/gogf/gf/v2/container/gtype/gtype_float32.go create mode 100644 vendor/github.com/gogf/gf/v2/container/gtype/gtype_float64.go create mode 100644 vendor/github.com/gogf/gf/v2/container/gtype/gtype_int.go create mode 100644 vendor/github.com/gogf/gf/v2/container/gtype/gtype_int32.go create mode 100644 vendor/github.com/gogf/gf/v2/container/gtype/gtype_int64.go create mode 100644 vendor/github.com/gogf/gf/v2/container/gtype/gtype_interface.go create mode 100644 vendor/github.com/gogf/gf/v2/container/gtype/gtype_string.go create mode 100644 vendor/github.com/gogf/gf/v2/container/gtype/gtype_uint.go create mode 100644 vendor/github.com/gogf/gf/v2/container/gtype/gtype_uint32.go create mode 100644 vendor/github.com/gogf/gf/v2/container/gtype/gtype_uint64.go create mode 100644 vendor/github.com/gogf/gf/v2/container/gvar/gvar.go create mode 100644 vendor/github.com/gogf/gf/v2/container/gvar/gvar_is.go create mode 100644 vendor/github.com/gogf/gf/v2/container/gvar/gvar_list.go create mode 100644 
vendor/github.com/gogf/gf/v2/container/gvar/gvar_map.go create mode 100644 vendor/github.com/gogf/gf/v2/container/gvar/gvar_scan.go create mode 100644 vendor/github.com/gogf/gf/v2/container/gvar/gvar_slice.go create mode 100644 vendor/github.com/gogf/gf/v2/container/gvar/gvar_struct.go create mode 100644 vendor/github.com/gogf/gf/v2/container/gvar/gvar_vars.go create mode 100644 vendor/github.com/gogf/gf/v2/database/gredis/gredis.go create mode 100644 vendor/github.com/gogf/gf/v2/database/gredis/gredis_adapter.go create mode 100644 vendor/github.com/gogf/gf/v2/database/gredis/gredis_config.go create mode 100644 vendor/github.com/gogf/gf/v2/database/gredis/gredis_instance.go create mode 100644 vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis.go create mode 100644 vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_generic.go create mode 100644 vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_hash.go create mode 100644 vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_list.go create mode 100644 vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_pubsub.go create mode 100644 vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_script.go create mode 100644 vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_set.go create mode 100644 vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_sorted_set.go create mode 100644 vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_string.go create mode 100644 vendor/github.com/gogf/gf/v2/debug/gdebug/gdebug.go create mode 100644 vendor/github.com/gogf/gf/v2/debug/gdebug/gdebug_caller.go create mode 100644 vendor/github.com/gogf/gf/v2/debug/gdebug/gdebug_grid.go create mode 100644 vendor/github.com/gogf/gf/v2/debug/gdebug/gdebug_stack.go create mode 100644 vendor/github.com/gogf/gf/v2/debug/gdebug/gdebug_version.go create mode 100644 vendor/github.com/gogf/gf/v2/encoding/gbinary/gbinary.go create mode 100644 
vendor/github.com/gogf/gf/v2/encoding/gbinary/gbinary_be.go create mode 100644 vendor/github.com/gogf/gf/v2/encoding/gbinary/gbinary_bit.go create mode 100644 vendor/github.com/gogf/gf/v2/encoding/gbinary/gbinary_func.go create mode 100644 vendor/github.com/gogf/gf/v2/encoding/gbinary/gbinary_le.go create mode 100644 vendor/github.com/gogf/gf/v2/encoding/gcompress/gcompress.go create mode 100644 vendor/github.com/gogf/gf/v2/encoding/gcompress/gcompress_gzip.go create mode 100644 vendor/github.com/gogf/gf/v2/encoding/gcompress/gcompress_zip.go create mode 100644 vendor/github.com/gogf/gf/v2/encoding/gcompress/gcompress_zlib.go create mode 100644 vendor/github.com/gogf/gf/v2/encoding/ghash/ghash.go create mode 100644 vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_ap.go create mode 100644 vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_bkdr.go create mode 100644 vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_djb.go create mode 100644 vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_elf.go create mode 100644 vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_jshash.go create mode 100644 vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_pjw.go create mode 100644 vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_rs.go create mode 100644 vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_sdbm.go create mode 100644 vendor/github.com/gogf/gf/v2/errors/gcode/gcode.go create mode 100644 vendor/github.com/gogf/gf/v2/errors/gcode/gcode_local.go create mode 100644 vendor/github.com/gogf/gf/v2/errors/gerror/gerror.go create mode 100644 vendor/github.com/gogf/gf/v2/errors/gerror/gerror_api.go create mode 100644 vendor/github.com/gogf/gf/v2/errors/gerror/gerror_api_code.go create mode 100644 vendor/github.com/gogf/gf/v2/errors/gerror/gerror_api_option.go create mode 100644 vendor/github.com/gogf/gf/v2/errors/gerror/gerror_api_stack.go create mode 100644 vendor/github.com/gogf/gf/v2/errors/gerror/gerror_error.go create mode 100644 
vendor/github.com/gogf/gf/v2/errors/gerror/gerror_error_code.go create mode 100644 vendor/github.com/gogf/gf/v2/errors/gerror/gerror_error_format.go create mode 100644 vendor/github.com/gogf/gf/v2/errors/gerror/gerror_error_json.go create mode 100644 vendor/github.com/gogf/gf/v2/errors/gerror/gerror_error_stack.go create mode 100644 vendor/github.com/gogf/gf/v2/internal/command/command.go create mode 100644 vendor/github.com/gogf/gf/v2/internal/consts/consts.go create mode 100644 vendor/github.com/gogf/gf/v2/internal/deepcopy/deepcopy.go create mode 100644 vendor/github.com/gogf/gf/v2/internal/empty/empty.go create mode 100644 vendor/github.com/gogf/gf/v2/internal/intlog/intlog.go create mode 100644 vendor/github.com/gogf/gf/v2/internal/json/json.go create mode 100644 vendor/github.com/gogf/gf/v2/internal/reflection/reflection.go create mode 100644 vendor/github.com/gogf/gf/v2/internal/rwmutex/rwmutex.go create mode 100644 vendor/github.com/gogf/gf/v2/internal/tracing/tracing.go create mode 100644 vendor/github.com/gogf/gf/v2/internal/utils/utils.go create mode 100644 vendor/github.com/gogf/gf/v2/internal/utils/utils_array.go create mode 100644 vendor/github.com/gogf/gf/v2/internal/utils/utils_debug.go create mode 100644 vendor/github.com/gogf/gf/v2/internal/utils/utils_io.go create mode 100644 vendor/github.com/gogf/gf/v2/internal/utils/utils_is.go create mode 100644 vendor/github.com/gogf/gf/v2/internal/utils/utils_list.go create mode 100644 vendor/github.com/gogf/gf/v2/internal/utils/utils_map.go create mode 100644 vendor/github.com/gogf/gf/v2/internal/utils/utils_str.go create mode 100644 vendor/github.com/gogf/gf/v2/net/gipv4/gipv4.go create mode 100644 vendor/github.com/gogf/gf/v2/net/gipv4/gipv4_ip.go create mode 100644 vendor/github.com/gogf/gf/v2/net/gipv4/gipv4_lookup.go create mode 100644 vendor/github.com/gogf/gf/v2/net/gipv4/gipv4_mac.go create mode 100644 vendor/github.com/gogf/gf/v2/net/gtrace/gtrace.go create mode 100644 
vendor/github.com/gogf/gf/v2/net/gtrace/gtrace_baggage.go create mode 100644 vendor/github.com/gogf/gf/v2/net/gtrace/gtrace_carrier.go create mode 100644 vendor/github.com/gogf/gf/v2/net/gtrace/gtrace_span.go create mode 100644 vendor/github.com/gogf/gf/v2/net/gtrace/gtrace_tracer.go create mode 100644 vendor/github.com/gogf/gf/v2/net/gtrace/internal/provider/provider.go create mode 100644 vendor/github.com/gogf/gf/v2/net/gtrace/internal/provider/provider_idgenerator.go create mode 100644 vendor/github.com/gogf/gf/v2/os/gcache/gcache.go create mode 100644 vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter.go create mode 100644 vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter_memory.go create mode 100644 vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter_memory_data.go create mode 100644 vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter_memory_expire_sets.go create mode 100644 vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter_memory_expire_times.go create mode 100644 vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter_memory_item.go create mode 100644 vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter_memory_lru.go create mode 100644 vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter_redis.go create mode 100644 vendor/github.com/gogf/gf/v2/os/gcache/gcache_cache.go create mode 100644 vendor/github.com/gogf/gf/v2/os/gcache/gcache_cache_must.go create mode 100644 vendor/github.com/gogf/gf/v2/os/gcron/gcron.go create mode 100644 vendor/github.com/gogf/gf/v2/os/gcron/gcron_cron.go create mode 100644 vendor/github.com/gogf/gf/v2/os/gcron/gcron_entry.go create mode 100644 vendor/github.com/gogf/gf/v2/os/gcron/gcron_schedule.go create mode 100644 vendor/github.com/gogf/gf/v2/os/gcron/gcron_schedule_fix.go create mode 100644 vendor/github.com/gogf/gf/v2/os/gctx/gctx.go create mode 100644 vendor/github.com/gogf/gf/v2/os/gfile/gfile.go create mode 100644 vendor/github.com/gogf/gf/v2/os/gfile/gfile_cache.go create mode 100644 
vendor/github.com/gogf/gf/v2/os/gfile/gfile_contents.go create mode 100644 vendor/github.com/gogf/gf/v2/os/gfile/gfile_copy.go create mode 100644 vendor/github.com/gogf/gf/v2/os/gfile/gfile_home.go create mode 100644 vendor/github.com/gogf/gf/v2/os/gfile/gfile_replace.go create mode 100644 vendor/github.com/gogf/gf/v2/os/gfile/gfile_scan.go create mode 100644 vendor/github.com/gogf/gf/v2/os/gfile/gfile_search.go create mode 100644 vendor/github.com/gogf/gf/v2/os/gfile/gfile_size.go create mode 100644 vendor/github.com/gogf/gf/v2/os/gfile/gfile_sort.go create mode 100644 vendor/github.com/gogf/gf/v2/os/gfile/gfile_source.go create mode 100644 vendor/github.com/gogf/gf/v2/os/gfile/gfile_time.go create mode 100644 vendor/github.com/gogf/gf/v2/os/gfpool/gfpool.go create mode 100644 vendor/github.com/gogf/gf/v2/os/gfpool/gfpool_file.go create mode 100644 vendor/github.com/gogf/gf/v2/os/gfpool/gfpool_pool.go create mode 100644 vendor/github.com/gogf/gf/v2/os/gfsnotify/gfsnotify.go create mode 100644 vendor/github.com/gogf/gf/v2/os/gfsnotify/gfsnotify_event.go create mode 100644 vendor/github.com/gogf/gf/v2/os/gfsnotify/gfsnotify_filefunc.go create mode 100644 vendor/github.com/gogf/gf/v2/os/gfsnotify/gfsnotify_watcher.go create mode 100644 vendor/github.com/gogf/gf/v2/os/gfsnotify/gfsnotify_watcher_loop.go create mode 100644 vendor/github.com/gogf/gf/v2/os/glog/glog.go create mode 100644 vendor/github.com/gogf/gf/v2/os/glog/glog_api.go create mode 100644 vendor/github.com/gogf/gf/v2/os/glog/glog_chaining.go create mode 100644 vendor/github.com/gogf/gf/v2/os/glog/glog_config.go create mode 100644 vendor/github.com/gogf/gf/v2/os/glog/glog_instance.go create mode 100644 vendor/github.com/gogf/gf/v2/os/glog/glog_logger.go create mode 100644 vendor/github.com/gogf/gf/v2/os/glog/glog_logger_api.go create mode 100644 vendor/github.com/gogf/gf/v2/os/glog/glog_logger_chaining.go create mode 100644 vendor/github.com/gogf/gf/v2/os/glog/glog_logger_color.go create mode 100644 
vendor/github.com/gogf/gf/v2/os/glog/glog_logger_config.go create mode 100644 vendor/github.com/gogf/gf/v2/os/glog/glog_logger_handler.go create mode 100644 vendor/github.com/gogf/gf/v2/os/glog/glog_logger_handler_json.go create mode 100644 vendor/github.com/gogf/gf/v2/os/glog/glog_logger_level.go create mode 100644 vendor/github.com/gogf/gf/v2/os/glog/glog_logger_rotate.go create mode 100644 vendor/github.com/gogf/gf/v2/os/glog/glog_logger_writer.go create mode 100644 vendor/github.com/gogf/gf/v2/os/gmlock/gmlock.go create mode 100644 vendor/github.com/gogf/gf/v2/os/gmlock/gmlock_locker.go create mode 100644 vendor/github.com/gogf/gf/v2/os/gmutex/gmutex.go create mode 100644 vendor/github.com/gogf/gf/v2/os/grpool/grpool.go create mode 100644 vendor/github.com/gogf/gf/v2/os/grpool/grpool_supervisor.go create mode 100644 vendor/github.com/gogf/gf/v2/os/gstructs/gstructs.go create mode 100644 vendor/github.com/gogf/gf/v2/os/gstructs/gstructs_field.go create mode 100644 vendor/github.com/gogf/gf/v2/os/gstructs/gstructs_field_tag.go create mode 100644 vendor/github.com/gogf/gf/v2/os/gstructs/gstructs_tag.go create mode 100644 vendor/github.com/gogf/gf/v2/os/gstructs/gstructs_type.go create mode 100644 vendor/github.com/gogf/gf/v2/os/gtime/gtime.go create mode 100644 vendor/github.com/gogf/gf/v2/os/gtime/gtime_format.go create mode 100644 vendor/github.com/gogf/gf/v2/os/gtime/gtime_sql.go create mode 100644 vendor/github.com/gogf/gf/v2/os/gtime/gtime_time.go create mode 100644 vendor/github.com/gogf/gf/v2/os/gtime/gtime_time_wrapper.go create mode 100644 vendor/github.com/gogf/gf/v2/os/gtime/gtime_time_zone.go create mode 100644 vendor/github.com/gogf/gf/v2/os/gtimer/gtimer.go create mode 100644 vendor/github.com/gogf/gf/v2/os/gtimer/gtimer_entry.go create mode 100644 vendor/github.com/gogf/gf/v2/os/gtimer/gtimer_exit.go create mode 100644 vendor/github.com/gogf/gf/v2/os/gtimer/gtimer_queue.go create mode 100644 
vendor/github.com/gogf/gf/v2/os/gtimer/gtimer_queue_heap.go create mode 100644 vendor/github.com/gogf/gf/v2/os/gtimer/gtimer_timer.go create mode 100644 vendor/github.com/gogf/gf/v2/os/gtimer/gtimer_timer_loop.go create mode 100644 vendor/github.com/gogf/gf/v2/text/gregex/gregex.go create mode 100644 vendor/github.com/gogf/gf/v2/text/gregex/gregex_cache.go create mode 100644 vendor/github.com/gogf/gf/v2/text/gstr/gstr.go create mode 100644 vendor/github.com/gogf/gf/v2/text/gstr/gstr_array.go create mode 100644 vendor/github.com/gogf/gf/v2/text/gstr/gstr_case.go create mode 100644 vendor/github.com/gogf/gf/v2/text/gstr/gstr_compare.go create mode 100644 vendor/github.com/gogf/gf/v2/text/gstr/gstr_contain.go create mode 100644 vendor/github.com/gogf/gf/v2/text/gstr/gstr_convert.go create mode 100644 vendor/github.com/gogf/gf/v2/text/gstr/gstr_count.go create mode 100644 vendor/github.com/gogf/gf/v2/text/gstr/gstr_create.go create mode 100644 vendor/github.com/gogf/gf/v2/text/gstr/gstr_domain.go create mode 100644 vendor/github.com/gogf/gf/v2/text/gstr/gstr_is.go create mode 100644 vendor/github.com/gogf/gf/v2/text/gstr/gstr_length.go create mode 100644 vendor/github.com/gogf/gf/v2/text/gstr/gstr_parse.go create mode 100644 vendor/github.com/gogf/gf/v2/text/gstr/gstr_pos.go create mode 100644 vendor/github.com/gogf/gf/v2/text/gstr/gstr_replace.go create mode 100644 vendor/github.com/gogf/gf/v2/text/gstr/gstr_similar.go create mode 100644 vendor/github.com/gogf/gf/v2/text/gstr/gstr_slashes.go create mode 100644 vendor/github.com/gogf/gf/v2/text/gstr/gstr_split_join.go create mode 100644 vendor/github.com/gogf/gf/v2/text/gstr/gstr_sub.go create mode 100644 vendor/github.com/gogf/gf/v2/text/gstr/gstr_trim.go create mode 100644 vendor/github.com/gogf/gf/v2/text/gstr/gstr_upper_lower.go create mode 100644 vendor/github.com/gogf/gf/v2/text/gstr/gstr_version.go create mode 100644 vendor/github.com/gogf/gf/v2/util/gconv/gconv.go create mode 100644 
vendor/github.com/gogf/gf/v2/util/gconv/gconv_convert.go create mode 100644 vendor/github.com/gogf/gf/v2/util/gconv/gconv_float.go create mode 100644 vendor/github.com/gogf/gf/v2/util/gconv/gconv_int.go create mode 100644 vendor/github.com/gogf/gf/v2/util/gconv/gconv_interface.go create mode 100644 vendor/github.com/gogf/gf/v2/util/gconv/gconv_map.go create mode 100644 vendor/github.com/gogf/gf/v2/util/gconv/gconv_maps.go create mode 100644 vendor/github.com/gogf/gf/v2/util/gconv/gconv_maptomap.go create mode 100644 vendor/github.com/gogf/gf/v2/util/gconv/gconv_maptomaps.go create mode 100644 vendor/github.com/gogf/gf/v2/util/gconv/gconv_ptr.go create mode 100644 vendor/github.com/gogf/gf/v2/util/gconv/gconv_scan.go create mode 100644 vendor/github.com/gogf/gf/v2/util/gconv/gconv_slice_any.go create mode 100644 vendor/github.com/gogf/gf/v2/util/gconv/gconv_slice_float.go create mode 100644 vendor/github.com/gogf/gf/v2/util/gconv/gconv_slice_int.go create mode 100644 vendor/github.com/gogf/gf/v2/util/gconv/gconv_slice_str.go create mode 100644 vendor/github.com/gogf/gf/v2/util/gconv/gconv_slice_uint.go create mode 100644 vendor/github.com/gogf/gf/v2/util/gconv/gconv_struct.go create mode 100644 vendor/github.com/gogf/gf/v2/util/gconv/gconv_structs.go create mode 100644 vendor/github.com/gogf/gf/v2/util/gconv/gconv_time.go create mode 100644 vendor/github.com/gogf/gf/v2/util/gconv/gconv_uint.go create mode 100644 vendor/github.com/gogf/gf/v2/util/gconv/gconv_unsafe.go create mode 100644 vendor/github.com/gogf/gf/v2/util/grand/grand.go create mode 100644 vendor/github.com/gogf/gf/v2/util/grand/grand_buffer.go create mode 100644 vendor/github.com/gogf/gf/v2/util/gtag/gtag.go create mode 100644 vendor/github.com/gogf/gf/v2/util/gtag/gtag_func.go create mode 100644 vendor/github.com/gogf/gf/v2/util/gutil/gutil.go create mode 100644 vendor/github.com/gogf/gf/v2/util/gutil/gutil_comparator.go create mode 100644 vendor/github.com/gogf/gf/v2/util/gutil/gutil_copy.go create 
mode 100644 vendor/github.com/gogf/gf/v2/util/gutil/gutil_default.go create mode 100644 vendor/github.com/gogf/gf/v2/util/gutil/gutil_dump.go create mode 100644 vendor/github.com/gogf/gf/v2/util/gutil/gutil_list.go create mode 100644 vendor/github.com/gogf/gf/v2/util/gutil/gutil_map.go create mode 100644 vendor/github.com/gogf/gf/v2/util/gutil/gutil_reflect.go create mode 100644 vendor/github.com/gogf/gf/v2/util/gutil/gutil_slice.go create mode 100644 vendor/github.com/gogf/gf/v2/util/gutil/gutil_struct.go create mode 100644 vendor/github.com/mattn/go-colorable/LICENSE create mode 100644 vendor/github.com/mattn/go-colorable/README.md create mode 100644 vendor/github.com/mattn/go-colorable/colorable_appengine.go create mode 100644 vendor/github.com/mattn/go-colorable/colorable_others.go create mode 100644 vendor/github.com/mattn/go-colorable/colorable_windows.go create mode 100644 vendor/github.com/mattn/go-colorable/noncolorable.go create mode 100644 vendor/go.opentelemetry.io/otel/.gitattributes create mode 100644 vendor/go.opentelemetry.io/otel/.gitignore create mode 100644 vendor/go.opentelemetry.io/otel/.gitmodules create mode 100644 vendor/go.opentelemetry.io/otel/.golangci.yml create mode 100644 vendor/go.opentelemetry.io/otel/.lycheeignore create mode 100644 vendor/go.opentelemetry.io/otel/.markdownlint.yaml create mode 100644 vendor/go.opentelemetry.io/otel/CHANGELOG.md create mode 100644 vendor/go.opentelemetry.io/otel/CODEOWNERS create mode 100644 vendor/go.opentelemetry.io/otel/CONTRIBUTING.md create mode 100644 vendor/go.opentelemetry.io/otel/LICENSE create mode 100644 vendor/go.opentelemetry.io/otel/Makefile create mode 100644 vendor/go.opentelemetry.io/otel/README.md create mode 100644 vendor/go.opentelemetry.io/otel/RELEASING.md create mode 100644 vendor/go.opentelemetry.io/otel/VERSIONING.md create mode 100644 vendor/go.opentelemetry.io/otel/attribute/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/attribute/encoder.go create mode 100644 
vendor/go.opentelemetry.io/otel/attribute/iterator.go create mode 100644 vendor/go.opentelemetry.io/otel/attribute/key.go create mode 100644 vendor/go.opentelemetry.io/otel/attribute/kv.go create mode 100644 vendor/go.opentelemetry.io/otel/attribute/set.go create mode 100644 vendor/go.opentelemetry.io/otel/attribute/type_string.go create mode 100644 vendor/go.opentelemetry.io/otel/attribute/value.go create mode 100644 vendor/go.opentelemetry.io/otel/baggage/baggage.go create mode 100644 vendor/go.opentelemetry.io/otel/baggage/context.go create mode 100644 vendor/go.opentelemetry.io/otel/baggage/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/codes/codes.go create mode 100644 vendor/go.opentelemetry.io/otel/codes/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/error_handler.go create mode 100644 vendor/go.opentelemetry.io/otel/handler.go create mode 100644 vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go create mode 100644 vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go create mode 100644 vendor/go.opentelemetry.io/otel/internal/baggage/context.go create mode 100644 vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go create mode 100644 vendor/go.opentelemetry.io/otel/internal/global/propagator.go create mode 100644 vendor/go.opentelemetry.io/otel/internal/global/state.go create mode 100644 vendor/go.opentelemetry.io/otel/internal/global/trace.go create mode 100644 vendor/go.opentelemetry.io/otel/internal/rawhelpers.go create mode 100644 vendor/go.opentelemetry.io/otel/internal_logging.go create mode 100644 vendor/go.opentelemetry.io/otel/propagation.go create mode 100644 vendor/go.opentelemetry.io/otel/propagation/baggage.go create mode 100644 vendor/go.opentelemetry.io/otel/propagation/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/propagation/propagation.go create mode 100644 
vendor/go.opentelemetry.io/otel/propagation/trace_context.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/LICENSE create mode 100644 vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/internal/internal.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/auto.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/config.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/container.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/env.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/os.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/process.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/resource.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/event.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go create mode 100644 
vendor/go.opentelemetry.io/otel/sdk/trace/link.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/provider.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/span.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/internal/http.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.4.0/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.4.0/exception.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.4.0/http.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.4.0/resource.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.4.0/schema.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.4.0/trace.go create mode 100644 vendor/go.opentelemetry.io/otel/trace.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/LICENSE 
create mode 100644 vendor/go.opentelemetry.io/otel/trace/config.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/context.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/nonrecording.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/noop.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/trace.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/tracestate.go create mode 100644 vendor/go.opentelemetry.io/otel/version.go create mode 100644 vendor/go.opentelemetry.io/otel/versions.yaml create mode 100644 vendor/golang.org/x/sync/semaphore/semaphore.go create mode 100644 vendor/golang.org/x/sys/windows/registry/key.go create mode 100644 vendor/golang.org/x/sys/windows/registry/mksyscall.go create mode 100644 vendor/golang.org/x/sys/windows/registry/syscall.go create mode 100644 vendor/golang.org/x/sys/windows/registry/value.go create mode 100644 vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go diff --git a/go.mod b/go.mod index 9d7416f6..d654670b 100644 --- a/go.mod +++ b/go.mod @@ -10,11 +10,13 @@ require ( github.com/basgys/goxml2json v1.1.0 github.com/bytedance/sonic v1.8.6 github.com/gin-gonic/gin v1.9.0 + github.com/go-co-op/gocron v1.19.0 github.com/go-playground/locales v0.14.1 github.com/go-playground/universal-translator v0.18.1 github.com/go-playground/validator/v10 v10.12.0 github.com/go-sql-driver/mysql v1.7.0 github.com/goccy/go-json v0.10.2 + github.com/gogf/gf/v2 v2.3.3 github.com/json-iterator/go v1.1.12 github.com/lib/pq v1.10.7 github.com/mitchellh/mapstructure v1.5.0 @@ -47,7 +49,11 @@ require ( github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect github.com/clbanning/mxj v1.8.4 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect + github.com/fatih/color v1.15.0 // indirect + github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/gin-contrib/sse v0.1.0 // 
indirect + github.com/go-logr/logr v1.2.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.2.6 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/google/go-querystring v1.1.0 // indirect @@ -59,6 +65,7 @@ require ( github.com/klauspost/compress v1.16.3 // indirect github.com/klauspost/cpuid/v2 v2.2.4 // indirect github.com/leodido/go-urn v1.2.2 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.18 // indirect github.com/mattn/go-sqlite3 v2.0.3+incompatible // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect @@ -82,6 +89,9 @@ require ( github.com/xdg-go/stringprep v1.0.4 // indirect github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a // indirect github.com/yusufpapurcu/wmi v1.2.2 // indirect + go.opentelemetry.io/otel v1.14.0 // indirect + go.opentelemetry.io/otel/sdk v1.14.0 // indirect + go.opentelemetry.io/otel/trace v1.14.0 // indirect go.uber.org/atomic v1.10.0 // indirect go.uber.org/multierr v1.10.0 // indirect golang.org/x/arch v0.3.0 // indirect diff --git a/go.sum b/go.sum index 00484138..9cb106d8 100644 --- a/go.sum +++ b/go.sum @@ -3,8 +3,9 @@ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT gitea.com/xorm/sqlfiddle v0.0.0-20180821085327-62ce714f951a h1:lSA0F4e9A2NcQSqGqTOXqu2aRi/XEQxDCBwM8yJtE6s= gitea.com/xorm/sqlfiddle v0.0.0-20180821085327-62ce714f951a/go.mod h1:EXuID2Zs0pAQhH8yz+DNjUbjppKQzKFAn28TMYPB6IU= gitee.com/travelliu/dm v1.8.11192/go.mod h1:DHTzyhCrM843x9VdKVbZ+GKXGRbKM2sJ4LxihRxShkE= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v1.1.0 h1:ksErzDEI1khOiGPgpwuI7x2ebx/uXQNw7xJpn9Eq1+I= +github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/Knetic/govaluate 
v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= github.com/MercuryEngineering/CookieMonster v0.0.0-20180304172713-1584578b3403 h1:EtZwYyLbkEcIt+B//6sujwRCnHuTEK3qiSypAX5aJeM= @@ -58,6 +59,8 @@ github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhD github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk= github.com/clbanning/mxj v1.8.4 h1:HuhwZtbyvyOw+3Z1AowPkU87JkJUSv751ELWaiTpj8I= github.com/clbanning/mxj v1.8.4/go.mod h1:BVjHeAH+rl9rs6f+QIpeRl0tfu10SXn1pUSa5PVGJng= +github.com/clbanning/mxj/v2 v2.5.5 h1:oT81vUeEiQQ/DcHbzSytRngP6Ky9O+L+0Bw0zSJag9E= +github.com/clbanning/mxj/v2 v2.5.5/go.mod h1:hNiWqW14h+kc+MdF9C6/YoRfjEJoR3ou6tn/Qo+ve2s= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= @@ -88,20 +91,33 @@ github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4s github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= +github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goreq 
v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= github.com/gin-gonic/gin v1.9.0 h1:OjyFBKICoexlu99ctXNR2gg+c5pKrKMuyjgARg9qeY8= github.com/gin-gonic/gin v1.9.0/go.mod h1:W1Me9+hsUSyj3CePGrd1/QrKJMSJ1Tu/0hFEH89961k= +github.com/go-co-op/gocron v1.19.0 h1:XlPLqNnxnKblmCRLdfcWV1UgbukQaU54QdNeR1jtgak= +github.com/go-co-op/gocron v1.19.0/go.mod h1:UqVyvM90I1q/R1qGEX6cBORI6WArLuEgYlbncLMvzRM= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= 
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= @@ -127,6 +143,8 @@ github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gogf/gf/v2 v2.3.3 h1:3iry6kybjvuryTtjypG9oUuxrQ0URMT7j0DVg7FFnaw= +github.com/gogf/gf/v2 v2.3.3/go.mod h1:tsbmtwcAl2chcYoq/fP9W2FZf06aw4i89X34nbSHo9Y= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -153,8 +171,9 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring 
v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= @@ -169,6 +188,10 @@ github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51 github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/grokify/html-strip-tags-go v0.0.1 h1:0fThFwLbW7P/kOiTBs03FsJSV9RM2M/Q/MOnCQxKMo0= +github.com/grokify/html-strip-tags-go v0.0.1/go.mod h1:2Su6romC5/1VXOQMaWL2yb618ARB8iVo6/DR99A6d78= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= @@ -300,10 +323,15 @@ github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= +github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo= +github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= 
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= @@ -311,9 +339,13 @@ github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98= github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-sqlite3 v1.14.9/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/mattn/go-sqlite3 v2.0.3+incompatible h1:gXHsfypPkaMZrKbD5209QV9jbUTJKjyR5WD3HYQSd+U= 
@@ -359,6 +391,8 @@ github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OS github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -512,6 +546,7 @@ github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7Jul github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a h1:fZHgsYlfvtyqToslyjUt3VOPF4J7aK/3MPcK7xp3PDk= github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a/go.mod h1:ul22v+Nro/R083muKhosV54bj5niojjWZvU8xrevuH4= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= @@ -524,13 +559,22 @@ go.mongodb.org/mongo-driver v1.11.3/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5queth go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opentelemetry.io/otel v1.7.0/go.mod 
h1:5BdUoMIz5WEs0vt0CUEMtSSaTSHBBVwrhnz7+nrD5xk= +go.opentelemetry.io/otel v1.14.0 h1:/79Huy8wbf5DnIPhemGB+zEPVwnN6fuQybr/SRXa6hM= +go.opentelemetry.io/otel v1.14.0/go.mod h1:o4buv+dJzx8rohcUeRmWUZhqupFvzWis188WlggnNeU= +go.opentelemetry.io/otel/sdk v1.7.0/go.mod h1:uTEOTwaqIVuTGiJN7ii13Ibp75wJmYUDe374q6cZwUU= +go.opentelemetry.io/otel/sdk v1.14.0 h1:PDCppFRDq8A1jL9v6KMI6dYesaq+DFcDZvjsoGvxGzY= +go.opentelemetry.io/otel/sdk v1.14.0/go.mod h1:bwIC5TjrNG6QDCHNWvW4HLHtUQ4I+VQDsnjhvyZCALM= +go.opentelemetry.io/otel/trace v1.7.0/go.mod h1:fzLSB9nqR2eXzxPXb2JW9IKE+ScyXA48yyE4TNvoHqU= +go.opentelemetry.io/otel/trace v1.14.0 h1:wp2Mmvj41tDsyAJXiWDWpfNsOiIyd38fy85pyKcFq/M= +go.opentelemetry.io/otel/trace v1.14.0/go.mod h1:8avnQLK+CG77yNLUae4ea2JDQ6iT+gozhnZjy/rw9G8= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI= +go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= @@ -576,6 +620,7 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.3.0/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs= golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= @@ -597,6 +642,7 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= @@ -642,15 +688,20 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201126233918-771906719818/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210902050250-f475640dd07b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211020174200-9d6173849985/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= @@ -666,6 +717,7 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8-0.20211105212822-18b340fc7af2/go.mod h1:EFNZuWvGYxIRUEX+K8UmCFwYmZjqcrnq15ZuVldZkZ0= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.7.0/go.mod 
h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= @@ -691,6 +743,7 @@ golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= @@ -699,7 +752,6 @@ golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= diff --git a/vendor/github.com/fatih/color/LICENSE.md b/vendor/github.com/fatih/color/LICENSE.md new file mode 100644 index 00000000..25fdaf63 --- /dev/null +++ b/vendor/github.com/fatih/color/LICENSE.md @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 Fatih Arslan + +Permission is hereby 
granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/fatih/color/README.md b/vendor/github.com/fatih/color/README.md new file mode 100644 index 00000000..be82827c --- /dev/null +++ b/vendor/github.com/fatih/color/README.md @@ -0,0 +1,176 @@ +# color [![](https://github.com/fatih/color/workflows/build/badge.svg)](https://github.com/fatih/color/actions) [![PkgGoDev](https://pkg.go.dev/badge/github.com/fatih/color)](https://pkg.go.dev/github.com/fatih/color) + +Color lets you use colorized outputs in terms of [ANSI Escape +Codes](http://en.wikipedia.org/wiki/ANSI_escape_code#Colors) in Go (Golang). It +has support for Windows too! The API can be used in several ways, pick one that +suits you. 
+ +![Color](https://user-images.githubusercontent.com/438920/96832689-03b3e000-13f4-11eb-9803-46f4c4de3406.jpg) + +## Install + +```bash +go get github.com/fatih/color +``` + +## Examples + +### Standard colors + +```go +// Print with default helper functions +color.Cyan("Prints text in cyan.") + +// A newline will be appended automatically +color.Blue("Prints %s in blue.", "text") + +// These are using the default foreground colors +color.Red("We have red") +color.Magenta("And many others ..") + +``` + +### Mix and reuse colors + +```go +// Create a new color object +c := color.New(color.FgCyan).Add(color.Underline) +c.Println("Prints cyan text with an underline.") + +// Or just add them to New() +d := color.New(color.FgCyan, color.Bold) +d.Printf("This prints bold cyan %s\n", "too!.") + +// Mix up foreground and background colors, create new mixes! +red := color.New(color.FgRed) + +boldRed := red.Add(color.Bold) +boldRed.Println("This will print text in bold red.") + +whiteBackground := red.Add(color.BgWhite) +whiteBackground.Println("Red text with white background.") +``` + +### Use your own output (io.Writer) + +```go +// Use your own io.Writer output +color.New(color.FgBlue).Fprintln(myWriter, "blue color!") + +blue := color.New(color.FgBlue) +blue.Fprint(writer, "This will print text in blue.") +``` + +### Custom print functions (PrintFunc) + +```go +// Create a custom print function for convenience +red := color.New(color.FgRed).PrintfFunc() +red("Warning") +red("Error: %s", err) + +// Mix up multiple attributes +notice := color.New(color.Bold, color.FgGreen).PrintlnFunc() +notice("Don't forget this...") +``` + +### Custom fprint functions (FprintFunc) + +```go +blue := color.New(color.FgBlue).FprintfFunc() +blue(myWriter, "important notice: %s", stars) + +// Mix up with multiple attributes +success := color.New(color.Bold, color.FgGreen).FprintlnFunc() +success(myWriter, "Don't forget this...") +``` + +### Insert into noncolor strings (SprintFunc) + +```go 
+// Create SprintXxx functions to mix strings with other non-colorized strings: +yellow := color.New(color.FgYellow).SprintFunc() +red := color.New(color.FgRed).SprintFunc() +fmt.Printf("This is a %s and this is %s.\n", yellow("warning"), red("error")) + +info := color.New(color.FgWhite, color.BgGreen).SprintFunc() +fmt.Printf("This %s rocks!\n", info("package")) + +// Use helper functions +fmt.Println("This", color.RedString("warning"), "should be not neglected.") +fmt.Printf("%v %v\n", color.GreenString("Info:"), "an important message.") + +// Windows supported too! Just don't forget to change the output to color.Output +fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS")) +``` + +### Plug into existing code + +```go +// Use handy standard colors +color.Set(color.FgYellow) + +fmt.Println("Existing text will now be in yellow") +fmt.Printf("This one %s\n", "too") + +color.Unset() // Don't forget to unset + +// You can mix up parameters +color.Set(color.FgMagenta, color.Bold) +defer color.Unset() // Use it in your function + +fmt.Println("All text will now be bold magenta.") +``` + +### Disable/Enable color + +There might be a case where you want to explicitly disable/enable color output. the +`go-isatty` package will automatically disable color output for non-tty output streams +(for example if the output were piped directly to `less`). + +The `color` package also disables color output if the [`NO_COLOR`](https://no-color.org) environment +variable is set to a non-empty string. + +`Color` has support to disable/enable colors programmatically both globally and +for single color definitions. For example suppose you have a CLI app and a +`-no-color` bool flag. You can easily disable the color output with: + +```go +var flagNoColor = flag.Bool("no-color", false, "Disable color output") + +if *flagNoColor { + color.NoColor = true // disables colorized output +} +``` + +It also has support for single color definitions (local). 
You can +disable/enable color output on the fly: + +```go +c := color.New(color.FgCyan) +c.Println("Prints cyan text") + +c.DisableColor() +c.Println("This is printed without any color") + +c.EnableColor() +c.Println("This prints again cyan...") +``` + +## GitHub Actions + +To output color in GitHub Actions (or other CI systems that support ANSI colors), make sure to set `color.NoColor = false` so that it bypasses the check for non-tty output streams. + +## Todo + +* Save/Return previous values +* Evaluate fmt.Formatter interface + +## Credits + +* [Fatih Arslan](https://github.com/fatih) +* Windows support via @mattn: [colorable](https://github.com/mattn/go-colorable) + +## License + +The MIT License (MIT) - see [`LICENSE.md`](https://github.com/fatih/color/blob/master/LICENSE.md) for more details diff --git a/vendor/github.com/fatih/color/color.go b/vendor/github.com/fatih/color/color.go new file mode 100644 index 00000000..889f9e77 --- /dev/null +++ b/vendor/github.com/fatih/color/color.go @@ -0,0 +1,616 @@ +package color + +import ( + "fmt" + "io" + "os" + "strconv" + "strings" + "sync" + + "github.com/mattn/go-colorable" + "github.com/mattn/go-isatty" +) + +var ( + // NoColor defines if the output is colorized or not. It's dynamically set to + // false or true based on the stdout's file descriptor referring to a terminal + // or not. It's also set to true if the NO_COLOR environment variable is + // set (regardless of its value). This is a global option and affects all + // colors. For more control over each color block use the methods + // DisableColor() individually. + NoColor = noColorIsSet() || os.Getenv("TERM") == "dumb" || + (!isatty.IsTerminal(os.Stdout.Fd()) && !isatty.IsCygwinTerminal(os.Stdout.Fd())) + + // Output defines the standard output of the print functions. By default, + // os.Stdout is used. + Output = colorable.NewColorableStdout() + + // Error defines a color supporting writer for os.Stderr. 
+ Error = colorable.NewColorableStderr() + + // colorsCache is used to reduce the count of created Color objects and + // allows to reuse already created objects with required Attribute. + colorsCache = make(map[Attribute]*Color) + colorsCacheMu sync.Mutex // protects colorsCache +) + +// noColorIsSet returns true if the environment variable NO_COLOR is set to a non-empty string. +func noColorIsSet() bool { + return os.Getenv("NO_COLOR") != "" +} + +// Color defines a custom color object which is defined by SGR parameters. +type Color struct { + params []Attribute + noColor *bool +} + +// Attribute defines a single SGR Code +type Attribute int + +const escape = "\x1b" + +// Base attributes +const ( + Reset Attribute = iota + Bold + Faint + Italic + Underline + BlinkSlow + BlinkRapid + ReverseVideo + Concealed + CrossedOut +) + +// Foreground text colors +const ( + FgBlack Attribute = iota + 30 + FgRed + FgGreen + FgYellow + FgBlue + FgMagenta + FgCyan + FgWhite +) + +// Foreground Hi-Intensity text colors +const ( + FgHiBlack Attribute = iota + 90 + FgHiRed + FgHiGreen + FgHiYellow + FgHiBlue + FgHiMagenta + FgHiCyan + FgHiWhite +) + +// Background text colors +const ( + BgBlack Attribute = iota + 40 + BgRed + BgGreen + BgYellow + BgBlue + BgMagenta + BgCyan + BgWhite +) + +// Background Hi-Intensity text colors +const ( + BgHiBlack Attribute = iota + 100 + BgHiRed + BgHiGreen + BgHiYellow + BgHiBlue + BgHiMagenta + BgHiCyan + BgHiWhite +) + +// New returns a newly created color object. +func New(value ...Attribute) *Color { + c := &Color{ + params: make([]Attribute, 0), + } + + if noColorIsSet() { + c.noColor = boolPtr(true) + } + + c.Add(value...) + return c +} + +// Set sets the given parameters immediately. It will change the color of +// output with the given SGR parameters until color.Unset() is called. +func Set(p ...Attribute) *Color { + c := New(p...) + c.Set() + return c +} + +// Unset resets all escape attributes and clears the output. 
Usually should +// be called after Set(). +func Unset() { + if NoColor { + return + } + + fmt.Fprintf(Output, "%s[%dm", escape, Reset) +} + +// Set sets the SGR sequence. +func (c *Color) Set() *Color { + if c.isNoColorSet() { + return c + } + + fmt.Fprint(Output, c.format()) + return c +} + +func (c *Color) unset() { + if c.isNoColorSet() { + return + } + + Unset() +} + +// SetWriter is used to set the SGR sequence with the given io.Writer. This is +// a low-level function, and users should use the higher-level functions, such +// as color.Fprint, color.Print, etc. +func (c *Color) SetWriter(w io.Writer) *Color { + if c.isNoColorSet() { + return c + } + + fmt.Fprint(w, c.format()) + return c +} + +// UnsetWriter resets all escape attributes and clears the output with the give +// io.Writer. Usually should be called after SetWriter(). +func (c *Color) UnsetWriter(w io.Writer) { + if c.isNoColorSet() { + return + } + + if NoColor { + return + } + + fmt.Fprintf(w, "%s[%dm", escape, Reset) +} + +// Add is used to chain SGR parameters. Use as many as parameters to combine +// and create custom color objects. Example: Add(color.FgRed, color.Underline). +func (c *Color) Add(value ...Attribute) *Color { + c.params = append(c.params, value...) + return c +} + +// Fprint formats using the default formats for its operands and writes to w. +// Spaces are added between operands when neither is a string. +// It returns the number of bytes written and any write error encountered. +// On Windows, users should wrap w with colorable.NewColorable() if w is of +// type *os.File. +func (c *Color) Fprint(w io.Writer, a ...interface{}) (n int, err error) { + c.SetWriter(w) + defer c.UnsetWriter(w) + + return fmt.Fprint(w, a...) +} + +// Print formats using the default formats for its operands and writes to +// standard output. Spaces are added between operands when neither is a +// string. It returns the number of bytes written and any write error +// encountered. 
This is the standard fmt.Print() method wrapped with the given +// color. +func (c *Color) Print(a ...interface{}) (n int, err error) { + c.Set() + defer c.unset() + + return fmt.Fprint(Output, a...) +} + +// Fprintf formats according to a format specifier and writes to w. +// It returns the number of bytes written and any write error encountered. +// On Windows, users should wrap w with colorable.NewColorable() if w is of +// type *os.File. +func (c *Color) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + c.SetWriter(w) + defer c.UnsetWriter(w) + + return fmt.Fprintf(w, format, a...) +} + +// Printf formats according to a format specifier and writes to standard output. +// It returns the number of bytes written and any write error encountered. +// This is the standard fmt.Printf() method wrapped with the given color. +func (c *Color) Printf(format string, a ...interface{}) (n int, err error) { + c.Set() + defer c.unset() + + return fmt.Fprintf(Output, format, a...) +} + +// Fprintln formats using the default formats for its operands and writes to w. +// Spaces are always added between operands and a newline is appended. +// On Windows, users should wrap w with colorable.NewColorable() if w is of +// type *os.File. +func (c *Color) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + c.SetWriter(w) + defer c.UnsetWriter(w) + + return fmt.Fprintln(w, a...) +} + +// Println formats using the default formats for its operands and writes to +// standard output. Spaces are always added between operands and a newline is +// appended. It returns the number of bytes written and any write error +// encountered. This is the standard fmt.Print() method wrapped with the given +// color. +func (c *Color) Println(a ...interface{}) (n int, err error) { + c.Set() + defer c.unset() + + return fmt.Fprintln(Output, a...) +} + +// Sprint is just like Print, but returns a string instead of printing it. 
+func (c *Color) Sprint(a ...interface{}) string { + return c.wrap(fmt.Sprint(a...)) +} + +// Sprintln is just like Println, but returns a string instead of printing it. +func (c *Color) Sprintln(a ...interface{}) string { + return c.wrap(fmt.Sprintln(a...)) +} + +// Sprintf is just like Printf, but returns a string instead of printing it. +func (c *Color) Sprintf(format string, a ...interface{}) string { + return c.wrap(fmt.Sprintf(format, a...)) +} + +// FprintFunc returns a new function that prints the passed arguments as +// colorized with color.Fprint(). +func (c *Color) FprintFunc() func(w io.Writer, a ...interface{}) { + return func(w io.Writer, a ...interface{}) { + c.Fprint(w, a...) + } +} + +// PrintFunc returns a new function that prints the passed arguments as +// colorized with color.Print(). +func (c *Color) PrintFunc() func(a ...interface{}) { + return func(a ...interface{}) { + c.Print(a...) + } +} + +// FprintfFunc returns a new function that prints the passed arguments as +// colorized with color.Fprintf(). +func (c *Color) FprintfFunc() func(w io.Writer, format string, a ...interface{}) { + return func(w io.Writer, format string, a ...interface{}) { + c.Fprintf(w, format, a...) + } +} + +// PrintfFunc returns a new function that prints the passed arguments as +// colorized with color.Printf(). +func (c *Color) PrintfFunc() func(format string, a ...interface{}) { + return func(format string, a ...interface{}) { + c.Printf(format, a...) + } +} + +// FprintlnFunc returns a new function that prints the passed arguments as +// colorized with color.Fprintln(). +func (c *Color) FprintlnFunc() func(w io.Writer, a ...interface{}) { + return func(w io.Writer, a ...interface{}) { + c.Fprintln(w, a...) + } +} + +// PrintlnFunc returns a new function that prints the passed arguments as +// colorized with color.Println(). +func (c *Color) PrintlnFunc() func(a ...interface{}) { + return func(a ...interface{}) { + c.Println(a...) 
+ } +} + +// SprintFunc returns a new function that returns colorized strings for the +// given arguments with fmt.Sprint(). Useful to put into or mix into other +// string. Windows users should use this in conjunction with color.Output, example: +// +// put := New(FgYellow).SprintFunc() +// fmt.Fprintf(color.Output, "This is a %s", put("warning")) +func (c *Color) SprintFunc() func(a ...interface{}) string { + return func(a ...interface{}) string { + return c.wrap(fmt.Sprint(a...)) + } +} + +// SprintfFunc returns a new function that returns colorized strings for the +// given arguments with fmt.Sprintf(). Useful to put into or mix into other +// string. Windows users should use this in conjunction with color.Output. +func (c *Color) SprintfFunc() func(format string, a ...interface{}) string { + return func(format string, a ...interface{}) string { + return c.wrap(fmt.Sprintf(format, a...)) + } +} + +// SprintlnFunc returns a new function that returns colorized strings for the +// given arguments with fmt.Sprintln(). Useful to put into or mix into other +// string. Windows users should use this in conjunction with color.Output. +func (c *Color) SprintlnFunc() func(a ...interface{}) string { + return func(a ...interface{}) string { + return c.wrap(fmt.Sprintln(a...)) + } +} + +// sequence returns a formatted SGR sequence to be plugged into a "\x1b[...m" +// an example output might be: "1;36" -> bold cyan +func (c *Color) sequence() string { + format := make([]string, len(c.params)) + for i, v := range c.params { + format[i] = strconv.Itoa(int(v)) + } + + return strings.Join(format, ";") +} + +// wrap wraps the s string with the colors attributes. The string is ready to +// be printed. 
+func (c *Color) wrap(s string) string { + if c.isNoColorSet() { + return s + } + + return c.format() + s + c.unformat() +} + +func (c *Color) format() string { + return fmt.Sprintf("%s[%sm", escape, c.sequence()) +} + +func (c *Color) unformat() string { + return fmt.Sprintf("%s[%dm", escape, Reset) +} + +// DisableColor disables the color output. Useful to not change any existing +// code and still being able to output. Can be used for flags like +// "--no-color". To enable back use EnableColor() method. +func (c *Color) DisableColor() { + c.noColor = boolPtr(true) +} + +// EnableColor enables the color output. Use it in conjunction with +// DisableColor(). Otherwise, this method has no side effects. +func (c *Color) EnableColor() { + c.noColor = boolPtr(false) +} + +func (c *Color) isNoColorSet() bool { + // check first if we have user set action + if c.noColor != nil { + return *c.noColor + } + + // if not return the global option, which is disabled by default + return NoColor +} + +// Equals returns a boolean value indicating whether two colors are equal. +func (c *Color) Equals(c2 *Color) bool { + if len(c.params) != len(c2.params) { + return false + } + + for _, attr := range c.params { + if !c2.attrExists(attr) { + return false + } + } + + return true +} + +func (c *Color) attrExists(a Attribute) bool { + for _, attr := range c.params { + if attr == a { + return true + } + } + + return false +} + +func boolPtr(v bool) *bool { + return &v +} + +func getCachedColor(p Attribute) *Color { + colorsCacheMu.Lock() + defer colorsCacheMu.Unlock() + + c, ok := colorsCache[p] + if !ok { + c = New(p) + colorsCache[p] = c + } + + return c +} + +func colorPrint(format string, p Attribute, a ...interface{}) { + c := getCachedColor(p) + + if !strings.HasSuffix(format, "\n") { + format += "\n" + } + + if len(a) == 0 { + c.Print(format) + } else { + c.Printf(format, a...) 
+ } +} + +func colorString(format string, p Attribute, a ...interface{}) string { + c := getCachedColor(p) + + if len(a) == 0 { + return c.SprintFunc()(format) + } + + return c.SprintfFunc()(format, a...) +} + +// Black is a convenient helper function to print with black foreground. A +// newline is appended to format by default. +func Black(format string, a ...interface{}) { colorPrint(format, FgBlack, a...) } + +// Red is a convenient helper function to print with red foreground. A +// newline is appended to format by default. +func Red(format string, a ...interface{}) { colorPrint(format, FgRed, a...) } + +// Green is a convenient helper function to print with green foreground. A +// newline is appended to format by default. +func Green(format string, a ...interface{}) { colorPrint(format, FgGreen, a...) } + +// Yellow is a convenient helper function to print with yellow foreground. +// A newline is appended to format by default. +func Yellow(format string, a ...interface{}) { colorPrint(format, FgYellow, a...) } + +// Blue is a convenient helper function to print with blue foreground. A +// newline is appended to format by default. +func Blue(format string, a ...interface{}) { colorPrint(format, FgBlue, a...) } + +// Magenta is a convenient helper function to print with magenta foreground. +// A newline is appended to format by default. +func Magenta(format string, a ...interface{}) { colorPrint(format, FgMagenta, a...) } + +// Cyan is a convenient helper function to print with cyan foreground. A +// newline is appended to format by default. +func Cyan(format string, a ...interface{}) { colorPrint(format, FgCyan, a...) } + +// White is a convenient helper function to print with white foreground. A +// newline is appended to format by default. +func White(format string, a ...interface{}) { colorPrint(format, FgWhite, a...) } + +// BlackString is a convenient helper function to return a string with black +// foreground. 
+func BlackString(format string, a ...interface{}) string { return colorString(format, FgBlack, a...) } + +// RedString is a convenient helper function to return a string with red +// foreground. +func RedString(format string, a ...interface{}) string { return colorString(format, FgRed, a...) } + +// GreenString is a convenient helper function to return a string with green +// foreground. +func GreenString(format string, a ...interface{}) string { return colorString(format, FgGreen, a...) } + +// YellowString is a convenient helper function to return a string with yellow +// foreground. +func YellowString(format string, a ...interface{}) string { return colorString(format, FgYellow, a...) } + +// BlueString is a convenient helper function to return a string with blue +// foreground. +func BlueString(format string, a ...interface{}) string { return colorString(format, FgBlue, a...) } + +// MagentaString is a convenient helper function to return a string with magenta +// foreground. +func MagentaString(format string, a ...interface{}) string { + return colorString(format, FgMagenta, a...) +} + +// CyanString is a convenient helper function to return a string with cyan +// foreground. +func CyanString(format string, a ...interface{}) string { return colorString(format, FgCyan, a...) } + +// WhiteString is a convenient helper function to return a string with white +// foreground. +func WhiteString(format string, a ...interface{}) string { return colorString(format, FgWhite, a...) } + +// HiBlack is a convenient helper function to print with hi-intensity black foreground. A +// newline is appended to format by default. +func HiBlack(format string, a ...interface{}) { colorPrint(format, FgHiBlack, a...) } + +// HiRed is a convenient helper function to print with hi-intensity red foreground. A +// newline is appended to format by default. +func HiRed(format string, a ...interface{}) { colorPrint(format, FgHiRed, a...) 
} + +// HiGreen is a convenient helper function to print with hi-intensity green foreground. A +// newline is appended to format by default. +func HiGreen(format string, a ...interface{}) { colorPrint(format, FgHiGreen, a...) } + +// HiYellow is a convenient helper function to print with hi-intensity yellow foreground. +// A newline is appended to format by default. +func HiYellow(format string, a ...interface{}) { colorPrint(format, FgHiYellow, a...) } + +// HiBlue is a convenient helper function to print with hi-intensity blue foreground. A +// newline is appended to format by default. +func HiBlue(format string, a ...interface{}) { colorPrint(format, FgHiBlue, a...) } + +// HiMagenta is a convenient helper function to print with hi-intensity magenta foreground. +// A newline is appended to format by default. +func HiMagenta(format string, a ...interface{}) { colorPrint(format, FgHiMagenta, a...) } + +// HiCyan is a convenient helper function to print with hi-intensity cyan foreground. A +// newline is appended to format by default. +func HiCyan(format string, a ...interface{}) { colorPrint(format, FgHiCyan, a...) } + +// HiWhite is a convenient helper function to print with hi-intensity white foreground. A +// newline is appended to format by default. +func HiWhite(format string, a ...interface{}) { colorPrint(format, FgHiWhite, a...) } + +// HiBlackString is a convenient helper function to return a string with hi-intensity black +// foreground. +func HiBlackString(format string, a ...interface{}) string { + return colorString(format, FgHiBlack, a...) +} + +// HiRedString is a convenient helper function to return a string with hi-intensity red +// foreground. +func HiRedString(format string, a ...interface{}) string { return colorString(format, FgHiRed, a...) } + +// HiGreenString is a convenient helper function to return a string with hi-intensity green +// foreground. 
+func HiGreenString(format string, a ...interface{}) string { + return colorString(format, FgHiGreen, a...) +} + +// HiYellowString is a convenient helper function to return a string with hi-intensity yellow +// foreground. +func HiYellowString(format string, a ...interface{}) string { + return colorString(format, FgHiYellow, a...) +} + +// HiBlueString is a convenient helper function to return a string with hi-intensity blue +// foreground. +func HiBlueString(format string, a ...interface{}) string { return colorString(format, FgHiBlue, a...) } + +// HiMagentaString is a convenient helper function to return a string with hi-intensity magenta +// foreground. +func HiMagentaString(format string, a ...interface{}) string { + return colorString(format, FgHiMagenta, a...) +} + +// HiCyanString is a convenient helper function to return a string with hi-intensity cyan +// foreground. +func HiCyanString(format string, a ...interface{}) string { return colorString(format, FgHiCyan, a...) } + +// HiWhiteString is a convenient helper function to return a string with hi-intensity white +// foreground. +func HiWhiteString(format string, a ...interface{}) string { + return colorString(format, FgHiWhite, a...) +} diff --git a/vendor/github.com/fatih/color/color_windows.go b/vendor/github.com/fatih/color/color_windows.go new file mode 100644 index 00000000..be01c558 --- /dev/null +++ b/vendor/github.com/fatih/color/color_windows.go @@ -0,0 +1,19 @@ +package color + +import ( + "os" + + "golang.org/x/sys/windows" +) + +func init() { + // Opt-in for ansi color support for current process. 
+ // https://learn.microsoft.com/en-us/windows/console/console-virtual-terminal-sequences#output-sequences + var outMode uint32 + out := windows.Handle(os.Stdout.Fd()) + if err := windows.GetConsoleMode(out, &outMode); err != nil { + return + } + outMode |= windows.ENABLE_PROCESSED_OUTPUT | windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING + _ = windows.SetConsoleMode(out, outMode) +} diff --git a/vendor/github.com/fatih/color/doc.go b/vendor/github.com/fatih/color/doc.go new file mode 100644 index 00000000..9491ad54 --- /dev/null +++ b/vendor/github.com/fatih/color/doc.go @@ -0,0 +1,134 @@ +/* +Package color is an ANSI color package to output colorized or SGR defined +output to the standard output. The API can be used in several way, pick one +that suits you. + +Use simple and default helper functions with predefined foreground colors: + + color.Cyan("Prints text in cyan.") + + // a newline will be appended automatically + color.Blue("Prints %s in blue.", "text") + + // More default foreground colors.. + color.Red("We have red") + color.Yellow("Yellow color too!") + color.Magenta("And many others ..") + + // Hi-intensity colors + color.HiGreen("Bright green color.") + color.HiBlack("Bright black means gray..") + color.HiWhite("Shiny white color!") + +However, there are times when custom color mixes are required. Below are some +examples to create custom color objects and use the print functions of each +separate color object. + + // Create a new color object + c := color.New(color.FgCyan).Add(color.Underline) + c.Println("Prints cyan text with an underline.") + + // Or just add them to New() + d := color.New(color.FgCyan, color.Bold) + d.Printf("This prints bold cyan %s\n", "too!.") + + + // Mix up foreground and background colors, create new mixes! 
+	red := color.New(color.FgRed)
+
+	boldRed := red.Add(color.Bold)
+	boldRed.Println("This will print text in bold red.")
+
+	whiteBackground := red.Add(color.BgWhite)
+	whiteBackground.Println("Red text with White background.")
+
+	// Use your own io.Writer output
+	color.New(color.FgBlue).Fprintln(myWriter, "blue color!")
+
+	blue := color.New(color.FgBlue)
+	blue.Fprint(myWriter, "This will print text in blue.")
+
+You can create PrintXxx functions to simplify even more:
+
+	// Create a custom print function for convenience
+	red := color.New(color.FgRed).PrintfFunc()
+	red("warning")
+	red("error: %s", err)
+
+	// Mix up multiple attributes
+	notice := color.New(color.Bold, color.FgGreen).PrintlnFunc()
+	notice("don't forget this...")
+
+You can also use FprintXxx functions to pass your own io.Writer:
+
+	blue := color.New(FgBlue).FprintfFunc()
+	blue(myWriter, "important notice: %s", stars)
+
+	// Mix up with multiple attributes
+	success := color.New(color.Bold, color.FgGreen).FprintlnFunc()
+	success(myWriter, "don't forget this...")
+
+Or create SprintXxx functions to mix strings with other non-colorized strings:
+
+	yellow := New(FgYellow).SprintFunc()
+	red := New(FgRed).SprintFunc()
+
+	fmt.Printf("this is a %s and this is %s.\n", yellow("warning"), red("error"))
+
+	info := New(FgWhite, BgGreen).SprintFunc()
+	fmt.Printf("this %s rocks!\n", info("package"))
+
+Windows support is enabled by default. All Print functions work as intended.
+However, only for color.SprintXXX functions, user should use fmt.FprintXXX and
+set the output to color.Output:
+
+	fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS"))
+
+	info := New(FgWhite, BgGreen).SprintFunc()
+	fmt.Fprintf(color.Output, "this %s rocks!\n", info("package"))
+
+Using with existing code is possible. Just use the Set() method to set the
+standard output to the given parameters. That way a rewrite of an existing
+code is not required.
+
+	// Use handy standard colors.
+ color.Set(color.FgYellow) + + fmt.Println("Existing text will be now in Yellow") + fmt.Printf("This one %s\n", "too") + + color.Unset() // don't forget to unset + + // You can mix up parameters + color.Set(color.FgMagenta, color.Bold) + defer color.Unset() // use it in your function + + fmt.Println("All text will be now bold magenta.") + +There might be a case where you want to disable color output (for example to +pipe the standard output of your app to somewhere else). `Color` has support to +disable colors both globally and for single color definition. For example +suppose you have a CLI app and a `--no-color` bool flag. You can easily disable +the color output with: + + var flagNoColor = flag.Bool("no-color", false, "Disable color output") + + if *flagNoColor { + color.NoColor = true // disables colorized output + } + +You can also disable the color by setting the NO_COLOR environment variable to any value. + +It also has support for single color definitions (local). You can +disable/enable color output on the fly: + + c := color.New(color.FgCyan) + c.Println("Prints cyan text") + + c.DisableColor() + c.Println("This is printed without any color") + + c.EnableColor() + c.Println("This prints again cyan...") +*/ +package color diff --git a/vendor/github.com/fsnotify/fsnotify/.editorconfig b/vendor/github.com/fsnotify/fsnotify/.editorconfig new file mode 100644 index 00000000..fad89585 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/.editorconfig @@ -0,0 +1,12 @@ +root = true + +[*.go] +indent_style = tab +indent_size = 4 +insert_final_newline = true + +[*.{yml,yaml}] +indent_style = space +indent_size = 2 +insert_final_newline = true +trim_trailing_whitespace = true diff --git a/vendor/github.com/fsnotify/fsnotify/.gitattributes b/vendor/github.com/fsnotify/fsnotify/.gitattributes new file mode 100644 index 00000000..32f1001b --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/.gitattributes @@ -0,0 +1 @@ +go.sum linguist-generated diff --git 
a/vendor/github.com/fsnotify/fsnotify/.gitignore b/vendor/github.com/fsnotify/fsnotify/.gitignore new file mode 100644 index 00000000..1d89d85c --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/.gitignore @@ -0,0 +1,6 @@ +# go test -c output +*.test +*.test.exe + +# Output of go build ./cmd/fsnotify +/fsnotify diff --git a/vendor/github.com/fsnotify/fsnotify/.mailmap b/vendor/github.com/fsnotify/fsnotify/.mailmap new file mode 100644 index 00000000..a04f2907 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/.mailmap @@ -0,0 +1,2 @@ +Chris Howey +Nathan Youngman <4566+nathany@users.noreply.github.com> diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md new file mode 100644 index 00000000..77f9593b --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md @@ -0,0 +1,470 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] + +Nothing yet. + +## [1.6.0] - 2022-10-13 + +This version of fsnotify needs Go 1.16 (this was already the case since 1.5.1, +but not documented). It also increases the minimum Linux version to 2.6.32. + +### Additions + +- all: add `Event.Has()` and `Op.Has()` ([#477]) + + This makes checking events a lot easier; for example: + + if event.Op&Write == Write && !(event.Op&Remove == Remove) { + } + + Becomes: + + if event.Has(Write) && !event.Has(Remove) { + } + +- all: add cmd/fsnotify ([#463]) + + A command-line utility for testing and some examples. + +### Changes and fixes + +- inotify: don't ignore events for files that don't exist ([#260], [#470]) + + Previously the inotify watcher would call `os.Lstat()` to check if a file + still exists before emitting events. 
+ + This was inconsistent with other platforms and resulted in inconsistent event + reporting (e.g. when a file is quickly removed and re-created), and generally + a source of confusion. It was added in 2013 to fix a memory leak that no + longer exists. + +- all: return `ErrNonExistentWatch` when `Remove()` is called on a path that's + not watched ([#460]) + +- inotify: replace epoll() with non-blocking inotify ([#434]) + + Non-blocking inotify was not generally available at the time this library was + written in 2014, but now it is. As a result, the minimum Linux version is + bumped from 2.6.27 to 2.6.32. This hugely simplifies the code and is faster. + +- kqueue: don't check for events every 100ms ([#480]) + + The watcher would wake up every 100ms, even when there was nothing to do. Now + it waits until there is something to do. + +- macos: retry opening files on EINTR ([#475]) + +- kqueue: skip unreadable files ([#479]) + + kqueue requires a file descriptor for every file in a directory; this would + fail if a file was unreadable by the current user. Now these files are simply + skipped. + +- windows: fix renaming a watched directory if the parent is also watched ([#370]) + +- windows: increase buffer size from 4K to 64K ([#485]) + +- windows: close file handle on Remove() ([#288]) + +- kqueue: put pathname in the error if watching a file fails ([#471]) + +- inotify, windows: calling Close() more than once could race ([#465]) + +- kqueue: improve Close() performance ([#233]) + +- all: various documentation additions and clarifications. 
+ +[#233]: https://github.com/fsnotify/fsnotify/pull/233 +[#260]: https://github.com/fsnotify/fsnotify/pull/260 +[#288]: https://github.com/fsnotify/fsnotify/pull/288 +[#370]: https://github.com/fsnotify/fsnotify/pull/370 +[#434]: https://github.com/fsnotify/fsnotify/pull/434 +[#460]: https://github.com/fsnotify/fsnotify/pull/460 +[#463]: https://github.com/fsnotify/fsnotify/pull/463 +[#465]: https://github.com/fsnotify/fsnotify/pull/465 +[#470]: https://github.com/fsnotify/fsnotify/pull/470 +[#471]: https://github.com/fsnotify/fsnotify/pull/471 +[#475]: https://github.com/fsnotify/fsnotify/pull/475 +[#477]: https://github.com/fsnotify/fsnotify/pull/477 +[#479]: https://github.com/fsnotify/fsnotify/pull/479 +[#480]: https://github.com/fsnotify/fsnotify/pull/480 +[#485]: https://github.com/fsnotify/fsnotify/pull/485 + +## [1.5.4] - 2022-04-25 + +* Windows: add missing defer to `Watcher.WatchList` [#447](https://github.com/fsnotify/fsnotify/pull/447) +* go.mod: use latest x/sys [#444](https://github.com/fsnotify/fsnotify/pull/444) +* Fix compilation for OpenBSD [#443](https://github.com/fsnotify/fsnotify/pull/443) + +## [1.5.3] - 2022-04-22 + +* This version is retracted. 
An incorrect branch is published accidentally [#445](https://github.com/fsnotify/fsnotify/issues/445) + +## [1.5.2] - 2022-04-21 + +* Add a feature to return the directories and files that are being monitored [#374](https://github.com/fsnotify/fsnotify/pull/374) +* Fix potential crash on windows if `raw.FileNameLength` exceeds `syscall.MAX_PATH` [#361](https://github.com/fsnotify/fsnotify/pull/361) +* Allow build on unsupported GOOS [#424](https://github.com/fsnotify/fsnotify/pull/424) +* Don't set `poller.fd` twice in `newFdPoller` [#406](https://github.com/fsnotify/fsnotify/pull/406) +* fix go vet warnings: call to `(*T).Fatalf` from a non-test goroutine [#416](https://github.com/fsnotify/fsnotify/pull/416) + +## [1.5.1] - 2021-08-24 + +* Revert Add AddRaw to not follow symlinks [#394](https://github.com/fsnotify/fsnotify/pull/394) + +## [1.5.0] - 2021-08-20 + +* Go: Increase minimum required version to Go 1.12 [#381](https://github.com/fsnotify/fsnotify/pull/381) +* Feature: Add AddRaw method which does not follow symlinks when adding a watch [#289](https://github.com/fsnotify/fsnotify/pull/298) +* Windows: Follow symlinks by default like on all other systems [#289](https://github.com/fsnotify/fsnotify/pull/289) +* CI: Use GitHub Actions for CI and cover go 1.12-1.17 + [#378](https://github.com/fsnotify/fsnotify/pull/378) + [#381](https://github.com/fsnotify/fsnotify/pull/381) + [#385](https://github.com/fsnotify/fsnotify/pull/385) +* Go 1.14+: Fix unsafe pointer conversion [#325](https://github.com/fsnotify/fsnotify/pull/325) + +## [1.4.9] - 2020-03-11 + +* Move example usage to the readme #329. This may resolve #328. 
+ +## [1.4.8] - 2020-03-10 + +* CI: test more go versions (@nathany 1d13583d846ea9d66dcabbfefbfb9d8e6fb05216) +* Tests: Queued inotify events could have been read by the test before max_queued_events was hit (@matthias-stone #265) +* Tests: t.Fatalf -> t.Errorf in go routines (@gdey #266) +* CI: Less verbosity (@nathany #267) +* Tests: Darwin: Exchangedata is deprecated on 10.13 (@nathany #267) +* Tests: Check if channels are closed in the example (@alexeykazakov #244) +* CI: Only run golint on latest version of go and fix issues (@cpuguy83 #284) +* CI: Add windows to travis matrix (@cpuguy83 #284) +* Docs: Remover appveyor badge (@nathany 11844c0959f6fff69ba325d097fce35bd85a8e93) +* Linux: create epoll and pipe fds with close-on-exec (@JohannesEbke #219) +* Linux: open files with close-on-exec (@linxiulei #273) +* Docs: Plan to support fanotify (@nathany ab058b44498e8b7566a799372a39d150d9ea0119 ) +* Project: Add go.mod (@nathany #309) +* Project: Revise editor config (@nathany #309) +* Project: Update copyright for 2019 (@nathany #309) +* CI: Drop go1.8 from CI matrix (@nathany #309) +* Docs: Updating the FAQ section for supportability with NFS & FUSE filesystems (@Pratik32 4bf2d1fec78374803a39307bfb8d340688f4f28e ) + +## [1.4.7] - 2018-01-09 + +* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine) +* Tests: Fix missing verb on format string (thanks @rchiossi) +* Linux: Fix deadlock in Remove (thanks @aarondl) +* Linux: Watch.Add improvements (avoid race, fix consistency, reduce garbage) (thanks @twpayne) +* Docs: Moved FAQ into the README (thanks @vahe) +* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich) +* Docs: replace references to OS X with macOS + +## [1.4.2] - 2016-10-10 + +* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack) + +## [1.4.1] - 2016-10-04 
+ +* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack) + +## [1.4.0] - 2016-10-01 + +* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie) + +## [1.3.1] - 2016-06-28 + +* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc) + +## [1.3.0] - 2016-04-19 + +* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135) + +## [1.2.10] - 2016-03-02 + +* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj) + +## [1.2.9] - 2016-01-13 + +kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep) + +## [1.2.8] - 2015-12-17 + +* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test) +* inotify: fix race in test +* enable race detection for continuous integration (Linux, Mac, Windows) + +## [1.2.5] - 2015-10-17 + +* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki) +* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken) +* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie) +* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion) + +## [1.2.1] - 2015-10-14 + +* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx) + +## [1.2.0] - 2015-02-08 + +* inotify: use epoll to wake up readEvents 
[#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD) +* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD) +* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59) + +## [1.1.1] - 2015-02-05 + +* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD) + +## [1.1.0] - 2014-12-12 + +* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43) + * add low-level functions + * only need to store flags on directories + * less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13) + * done can be an unbuffered channel + * remove calls to os.NewSyscallError +* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher) +* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48) +* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) + +## [1.0.4] - 2014-09-07 + +* kqueue: add dragonfly to the build tags. +* Rename source code files, rearrange code so exported APIs are at the top. +* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang) + +## [1.0.3] - 2014-08-19 + +* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36) + +## [1.0.2] - 2014-08-17 + +* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) +* [Fix] Make ./path and path equivalent. (thanks @zhsso) + +## [1.0.0] - 2014-08-15 + +* [API] Remove AddWatch on Windows, use Add. +* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30) +* Minor updates based on feedback from golint. 
+ +## dev / 2014-07-09 + +* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify). +* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno) + +## dev / 2014-07-04 + +* kqueue: fix incorrect mutex used in Close() +* Update example to demonstrate usage of Op. + +## dev / 2014-06-28 + +* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4) +* Fix for String() method on Event (thanks Alex Brainman) +* Don't build on Plan 9 or Solaris (thanks @4ad) + +## dev / 2014-06-21 + +* Events channel of type Event rather than *Event. +* [internal] use syscall constants directly for inotify and kqueue. +* [internal] kqueue: rename events to kevents and fileEvent to event. + +## dev / 2014-06-19 + +* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally). +* [internal] remove cookie from Event struct (unused). +* [internal] Event struct has the same definition across every OS. +* [internal] remove internal watch and removeWatch methods. + +## dev / 2014-06-12 + +* [API] Renamed Watch() to Add() and RemoveWatch() to Remove(). +* [API] Pluralized channel names: Events and Errors. +* [API] Renamed FileEvent struct to Event. +* [API] Op constants replace methods like IsCreate(). + +## dev / 2014-06-12 + +* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) + +## dev / 2014-05-23 + +* [API] Remove current implementation of WatchFlags. 
+ * current implementation doesn't take advantage of OS for efficiency + * provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes + * no tests for the current implementation + * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195) + +## [0.9.3] - 2014-12-31 + +* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) + +## [0.9.2] - 2014-08-17 + +* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) + +## [0.9.1] - 2014-06-12 + +* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) + +## [0.9.0] - 2014-01-17 + +* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany) +* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare) +* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library. 
+ +## [0.8.12] - 2013-11-13 + +* [API] Remove FD_SET and friends from Linux adapter + +## [0.8.11] - 2013-11-02 + +* [Doc] Add Changelog [#72][] (thanks @nathany) +* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond) + +## [0.8.10] - 2013-10-19 + +* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott) +* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer) +* [Doc] specify OS-specific limits in README (thanks @debrando) + +## [0.8.9] - 2013-09-08 + +* [Doc] Contributing (thanks @nathany) +* [Doc] update package path in example code [#63][] (thanks @paulhammond) +* [Doc] GoCI badge in README (Linux only) [#60][] +* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany) + +## [0.8.8] - 2013-06-17 + +* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie) + +## [0.8.7] - 2013-06-03 + +* [API] Make syscall flags internal +* [Fix] inotify: ignore event changes +* [Fix] race in symlink test [#45][] (reported by @srid) +* [Fix] tests on Windows +* lower case error messages + +## [0.8.6] - 2013-05-23 + +* kqueue: Use EVT_ONLY flag on Darwin +* [Doc] Update README with full example + +## [0.8.5] - 2013-05-09 + +* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg) + +## [0.8.4] - 2013-04-07 + +* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz) + +## [0.8.3] - 2013-03-13 + +* [Fix] inoitfy/kqueue memory leak [#36][] (reported by @nbkolchin) +* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin) + +## [0.8.2] - 2013-02-07 + +* [Doc] add Authors +* [Fix] fix data races for map access [#29][] (thanks @fsouza) + +## [0.8.1] - 2013-01-09 + +* [Fix] Windows path separators +* [Doc] BSD License + +## [0.8.0] - 2012-11-09 + +* kqueue: directory watching improvements (thanks @vmirage) +* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto) +* [Fix] kqueue: 
deleting watched directory [#24][] (reported by @jakerr) + +## [0.7.4] - 2012-10-09 + +* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji) +* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig) +* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig) +* [Fix] kqueue: modify after recreation of file + +## [0.7.3] - 2012-09-27 + +* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage) +* [Fix] kqueue: no longer get duplicate CREATE events + +## [0.7.2] - 2012-09-01 + +* kqueue: events for created directories + +## [0.7.1] - 2012-07-14 + +* [Fix] for renaming files + +## [0.7.0] - 2012-07-02 + +* [Feature] FSNotify flags +* [Fix] inotify: Added file name back to event path + +## [0.6.0] - 2012-06-06 + +* kqueue: watch files after directory created (thanks @tmc) + +## [0.5.1] - 2012-05-22 + +* [Fix] inotify: remove all watches before Close() + +## [0.5.0] - 2012-05-03 + +* [API] kqueue: return errors during watch instead of sending over channel +* kqueue: match symlink behavior on Linux +* inotify: add `DELETE_SELF` (requested by @taralx) +* [Fix] kqueue: handle EINTR (reported by @robfig) +* [Doc] Godoc example [#1][] (thanks @davecheney) + +## [0.4.0] - 2012-03-30 + +* Go 1 released: build with go tool +* [Feature] Windows support using winfsnotify +* Windows does not have attribute change notifications +* Roll attribute notifications into IsModify + +## [0.3.0] - 2012-02-19 + +* kqueue: add files when watch directory + +## [0.2.0] - 2011-12-30 + +* update to latest Go weekly code + +## [0.1.0] - 2011-10-19 + +* kqueue: add watch on file creation to match inotify +* kqueue: create file event +* inotify: ignore `IN_IGNORED` events +* event String() +* linux: common FileEvent functions +* initial commit + +[#79]: https://github.com/howeyc/fsnotify/pull/79 +[#77]: https://github.com/howeyc/fsnotify/pull/77 +[#72]: 
https://github.com/howeyc/fsnotify/issues/72
+[#71]: https://github.com/howeyc/fsnotify/issues/71
+[#70]: https://github.com/howeyc/fsnotify/issues/70
+[#63]: https://github.com/howeyc/fsnotify/issues/63
+[#62]: https://github.com/howeyc/fsnotify/issues/62
+[#60]: https://github.com/howeyc/fsnotify/issues/60
+[#59]: https://github.com/howeyc/fsnotify/issues/59
+[#49]: https://github.com/howeyc/fsnotify/issues/49
+[#45]: https://github.com/howeyc/fsnotify/issues/45
+[#40]: https://github.com/howeyc/fsnotify/issues/40
+[#36]: https://github.com/howeyc/fsnotify/issues/36
+[#33]: https://github.com/howeyc/fsnotify/issues/33
+[#29]: https://github.com/howeyc/fsnotify/issues/29
+[#25]: https://github.com/howeyc/fsnotify/issues/25
+[#24]: https://github.com/howeyc/fsnotify/issues/24
+[#21]: https://github.com/howeyc/fsnotify/issues/21
diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
new file mode 100644
index 00000000..ea379759
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
@@ -0,0 +1,26 @@
+Thank you for your interest in contributing to fsnotify! We try to review and
+merge PRs in a reasonable timeframe, but please be aware that:
+
+- To avoid "wasted" work, please discuss changes on the issue tracker first. You
+  can just send PRs, but they may end up being rejected for one reason or the
+  other.
+
+- fsnotify is a cross-platform library, and changes must work reasonably well on
+  all supported platforms.
+
+- Changes will need to be compatible; old code should still compile, and the
+  runtime behaviour can't change in ways that are likely to lead to problems for
+  users.
+
+Testing
+-------
+Just `go test ./...` runs all the tests; the CI runs this on all supported
+platforms. Testing different platforms locally can be done with something like
+[goon] or [Vagrant], but this isn't super-easy to set up at the moment.
+
+Use the `-short` flag to make the "stress test" run faster.
+ + +[goon]: https://github.com/arp242/goon +[Vagrant]: https://www.vagrantup.com/ +[integration_test.go]: /integration_test.go diff --git a/vendor/github.com/fsnotify/fsnotify/LICENSE b/vendor/github.com/fsnotify/fsnotify/LICENSE new file mode 100644 index 00000000..fb03ade7 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/LICENSE @@ -0,0 +1,25 @@ +Copyright © 2012 The Go Authors. All rights reserved. +Copyright © fsnotify Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, this + list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. +* Neither the name of Google Inc. nor the names of its contributors may be used + to endorse or promote products derived from this software without specific + prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/fsnotify/fsnotify/README.md b/vendor/github.com/fsnotify/fsnotify/README.md new file mode 100644 index 00000000..d4e6080f --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/README.md @@ -0,0 +1,161 @@ +fsnotify is a Go library to provide cross-platform filesystem notifications on +Windows, Linux, macOS, and BSD systems. + +Go 1.16 or newer is required; the full documentation is at +https://pkg.go.dev/github.com/fsnotify/fsnotify + +**It's best to read the documentation at pkg.go.dev, as it's pinned to the last +released version, whereas this README is for the last development version which +may include additions/changes.** + +--- + +Platform support: + +| Adapter | OS | Status | +| --------------------- | ---------------| -------------------------------------------------------------| +| inotify | Linux 2.6.32+ | Supported | +| kqueue | BSD, macOS | Supported | +| ReadDirectoryChangesW | Windows | Supported | +| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) | +| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/pull/371) | +| fanotify | Linux 5.9+ | [Maybe](https://github.com/fsnotify/fsnotify/issues/114) | +| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) | +| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) | + +Linux and macOS should include Android and iOS, but these are currently untested. + +Usage +----- +A basic example: + +```go +package main + +import ( + "log" + + "github.com/fsnotify/fsnotify" +) + +func main() { + // Create new watcher. + watcher, err := fsnotify.NewWatcher() + if err != nil { + log.Fatal(err) + } + defer watcher.Close() + + // Start listening for events. 
+ go func() { + for { + select { + case event, ok := <-watcher.Events: + if !ok { + return + } + log.Println("event:", event) + if event.Has(fsnotify.Write) { + log.Println("modified file:", event.Name) + } + case err, ok := <-watcher.Errors: + if !ok { + return + } + log.Println("error:", err) + } + } + }() + + // Add a path. + err = watcher.Add("/tmp") + if err != nil { + log.Fatal(err) + } + + // Block main goroutine forever. + <-make(chan struct{}) +} +``` + +Some more examples can be found in [cmd/fsnotify](cmd/fsnotify), which can be +run with: + + % go run ./cmd/fsnotify + +FAQ +--- +### Will a file still be watched when it's moved to another directory? +No, not unless you are watching the location it was moved to. + +### Are subdirectories watched too? +No, you must add watches for any directory you want to watch (a recursive +watcher is on the roadmap: [#18]). + +[#18]: https://github.com/fsnotify/fsnotify/issues/18 + +### Do I have to watch the Error and Event channels in a goroutine? +As of now, yes (you can read both channels in the same goroutine using `select`, +you don't need a separate goroutine for both channels; see the example). + +### Why don't notifications work with NFS, SMB, FUSE, /proc, or /sys? +fsnotify requires support from underlying OS to work. The current NFS and SMB +protocols does not provide network level support for file notifications, and +neither do the /proc and /sys virtual filesystems. + +This could be fixed with a polling watcher ([#9]), but it's not yet implemented. + +[#9]: https://github.com/fsnotify/fsnotify/issues/9 + +Platform-specific notes +----------------------- +### Linux +When a file is removed a REMOVE event won't be emitted until all file +descriptors are closed; it will emit a CHMOD instead: + + fp := os.Open("file") + os.Remove("file") // CHMOD + fp.Close() // REMOVE + +This is the event that inotify sends, so not much can be changed about this. 
+ +The `fs.inotify.max_user_watches` sysctl variable specifies the upper limit for +the number of watches per user, and `fs.inotify.max_user_instances` specifies +the maximum number of inotify instances per user. Every Watcher you create is an +"instance", and every path you add is a "watch". + +These are also exposed in `/proc` as `/proc/sys/fs/inotify/max_user_watches` and +`/proc/sys/fs/inotify/max_user_instances` + +To increase them you can use `sysctl` or write the value to proc file: + + # The default values on Linux 5.18 + sysctl fs.inotify.max_user_watches=124983 + sysctl fs.inotify.max_user_instances=128 + +To make the changes persist on reboot edit `/etc/sysctl.conf` or +`/usr/lib/sysctl.d/50-default.conf` (details differ per Linux distro; check your +distro's documentation): + + fs.inotify.max_user_watches=124983 + fs.inotify.max_user_instances=128 + +Reaching the limit will result in a "no space left on device" or "too many open +files" error. + +### kqueue (macOS, all BSD systems) +kqueue requires opening a file descriptor for every file that's being watched; +so if you're watching a directory with five files then that's six file +descriptors. You will run in to your system's "max open files" limit faster on +these platforms. + +The sysctl variables `kern.maxfiles` and `kern.maxfilesperproc` can be used to +control the maximum number of open files. + +### macOS +Spotlight indexing on macOS can result in multiple events (see [#15]). A temporary +workaround is to add your folder(s) to the *Spotlight Privacy settings* until we +have a native FSEvents implementation (see [#11]). 
+ +[#11]: https://github.com/fsnotify/fsnotify/issues/11 +[#15]: https://github.com/fsnotify/fsnotify/issues/15 diff --git a/vendor/github.com/fsnotify/fsnotify/backend_fen.go b/vendor/github.com/fsnotify/fsnotify/backend_fen.go new file mode 100644 index 00000000..1a95ad8e --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/backend_fen.go @@ -0,0 +1,162 @@ +//go:build solaris +// +build solaris + +package fsnotify + +import ( + "errors" +) + +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). +// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. +// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". +// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. 
+// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. You will run in to your system's "max open files" limit faster on +// these platforms. +// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. +// +// # macOS notes +// +// Spotlight indexing on macOS can result in multiple events (see [#15]). A +// temporary workaround is to add your folder(s) to the "Spotlight Privacy +// Settings" until we have a native FSEvents implementation (see [#11]). +// +// [#11]: https://github.com/fsnotify/fsnotify/issues/11 +// [#15]: https://github.com/fsnotify/fsnotify/issues/15 +type Watcher struct { + // Events sends the filesystem change events. + // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. 
For example when compiling a large Go program + // you may get hundreds of Write events, so you + // probably want to wait until you've stopped receiving + // them (see the dedup example in cmd/fsnotify). + // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // and on kqueue when a file is truncated. On Windows + // it's never sent. + Events chan Event + + // Errors sends any errors. + Errors chan error +} + +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + return nil, errors.New("FEN based watcher not yet supported for fsnotify\n") +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + return nil +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; attempting to watch it more than once will +// return an error. Paths that do not yet exist on the filesystem cannot be +// added. A watch will be automatically removed if the path is deleted. +// +// A path will remain watched if it gets renamed to somewhere else on the same +// filesystem, but the monitor will get removed if the path gets deleted and +// re-created, or if it's moved to a different filesystem. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). +// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many tools update files atomically. 
Instead of "just" writing +// to the file a temporary file will be written to first, and if successful the +// temporary file is moved to to destination removing the original, or some +// variant thereof. The watcher on the original file is now lost, as it no +// longer exists. +// +// Instead, watch the parent directory and use Event.Name to filter out files +// you're not interested in. There is an example of this in [cmd/fsnotify/file.go]. +func (w *Watcher) Add(name string) error { + return nil +} + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +func (w *Watcher) Remove(name string) error { + return nil +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go new file mode 100644 index 00000000..54c77fbb --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go @@ -0,0 +1,459 @@ +//go:build linux +// +build linux + +package fsnotify + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "sync" + "unsafe" + + "golang.org/x/sys/unix" +) + +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). +// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. 
+// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". +// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. You will run in to your system's "max open files" limit faster on +// these platforms. +// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. +// +// # macOS notes +// +// Spotlight indexing on macOS can result in multiple events (see [#15]). A +// temporary workaround is to add your folder(s) to the "Spotlight Privacy +// Settings" until we have a native FSEvents implementation (see [#11]). +// +// [#11]: https://github.com/fsnotify/fsnotify/issues/11 +// [#15]: https://github.com/fsnotify/fsnotify/issues/15 +type Watcher struct { + // Events sends the filesystem change events. 
+ // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. For example when compiling a large Go program + // you may get hundreds of Write events, so you + // probably want to wait until you've stopped receiving + // them (see the dedup example in cmd/fsnotify). + // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // and on kqueue when a file is truncated. On Windows + // it's never sent. + Events chan Event + + // Errors sends any errors. + Errors chan error + + // Store fd here as os.File.Read() will no longer return on close after + // calling Fd(). 
See: https://github.com/golang/go/issues/26439 + fd int + mu sync.Mutex // Map access + inotifyFile *os.File + watches map[string]*watch // Map of inotify watches (key: path) + paths map[int]string // Map of watched paths (key: watch descriptor) + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + doneResp chan struct{} // Channel to respond to Close +} + +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + // Create inotify fd + // Need to set the FD to nonblocking mode in order for SetDeadline methods to work + // Otherwise, blocking i/o operations won't terminate on close + fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK) + if fd == -1 { + return nil, errno + } + + w := &Watcher{ + fd: fd, + inotifyFile: os.NewFile(uintptr(fd), ""), + watches: make(map[string]*watch), + paths: make(map[int]string), + Events: make(chan Event), + Errors: make(chan error), + done: make(chan struct{}), + doneResp: make(chan struct{}), + } + + go w.readEvents() + return w, nil +} + +// Returns true if the event was sent, or false if watcher is closed. +func (w *Watcher) sendEvent(e Event) bool { + select { + case w.Events <- e: + return true + case <-w.done: + } + return false +} + +// Returns true if the error was sent, or false if watcher is closed. +func (w *Watcher) sendError(err error) bool { + select { + case w.Errors <- err: + return true + case <-w.done: + return false + } +} + +func (w *Watcher) isClosed() bool { + select { + case <-w.done: + return true + default: + return false + } +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + w.mu.Lock() + if w.isClosed() { + w.mu.Unlock() + return nil + } + + // Send 'close' signal to goroutine, and set the Watcher to closed. + close(w.done) + w.mu.Unlock() + + // Causes any blocking reads to return with an error, provided the file + // still supports deadline operations. 
+ err := w.inotifyFile.Close() + if err != nil { + return err + } + + // Wait for goroutine to close + <-w.doneResp + + return nil +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; attempting to watch it more than once will +// return an error. Paths that do not yet exist on the filesystem cannot be +// added. A watch will be automatically removed if the path is deleted. +// +// A path will remain watched if it gets renamed to somewhere else on the same +// filesystem, but the monitor will get removed if the path gets deleted and +// re-created, or if it's moved to a different filesystem. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). +// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many tools update files atomically. Instead of "just" writing +// to the file a temporary file will be written to first, and if successful the +// temporary file is moved to to destination removing the original, or some +// variant thereof. The watcher on the original file is now lost, as it no +// longer exists. +// +// Instead, watch the parent directory and use Event.Name to filter out files +// you're not interested in. There is an example of this in [cmd/fsnotify/file.go]. 
+func (w *Watcher) Add(name string) error { + name = filepath.Clean(name) + if w.isClosed() { + return errors.New("inotify instance already closed") + } + + var flags uint32 = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | + unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | + unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF + + w.mu.Lock() + defer w.mu.Unlock() + watchEntry := w.watches[name] + if watchEntry != nil { + flags |= watchEntry.flags | unix.IN_MASK_ADD + } + wd, errno := unix.InotifyAddWatch(w.fd, name, flags) + if wd == -1 { + return errno + } + + if watchEntry == nil { + w.watches[name] = &watch{wd: uint32(wd), flags: flags} + w.paths[wd] = name + } else { + watchEntry.wd = uint32(wd) + watchEntry.flags = flags + } + + return nil +} + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +func (w *Watcher) Remove(name string) error { + name = filepath.Clean(name) + + // Fetch the watch. + w.mu.Lock() + defer w.mu.Unlock() + watch, ok := w.watches[name] + + // Remove it from inotify. + if !ok { + return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) + } + + // We successfully removed the watch if InotifyRmWatch doesn't return an + // error, we need to clean up our internal state to ensure it matches + // inotify's kernel state. + delete(w.paths, int(watch.wd)) + delete(w.watches, name) + + // inotify_rm_watch will return EINVAL if the file has been deleted; + // the inotify will already have been removed. + // watches and pathes are deleted in ignoreLinux() implicitly and asynchronously + // by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE + // so that EINVAL means that the wd is being rm_watch()ed or its file removed + // by another thread and we have not received IN_IGNORE event. 
+ success, errno := unix.InotifyRmWatch(w.fd, watch.wd) + if success == -1 { + // TODO: Perhaps it's not helpful to return an error here in every case; + // The only two possible errors are: + // + // - EBADF, which happens when w.fd is not a valid file descriptor + // of any kind. + // - EINVAL, which is when fd is not an inotify descriptor or wd + // is not a valid watch descriptor. Watch descriptors are + // invalidated when they are removed explicitly or implicitly; + // explicitly by inotify_rm_watch, implicitly when the file they + // are watching is deleted. + return errno + } + + return nil +} + +// WatchList returns all paths added with [Add] (and are not yet removed). +func (w *Watcher) WatchList() []string { + w.mu.Lock() + defer w.mu.Unlock() + + entries := make([]string, 0, len(w.watches)) + for pathname := range w.watches { + entries = append(entries, pathname) + } + + return entries +} + +type watch struct { + wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) + flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) +} + +// readEvents reads from the inotify file descriptor, converts the +// received events into Event objects and sends them via the Events channel +func (w *Watcher) readEvents() { + defer func() { + close(w.doneResp) + close(w.Errors) + close(w.Events) + }() + + var ( + buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events + errno error // Syscall errno + ) + for { + // See if we have been closed. + if w.isClosed() { + return + } + + n, err := w.inotifyFile.Read(buf[:]) + switch { + case errors.Unwrap(err) == os.ErrClosed: + return + case err != nil: + if !w.sendError(err) { + return + } + continue + } + + if n < unix.SizeofInotifyEvent { + var err error + if n == 0 { + // If EOF is received. This should really never happen. + err = io.EOF + } else if n < 0 { + // If an error occurred while reading. 
+ err = errno + } else { + // Read was too short. + err = errors.New("notify: short read in readEvents()") + } + if !w.sendError(err) { + return + } + continue + } + + var offset uint32 + // We don't know how many events we just read into the buffer + // While the offset points to at least one whole event... + for offset <= uint32(n-unix.SizeofInotifyEvent) { + var ( + // Point "raw" to the event in the buffer + raw = (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) + mask = uint32(raw.Mask) + nameLen = uint32(raw.Len) + ) + + if mask&unix.IN_Q_OVERFLOW != 0 { + if !w.sendError(ErrEventOverflow) { + return + } + } + + // If the event happened to the watched directory or the watched file, the kernel + // doesn't append the filename to the event, but we would like to always fill the + // the "Name" field with a valid filename. We retrieve the path of the watch from + // the "paths" map. + w.mu.Lock() + name, ok := w.paths[int(raw.Wd)] + // IN_DELETE_SELF occurs when the file/directory being watched is removed. + // This is a sign to clean up the maps, otherwise we are no longer in sync + // with the inotify kernel state which has already deleted the watch + // automatically. + if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { + delete(w.paths, int(raw.Wd)) + delete(w.watches, name) + } + w.mu.Unlock() + + if nameLen > 0 { + // Point "bytes" at the first byte of the filename + bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] + // The filename is padded with NULL bytes. TrimRight() gets rid of those. 
+ name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + } + + event := w.newEvent(name, mask) + + // Send the events that are not ignored on the events channel + if mask&unix.IN_IGNORED == 0 { + if !w.sendEvent(event) { + return + } + } + + // Move to the next event in the buffer + offset += unix.SizeofInotifyEvent + nameLen + } + } +} + +// newEvent returns an platform-independent Event based on an inotify mask. +func (w *Watcher) newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { + e.Op |= Create + } + if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE { + e.Op |= Remove + } + if mask&unix.IN_MODIFY == unix.IN_MODIFY { + e.Op |= Write + } + if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { + e.Op |= Rename + } + if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { + e.Op |= Chmod + } + return e +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go new file mode 100644 index 00000000..29087469 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go @@ -0,0 +1,707 @@ +//go:build freebsd || openbsd || netbsd || dragonfly || darwin +// +build freebsd openbsd netbsd dragonfly darwin + +package fsnotify + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sync" + + "golang.org/x/sys/unix" +) + +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). +// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. 
For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. +// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". +// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. You will run in to your system's "max open files" limit faster on +// these platforms. +// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. +// +// # macOS notes +// +// Spotlight indexing on macOS can result in multiple events (see [#15]). A +// temporary workaround is to add your folder(s) to the "Spotlight Privacy +// Settings" until we have a native FSEvents implementation (see [#11]). 
+// +// [#11]: https://github.com/fsnotify/fsnotify/issues/11 +// [#15]: https://github.com/fsnotify/fsnotify/issues/15 +type Watcher struct { + // Events sends the filesystem change events. + // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. For example when compiling a large Go program + // you may get hundreds of Write events, so you + // probably want to wait until you've stopped receiving + // them (see the dedup example in cmd/fsnotify). + // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // and on kqueue when a file is truncated. On Windows + // it's never sent. + Events chan Event + + // Errors sends any errors. + Errors chan error + + done chan struct{} + kq int // File descriptor (as returned by the kqueue() syscall). + closepipe [2]int // Pipe used for closing. 
+ mu sync.Mutex // Protects access to watcher data + watches map[string]int // Watched file descriptors (key: path). + watchesByDir map[string]map[int]struct{} // Watched file descriptors indexed by the parent directory (key: dirname(path)). + userWatches map[string]struct{} // Watches added with Watcher.Add() + dirFlags map[string]uint32 // Watched directories to fflags used in kqueue. + paths map[int]pathInfo // File descriptors to path names for processing kqueue events. + fileExists map[string]struct{} // Keep track of if we know this file exists (to stop duplicate create events). + isClosed bool // Set to true when Close() is first called +} + +type pathInfo struct { + name string + isDir bool +} + +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + kq, closepipe, err := newKqueue() + if err != nil { + return nil, err + } + + w := &Watcher{ + kq: kq, + closepipe: closepipe, + watches: make(map[string]int), + watchesByDir: make(map[string]map[int]struct{}), + dirFlags: make(map[string]uint32), + paths: make(map[int]pathInfo), + fileExists: make(map[string]struct{}), + userWatches: make(map[string]struct{}), + Events: make(chan Event), + Errors: make(chan error), + done: make(chan struct{}), + } + + go w.readEvents() + return w, nil +} + +// newKqueue creates a new kernel event queue and returns a descriptor. +// +// This registers a new event on closepipe, which will trigger an event when +// it's closed. This way we can use kevent() without timeout/polling; without +// the closepipe, it would block forever and we wouldn't be able to stop it at +// all. +func newKqueue() (kq int, closepipe [2]int, err error) { + kq, err = unix.Kqueue() + if kq == -1 { + return kq, closepipe, err + } + + // Register the close pipe. + err = unix.Pipe(closepipe[:]) + if err != nil { + unix.Close(kq) + return kq, closepipe, err + } + + // Register changes to listen on the closepipe. 
+ changes := make([]unix.Kevent_t, 1) + // SetKevent converts int to the platform-specific types. + unix.SetKevent(&changes[0], closepipe[0], unix.EVFILT_READ, + unix.EV_ADD|unix.EV_ENABLE|unix.EV_ONESHOT) + + ok, err := unix.Kevent(kq, changes, nil, nil) + if ok == -1 { + unix.Close(kq) + unix.Close(closepipe[0]) + unix.Close(closepipe[1]) + return kq, closepipe, err + } + return kq, closepipe, nil +} + +// Returns true if the event was sent, or false if watcher is closed. +func (w *Watcher) sendEvent(e Event) bool { + select { + case w.Events <- e: + return true + case <-w.done: + } + return false +} + +// Returns true if the error was sent, or false if watcher is closed. +func (w *Watcher) sendError(err error) bool { + select { + case w.Errors <- err: + return true + case <-w.done: + } + return false +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return nil + } + w.isClosed = true + + // copy paths to remove while locked + pathsToRemove := make([]string, 0, len(w.watches)) + for name := range w.watches { + pathsToRemove = append(pathsToRemove, name) + } + w.mu.Unlock() // Unlock before calling Remove, which also locks + for _, name := range pathsToRemove { + w.Remove(name) + } + + // Send "quit" message to the reader goroutine. + unix.Close(w.closepipe[1]) + close(w.done) + + return nil +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; attempting to watch it more than once will +// return an error. Paths that do not yet exist on the filesystem cannot be +// added. A watch will be automatically removed if the path is deleted. +// +// A path will remain watched if it gets renamed to somewhere else on the same +// filesystem, but the monitor will get removed if the path gets deleted and +// re-created, or if it's moved to a different filesystem. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) 
or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). +// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many tools update files atomically. Instead of "just" writing +// to the file a temporary file will be written to first, and if successful the +// temporary file is moved to to destination removing the original, or some +// variant thereof. The watcher on the original file is now lost, as it no +// longer exists. +// +// Instead, watch the parent directory and use Event.Name to filter out files +// you're not interested in. There is an example of this in [cmd/fsnotify/file.go]. +func (w *Watcher) Add(name string) error { + w.mu.Lock() + w.userWatches[name] = struct{}{} + w.mu.Unlock() + _, err := w.addWatch(name, noteAllEvents) + return err +} + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. 
+func (w *Watcher) Remove(name string) error { + name = filepath.Clean(name) + w.mu.Lock() + watchfd, ok := w.watches[name] + w.mu.Unlock() + if !ok { + return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) + } + + err := w.register([]int{watchfd}, unix.EV_DELETE, 0) + if err != nil { + return err + } + + unix.Close(watchfd) + + w.mu.Lock() + isDir := w.paths[watchfd].isDir + delete(w.watches, name) + delete(w.userWatches, name) + + parentName := filepath.Dir(name) + delete(w.watchesByDir[parentName], watchfd) + + if len(w.watchesByDir[parentName]) == 0 { + delete(w.watchesByDir, parentName) + } + + delete(w.paths, watchfd) + delete(w.dirFlags, name) + delete(w.fileExists, name) + w.mu.Unlock() + + // Find all watched paths that are in this directory that are not external. + if isDir { + var pathsToRemove []string + w.mu.Lock() + for fd := range w.watchesByDir[name] { + path := w.paths[fd] + if _, ok := w.userWatches[path.name]; !ok { + pathsToRemove = append(pathsToRemove, path.name) + } + } + w.mu.Unlock() + for _, name := range pathsToRemove { + // Since these are internal, not much sense in propagating error + // to the user, as that will just confuse them with an error about + // a path they did not explicitly watch themselves. + w.Remove(name) + } + } + + return nil +} + +// WatchList returns all paths added with [Add] (and are not yet removed). +func (w *Watcher) WatchList() []string { + w.mu.Lock() + defer w.mu.Unlock() + + entries := make([]string, 0, len(w.userWatches)) + for pathname := range w.userWatches { + entries = append(entries, pathname) + } + + return entries +} + +// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) +const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME + +// addWatch adds name to the watched file set. +// The flags are interpreted as described in kevent(2). 
+// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks. +func (w *Watcher) addWatch(name string, flags uint32) (string, error) { + var isDir bool + // Make ./name and name equivalent + name = filepath.Clean(name) + + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return "", errors.New("kevent instance already closed") + } + watchfd, alreadyWatching := w.watches[name] + // We already have a watch, but we can still override flags. + if alreadyWatching { + isDir = w.paths[watchfd].isDir + } + w.mu.Unlock() + + if !alreadyWatching { + fi, err := os.Lstat(name) + if err != nil { + return "", err + } + + // Don't watch sockets or named pipes + if (fi.Mode()&os.ModeSocket == os.ModeSocket) || (fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe) { + return "", nil + } + + // Follow Symlinks + // + // Linux can add unresolvable symlinks to the watch list without issue, + // and Windows can't do symlinks period. To maintain consistency, we + // will act like everything is fine if the link can't be resolved. + // There will simply be no file events for broken symlinks. Hence the + // returns of nil on errors. + if fi.Mode()&os.ModeSymlink == os.ModeSymlink { + name, err = filepath.EvalSymlinks(name) + if err != nil { + return "", nil + } + + w.mu.Lock() + _, alreadyWatching = w.watches[name] + w.mu.Unlock() + + if alreadyWatching { + return name, nil + } + + fi, err = os.Lstat(name) + if err != nil { + return "", nil + } + } + + // Retry on EINTR; open() can return EINTR in practice on macOS. + // See #354, and go issues 11180 and 39237. 
+ for { + watchfd, err = unix.Open(name, openMode, 0) + if err == nil { + break + } + if errors.Is(err, unix.EINTR) { + continue + } + + return "", err + } + + isDir = fi.IsDir() + } + + err := w.register([]int{watchfd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags) + if err != nil { + unix.Close(watchfd) + return "", err + } + + if !alreadyWatching { + w.mu.Lock() + parentName := filepath.Dir(name) + w.watches[name] = watchfd + + watchesByDir, ok := w.watchesByDir[parentName] + if !ok { + watchesByDir = make(map[int]struct{}, 1) + w.watchesByDir[parentName] = watchesByDir + } + watchesByDir[watchfd] = struct{}{} + + w.paths[watchfd] = pathInfo{name: name, isDir: isDir} + w.mu.Unlock() + } + + if isDir { + // Watch the directory if it has not been watched before, + // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) + w.mu.Lock() + + watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && + (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) + // Store flags so this watch can be updated later + w.dirFlags[name] = flags + w.mu.Unlock() + + if watchDir { + if err := w.watchDirectoryFiles(name); err != nil { + return "", err + } + } + } + return name, nil +} + +// readEvents reads from kqueue and converts the received kevents into +// Event values that it sends down the Events channel. +func (w *Watcher) readEvents() { + defer func() { + err := unix.Close(w.kq) + if err != nil { + w.Errors <- err + } + unix.Close(w.closepipe[0]) + close(w.Events) + close(w.Errors) + }() + + eventBuffer := make([]unix.Kevent_t, 10) + for closed := false; !closed; { + kevents, err := w.read(eventBuffer) + // EINTR is okay, the syscall was interrupted before timeout expired. 
+ if err != nil && err != unix.EINTR { + if !w.sendError(fmt.Errorf("fsnotify.readEvents: %w", err)) { + closed = true + } + continue + } + + // Flush the events we received to the Events channel + for _, kevent := range kevents { + var ( + watchfd = int(kevent.Ident) + mask = uint32(kevent.Fflags) + ) + + // Shut down the loop when the pipe is closed, but only after all + // other events have been processed. + if watchfd == w.closepipe[0] { + closed = true + continue + } + + w.mu.Lock() + path := w.paths[watchfd] + w.mu.Unlock() + + event := w.newEvent(path.name, mask) + + if path.isDir && !event.Has(Remove) { + // Double check to make sure the directory exists. This can + // happen when we do a rm -fr on a recursively watched folders + // and we receive a modification event first but the folder has + // been deleted and later receive the delete event. + if _, err := os.Lstat(event.Name); os.IsNotExist(err) { + event.Op |= Remove + } + } + + if event.Has(Rename) || event.Has(Remove) { + w.Remove(event.Name) + w.mu.Lock() + delete(w.fileExists, event.Name) + w.mu.Unlock() + } + + if path.isDir && event.Has(Write) && !event.Has(Remove) { + w.sendDirectoryChangeEvents(event.Name) + } else { + if !w.sendEvent(event) { + closed = true + continue + } + } + + if event.Has(Remove) { + // Look for a file that may have overwritten this. + // For example, mv f1 f2 will delete f2, then create f2. + if path.isDir { + fileDir := filepath.Clean(event.Name) + w.mu.Lock() + _, found := w.watches[fileDir] + w.mu.Unlock() + if found { + // make sure the directory exists before we watch for changes. When we + // do a recursive watch and perform rm -fr, the parent directory might + // have gone missing, ignore the missing directory and let the + // upcoming delete event remove the watch from the parent directory. 
+ if _, err := os.Lstat(fileDir); err == nil { + w.sendDirectoryChangeEvents(fileDir) + } + } + } else { + filePath := filepath.Clean(event.Name) + if fileInfo, err := os.Lstat(filePath); err == nil { + w.sendFileCreatedEventIfNew(filePath, fileInfo) + } + } + } + } + } +} + +// newEvent returns an platform-independent Event based on kqueue Fflags. +func (w *Watcher) newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { + e.Op |= Remove + } + if mask&unix.NOTE_WRITE == unix.NOTE_WRITE { + e.Op |= Write + } + if mask&unix.NOTE_RENAME == unix.NOTE_RENAME { + e.Op |= Rename + } + if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB { + e.Op |= Chmod + } + return e +} + +// watchDirectoryFiles to mimic inotify when adding a watch on a directory +func (w *Watcher) watchDirectoryFiles(dirPath string) error { + // Get all files + files, err := ioutil.ReadDir(dirPath) + if err != nil { + return err + } + + for _, fileInfo := range files { + path := filepath.Join(dirPath, fileInfo.Name()) + + cleanPath, err := w.internalWatch(path, fileInfo) + if err != nil { + // No permission to read the file; that's not a problem: just skip. + // But do add it to w.fileExists to prevent it from being picked up + // as a "new" file later (it still shows up in the directory + // listing). + switch { + case errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM): + cleanPath = filepath.Clean(path) + default: + return fmt.Errorf("%q: %w", filepath.Join(dirPath, fileInfo.Name()), err) + } + } + + w.mu.Lock() + w.fileExists[cleanPath] = struct{}{} + w.mu.Unlock() + } + + return nil +} + +// Search the directory for new files and send an event for them. +// +// This functionality is to have the BSD watcher match the inotify, which sends +// a create event for files created in a watched directory. 
+func (w *Watcher) sendDirectoryChangeEvents(dir string) { + // Get all files + files, err := ioutil.ReadDir(dir) + if err != nil { + if !w.sendError(fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err)) { + return + } + } + + // Search for new files + for _, fi := range files { + err := w.sendFileCreatedEventIfNew(filepath.Join(dir, fi.Name()), fi) + if err != nil { + return + } + } +} + +// sendFileCreatedEvent sends a create event if the file isn't already being tracked. +func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) { + w.mu.Lock() + _, doesExist := w.fileExists[filePath] + w.mu.Unlock() + if !doesExist { + if !w.sendEvent(Event{Name: filePath, Op: Create}) { + return + } + } + + // like watchDirectoryFiles (but without doing another ReadDir) + filePath, err = w.internalWatch(filePath, fileInfo) + if err != nil { + return err + } + + w.mu.Lock() + w.fileExists[filePath] = struct{}{} + w.mu.Unlock() + + return nil +} + +func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) { + if fileInfo.IsDir() { + // mimic Linux providing delete events for subdirectories + // but preserve the flags used if currently watching subdirectory + w.mu.Lock() + flags := w.dirFlags[name] + w.mu.Unlock() + + flags |= unix.NOTE_DELETE | unix.NOTE_RENAME + return w.addWatch(name, flags) + } + + // watch file to mimic Linux inotify + return w.addWatch(name, noteAllEvents) +} + +// Register events with the queue. +func (w *Watcher) register(fds []int, flags int, fflags uint32) error { + changes := make([]unix.Kevent_t, len(fds)) + for i, fd := range fds { + // SetKevent converts int to the platform-specific types. + unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags) + changes[i].Fflags = fflags + } + + // Register the events. + success, err := unix.Kevent(w.kq, changes, nil, nil) + if success == -1 { + return err + } + return nil +} + +// read retrieves pending events, or waits until an event occurs. 
+func (w *Watcher) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) { + n, err := unix.Kevent(w.kq, nil, events, nil) + if err != nil { + return nil, err + } + return events[0:n], nil +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_other.go b/vendor/github.com/fsnotify/fsnotify/backend_other.go new file mode 100644 index 00000000..a9bb1c3c --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/backend_other.go @@ -0,0 +1,66 @@ +//go:build !darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows +// +build !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows + +package fsnotify + +import ( + "fmt" + "runtime" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct{} + +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + return nil, fmt.Errorf("fsnotify not supported on %s", runtime.GOOS) +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + return nil +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; attempting to watch it more than once will +// return an error. Paths that do not yet exist on the filesystem cannot be +// added. A watch will be automatically removed if the path is deleted. +// +// A path will remain watched if it gets renamed to somewhere else on the same +// filesystem, but the monitor will get removed if the path gets deleted and +// re-created, or if it's moved to a different filesystem. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). 
+// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many tools update files atomically. Instead of "just" writing +// to the file a temporary file will be written to first, and if successful the +// temporary file is moved to to destination removing the original, or some +// variant thereof. The watcher on the original file is now lost, as it no +// longer exists. +// +// Instead, watch the parent directory and use Event.Name to filter out files +// you're not interested in. There is an example of this in [cmd/fsnotify/file.go]. +func (w *Watcher) Add(name string) error { + return nil +} + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +func (w *Watcher) Remove(name string) error { + return nil +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_windows.go b/vendor/github.com/fsnotify/fsnotify/backend_windows.go new file mode 100644 index 00000000..ae392867 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/backend_windows.go @@ -0,0 +1,746 @@ +//go:build windows +// +build windows + +package fsnotify + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "reflect" + "runtime" + "strings" + "sync" + "unsafe" + + "golang.org/x/sys/windows" +) + +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). +// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. 
For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. +// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". +// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. You will run in to your system's "max open files" limit faster on +// these platforms. +// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. +// +// # macOS notes +// +// Spotlight indexing on macOS can result in multiple events (see [#15]). A +// temporary workaround is to add your folder(s) to the "Spotlight Privacy +// Settings" until we have a native FSEvents implementation (see [#11]). 
+// +// [#11]: https://github.com/fsnotify/fsnotify/issues/11 +// [#15]: https://github.com/fsnotify/fsnotify/issues/15 +type Watcher struct { + // Events sends the filesystem change events. + // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. For example when compiling a large Go program + // you may get hundreds of Write events, so you + // probably want to wait until you've stopped receiving + // them (see the dedup example in cmd/fsnotify). + // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // and on kqueue when a file is truncated. On Windows + // it's never sent. + Events chan Event + + // Errors sends any errors. 
+ Errors chan error + + port windows.Handle // Handle to completion port + input chan *input // Inputs to the reader are sent on this channel + quit chan chan<- error + + mu sync.Mutex // Protects access to watches, isClosed + watches watchMap // Map of watches (key: i-number) + isClosed bool // Set to true when Close() is first called +} + +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0) + if err != nil { + return nil, os.NewSyscallError("CreateIoCompletionPort", err) + } + w := &Watcher{ + port: port, + watches: make(watchMap), + input: make(chan *input, 1), + Events: make(chan Event, 50), + Errors: make(chan error), + quit: make(chan chan<- error, 1), + } + go w.readEvents() + return w, nil +} + +func (w *Watcher) sendEvent(name string, mask uint64) bool { + if mask == 0 { + return false + } + + event := w.newEvent(name, uint32(mask)) + select { + case ch := <-w.quit: + w.quit <- ch + case w.Events <- event: + } + return true +} + +// Returns true if the error was sent, or false if watcher is closed. +func (w *Watcher) sendError(err error) bool { + select { + case w.Errors <- err: + return true + case <-w.quit: + } + return false +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return nil + } + w.isClosed = true + w.mu.Unlock() + + // Send "quit" message to the reader goroutine + ch := make(chan error) + w.quit <- ch + if err := w.wakeupReader(); err != nil { + return err + } + return <-ch +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; attempting to watch it more than once will +// return an error. Paths that do not yet exist on the filesystem cannot be +// added. A watch will be automatically removed if the path is deleted. 
+// +// A path will remain watched if it gets renamed to somewhere else on the same +// filesystem, but the monitor will get removed if the path gets deleted and +// re-created, or if it's moved to a different filesystem. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). +// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many tools update files atomically. Instead of "just" writing +// to the file a temporary file will be written to first, and if successful the +// temporary file is moved to to destination removing the original, or some +// variant thereof. The watcher on the original file is now lost, as it no +// longer exists. +// +// Instead, watch the parent directory and use Event.Name to filter out files +// you're not interested in. There is an example of this in [cmd/fsnotify/file.go]. +func (w *Watcher) Add(name string) error { + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return errors.New("watcher already closed") + } + w.mu.Unlock() + + in := &input{ + op: opAddWatch, + path: filepath.Clean(name), + flags: sysFSALLEVENTS, + reply: make(chan error), + } + w.input <- in + if err := w.wakeupReader(); err != nil { + return err + } + return <-in.reply +} + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. 
+func (w *Watcher) Remove(name string) error { + in := &input{ + op: opRemoveWatch, + path: filepath.Clean(name), + reply: make(chan error), + } + w.input <- in + if err := w.wakeupReader(); err != nil { + return err + } + return <-in.reply +} + +// WatchList returns all paths added with [Add] (and are not yet removed). +func (w *Watcher) WatchList() []string { + w.mu.Lock() + defer w.mu.Unlock() + + entries := make([]string, 0, len(w.watches)) + for _, entry := range w.watches { + for _, watchEntry := range entry { + entries = append(entries, watchEntry.path) + } + } + + return entries +} + +// These options are from the old golang.org/x/exp/winfsnotify, where you could +// add various options to the watch. This has long since been removed. +// +// The "sys" in the name is misleading as they're not part of any "system". +// +// This should all be removed at some point, and just use windows.FILE_NOTIFY_* +const ( + sysFSALLEVENTS = 0xfff + sysFSATTRIB = 0x4 + sysFSCREATE = 0x100 + sysFSDELETE = 0x200 + sysFSDELETESELF = 0x400 + sysFSMODIFY = 0x2 + sysFSMOVE = 0xc0 + sysFSMOVEDFROM = 0x40 + sysFSMOVEDTO = 0x80 + sysFSMOVESELF = 0x800 + sysFSIGNORED = 0x8000 +) + +func (w *Watcher) newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { + e.Op |= Create + } + if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF { + e.Op |= Remove + } + if mask&sysFSMODIFY == sysFSMODIFY { + e.Op |= Write + } + if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM { + e.Op |= Rename + } + if mask&sysFSATTRIB == sysFSATTRIB { + e.Op |= Chmod + } + return e +} + +const ( + opAddWatch = iota + opRemoveWatch +) + +const ( + provisional uint64 = 1 << (32 + iota) +) + +type input struct { + op int + path string + flags uint32 + reply chan error +} + +type inode struct { + handle windows.Handle + volume uint32 + index uint64 
+} + +type watch struct { + ov windows.Overlapped + ino *inode // i-number + path string // Directory path + mask uint64 // Directory itself is being watched with these notify flags + names map[string]uint64 // Map of names being watched and their notify flags + rename string // Remembers the old name while renaming a file + buf [65536]byte // 64K buffer +} + +type ( + indexMap map[uint64]*watch + watchMap map[uint32]indexMap +) + +func (w *Watcher) wakeupReader() error { + err := windows.PostQueuedCompletionStatus(w.port, 0, 0, nil) + if err != nil { + return os.NewSyscallError("PostQueuedCompletionStatus", err) + } + return nil +} + +func (w *Watcher) getDir(pathname string) (dir string, err error) { + attr, err := windows.GetFileAttributes(windows.StringToUTF16Ptr(pathname)) + if err != nil { + return "", os.NewSyscallError("GetFileAttributes", err) + } + if attr&windows.FILE_ATTRIBUTE_DIRECTORY != 0 { + dir = pathname + } else { + dir, _ = filepath.Split(pathname) + dir = filepath.Clean(dir) + } + return +} + +func (w *Watcher) getIno(path string) (ino *inode, err error) { + h, err := windows.CreateFile(windows.StringToUTF16Ptr(path), + windows.FILE_LIST_DIRECTORY, + windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE, + nil, windows.OPEN_EXISTING, + windows.FILE_FLAG_BACKUP_SEMANTICS|windows.FILE_FLAG_OVERLAPPED, 0) + if err != nil { + return nil, os.NewSyscallError("CreateFile", err) + } + + var fi windows.ByHandleFileInformation + err = windows.GetFileInformationByHandle(h, &fi) + if err != nil { + windows.CloseHandle(h) + return nil, os.NewSyscallError("GetFileInformationByHandle", err) + } + ino = &inode{ + handle: h, + volume: fi.VolumeSerialNumber, + index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), + } + return ino, nil +} + +// Must run within the I/O thread. +func (m watchMap) get(ino *inode) *watch { + if i := m[ino.volume]; i != nil { + return i[ino.index] + } + return nil +} + +// Must run within the I/O thread. 
+func (m watchMap) set(ino *inode, watch *watch) { + i := m[ino.volume] + if i == nil { + i = make(indexMap) + m[ino.volume] = i + } + i[ino.index] = watch +} + +// Must run within the I/O thread. +func (w *Watcher) addWatch(pathname string, flags uint64) error { + dir, err := w.getDir(pathname) + if err != nil { + return err + } + + ino, err := w.getIno(dir) + if err != nil { + return err + } + w.mu.Lock() + watchEntry := w.watches.get(ino) + w.mu.Unlock() + if watchEntry == nil { + _, err := windows.CreateIoCompletionPort(ino.handle, w.port, 0, 0) + if err != nil { + windows.CloseHandle(ino.handle) + return os.NewSyscallError("CreateIoCompletionPort", err) + } + watchEntry = &watch{ + ino: ino, + path: dir, + names: make(map[string]uint64), + } + w.mu.Lock() + w.watches.set(ino, watchEntry) + w.mu.Unlock() + flags |= provisional + } else { + windows.CloseHandle(ino.handle) + } + if pathname == dir { + watchEntry.mask |= flags + } else { + watchEntry.names[filepath.Base(pathname)] |= flags + } + + err = w.startRead(watchEntry) + if err != nil { + return err + } + + if pathname == dir { + watchEntry.mask &= ^provisional + } else { + watchEntry.names[filepath.Base(pathname)] &= ^provisional + } + return nil +} + +// Must run within the I/O thread. 
+func (w *Watcher) remWatch(pathname string) error { + dir, err := w.getDir(pathname) + if err != nil { + return err + } + ino, err := w.getIno(dir) + if err != nil { + return err + } + + w.mu.Lock() + watch := w.watches.get(ino) + w.mu.Unlock() + + err = windows.CloseHandle(ino.handle) + if err != nil { + w.sendError(os.NewSyscallError("CloseHandle", err)) + } + if watch == nil { + return fmt.Errorf("%w: %s", ErrNonExistentWatch, pathname) + } + if pathname == dir { + w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + watch.mask = 0 + } else { + name := filepath.Base(pathname) + w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) + delete(watch.names, name) + } + + return w.startRead(watch) +} + +// Must run within the I/O thread. +func (w *Watcher) deleteWatch(watch *watch) { + for name, mask := range watch.names { + if mask&provisional == 0 { + w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) + } + delete(watch.names, name) + } + if watch.mask != 0 { + if watch.mask&provisional == 0 { + w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + } + watch.mask = 0 + } +} + +// Must run within the I/O thread. 
+func (w *Watcher) startRead(watch *watch) error { + err := windows.CancelIo(watch.ino.handle) + if err != nil { + w.sendError(os.NewSyscallError("CancelIo", err)) + w.deleteWatch(watch) + } + mask := w.toWindowsFlags(watch.mask) + for _, m := range watch.names { + mask |= w.toWindowsFlags(m) + } + if mask == 0 { + err := windows.CloseHandle(watch.ino.handle) + if err != nil { + w.sendError(os.NewSyscallError("CloseHandle", err)) + } + w.mu.Lock() + delete(w.watches[watch.ino.volume], watch.ino.index) + w.mu.Unlock() + return nil + } + + rdErr := windows.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0], + uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0) + if rdErr != nil { + err := os.NewSyscallError("ReadDirectoryChanges", rdErr) + if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { + // Watched directory was probably removed + w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + err = nil + } + w.deleteWatch(watch) + w.startRead(watch) + return err + } + return nil +} + +// readEvents reads from the I/O completion port, converts the +// received events into Event objects and sends them via the Events channel. +// Entry point to the I/O thread. +func (w *Watcher) readEvents() { + var ( + n uint32 + key uintptr + ov *windows.Overlapped + ) + runtime.LockOSThread() + + for { + qErr := windows.GetQueuedCompletionStatus(w.port, &n, &key, &ov, windows.INFINITE) + // This error is handled after the watch == nil check below. NOTE: this + // seems odd, note sure if it's correct. 
+ + watch := (*watch)(unsafe.Pointer(ov)) + if watch == nil { + select { + case ch := <-w.quit: + w.mu.Lock() + var indexes []indexMap + for _, index := range w.watches { + indexes = append(indexes, index) + } + w.mu.Unlock() + for _, index := range indexes { + for _, watch := range index { + w.deleteWatch(watch) + w.startRead(watch) + } + } + + err := windows.CloseHandle(w.port) + if err != nil { + err = os.NewSyscallError("CloseHandle", err) + } + close(w.Events) + close(w.Errors) + ch <- err + return + case in := <-w.input: + switch in.op { + case opAddWatch: + in.reply <- w.addWatch(in.path, uint64(in.flags)) + case opRemoveWatch: + in.reply <- w.remWatch(in.path) + } + default: + } + continue + } + + switch qErr { + case windows.ERROR_MORE_DATA: + if watch == nil { + w.sendError(errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")) + } else { + // The i/o succeeded but the buffer is full. + // In theory we should be building up a full packet. + // In practice we can get away with just carrying on. 
+ n = uint32(unsafe.Sizeof(watch.buf)) + } + case windows.ERROR_ACCESS_DENIED: + // Watched directory was probably removed + w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + w.deleteWatch(watch) + w.startRead(watch) + continue + case windows.ERROR_OPERATION_ABORTED: + // CancelIo was called on this handle + continue + default: + w.sendError(os.NewSyscallError("GetQueuedCompletionPort", qErr)) + continue + case nil: + } + + var offset uint32 + for { + if n == 0 { + w.sendError(errors.New("short read in readEvents()")) + break + } + + // Point "raw" to the event in the buffer + raw := (*windows.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset])) + + // Create a buf that is the size of the path name + size := int(raw.FileNameLength / 2) + var buf []uint16 + // TODO: Use unsafe.Slice in Go 1.17; https://stackoverflow.com/questions/51187973 + sh := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + sh.Data = uintptr(unsafe.Pointer(&raw.FileName)) + sh.Len = size + sh.Cap = size + name := windows.UTF16ToString(buf) + fullname := filepath.Join(watch.path, name) + + var mask uint64 + switch raw.Action { + case windows.FILE_ACTION_REMOVED: + mask = sysFSDELETESELF + case windows.FILE_ACTION_MODIFIED: + mask = sysFSMODIFY + case windows.FILE_ACTION_RENAMED_OLD_NAME: + watch.rename = name + case windows.FILE_ACTION_RENAMED_NEW_NAME: + // Update saved path of all sub-watches. 
+ old := filepath.Join(watch.path, watch.rename) + w.mu.Lock() + for _, watchMap := range w.watches { + for _, ww := range watchMap { + if strings.HasPrefix(ww.path, old) { + ww.path = filepath.Join(fullname, strings.TrimPrefix(ww.path, old)) + } + } + } + w.mu.Unlock() + + if watch.names[watch.rename] != 0 { + watch.names[name] |= watch.names[watch.rename] + delete(watch.names, watch.rename) + mask = sysFSMOVESELF + } + } + + sendNameEvent := func() { + w.sendEvent(fullname, watch.names[name]&mask) + } + if raw.Action != windows.FILE_ACTION_RENAMED_NEW_NAME { + sendNameEvent() + } + if raw.Action == windows.FILE_ACTION_REMOVED { + w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) + delete(watch.names, name) + } + + w.sendEvent(fullname, watch.mask&w.toFSnotifyFlags(raw.Action)) + if raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME { + fullname = filepath.Join(watch.path, watch.rename) + sendNameEvent() + } + + // Move to the next event in the buffer + if raw.NextEntryOffset == 0 { + break + } + offset += raw.NextEntryOffset + + // Error! 
+ if offset >= n { + w.sendError(errors.New( + "Windows system assumed buffer larger than it is, events have likely been missed.")) + break + } + } + + if err := w.startRead(watch); err != nil { + w.sendError(err) + } + } +} + +func (w *Watcher) toWindowsFlags(mask uint64) uint32 { + var m uint32 + if mask&sysFSMODIFY != 0 { + m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE + } + if mask&sysFSATTRIB != 0 { + m |= windows.FILE_NOTIFY_CHANGE_ATTRIBUTES + } + if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 { + m |= windows.FILE_NOTIFY_CHANGE_FILE_NAME | windows.FILE_NOTIFY_CHANGE_DIR_NAME + } + return m +} + +func (w *Watcher) toFSnotifyFlags(action uint32) uint64 { + switch action { + case windows.FILE_ACTION_ADDED: + return sysFSCREATE + case windows.FILE_ACTION_REMOVED: + return sysFSDELETE + case windows.FILE_ACTION_MODIFIED: + return sysFSMODIFY + case windows.FILE_ACTION_RENAMED_OLD_NAME: + return sysFSMOVEDFROM + case windows.FILE_ACTION_RENAMED_NEW_NAME: + return sysFSMOVEDTO + } + return 0 +} diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go new file mode 100644 index 00000000..30a5bf0f --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go @@ -0,0 +1,81 @@ +//go:build !plan9 +// +build !plan9 + +// Package fsnotify provides a cross-platform interface for file system +// notifications. +package fsnotify + +import ( + "errors" + "fmt" + "strings" +) + +// Event represents a file system notification. +type Event struct { + // Path to the file or directory. + // + // Paths are relative to the input; for example with Add("dir") the Name + // will be set to "dir/file" if you create that file, but if you use + // Add("/path/to/dir") it will be "/path/to/dir/file". + Name string + + // File operation that triggered the event. + // + // This is a bitmask and some systems may send multiple operations at once. + // Use the Event.Has() method instead of comparing with ==. 
+ Op Op +} + +// Op describes a set of file operations. +type Op uint32 + +// The operations fsnotify can trigger; see the documentation on [Watcher] for a +// full description, and check them with [Event.Has]. +const ( + Create Op = 1 << iota + Write + Remove + Rename + Chmod +) + +// Common errors that can be reported by a watcher +var ( + ErrNonExistentWatch = errors.New("can't remove non-existent watcher") + ErrEventOverflow = errors.New("fsnotify queue overflow") +) + +func (op Op) String() string { + var b strings.Builder + if op.Has(Create) { + b.WriteString("|CREATE") + } + if op.Has(Remove) { + b.WriteString("|REMOVE") + } + if op.Has(Write) { + b.WriteString("|WRITE") + } + if op.Has(Rename) { + b.WriteString("|RENAME") + } + if op.Has(Chmod) { + b.WriteString("|CHMOD") + } + if b.Len() == 0 { + return "[no events]" + } + return b.String()[1:] +} + +// Has reports if this operation has the given operation. +func (o Op) Has(h Op) bool { return o&h == h } + +// Has reports if this event has the given operation. +func (e Event) Has(op Op) bool { return e.Op.Has(op) } + +// String returns a string representation of the event with their path. +func (e Event) String() string { + return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name) +} diff --git a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh b/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh new file mode 100644 index 00000000..b09ef768 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh @@ -0,0 +1,208 @@ +#!/usr/bin/env zsh +[ "${ZSH_VERSION:-}" = "" ] && echo >&2 "Only works with zsh" && exit 1 +setopt err_exit no_unset pipefail extended_glob + +# Simple script to update the godoc comments on all watchers. 
Probably took me +# more time to write this than doing it manually, but ah well 🙃 + +watcher=$(</tmp/x + print -r -- $cmt >>/tmp/x + tail -n+$(( end + 1 )) $file >>/tmp/x + mv /tmp/x $file + done +} + +set-cmt '^type Watcher struct ' $watcher +set-cmt '^func NewWatcher(' $new +set-cmt '^func (w \*Watcher) Add(' $add +set-cmt '^func (w \*Watcher) Remove(' $remove +set-cmt '^func (w \*Watcher) Close(' $close +set-cmt '^func (w \*Watcher) WatchList(' $watchlist +set-cmt '^[[:space:]]*Events *chan Event$' $events +set-cmt '^[[:space:]]*Errors *chan error$' $errors diff --git a/vendor/github.com/fsnotify/fsnotify/system_bsd.go b/vendor/github.com/fsnotify/fsnotify/system_bsd.go new file mode 100644 index 00000000..4322b0b8 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/system_bsd.go @@ -0,0 +1,8 @@ +//go:build freebsd || openbsd || netbsd || dragonfly +// +build freebsd openbsd netbsd dragonfly + +package fsnotify + +import "golang.org/x/sys/unix" + +const openMode = unix.O_NONBLOCK | unix.O_RDONLY | unix.O_CLOEXEC diff --git a/vendor/github.com/fsnotify/fsnotify/system_darwin.go b/vendor/github.com/fsnotify/fsnotify/system_darwin.go new file mode 100644 index 00000000..5da5ffa7 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/system_darwin.go @@ -0,0 +1,9 @@ +//go:build darwin +// +build darwin + +package fsnotify + +import "golang.org/x/sys/unix" + +// note: this constant is not defined on BSD +const openMode = unix.O_EVTONLY | unix.O_CLOEXEC diff --git a/vendor/github.com/go-co-op/gocron/.gitignore b/vendor/github.com/go-co-op/gocron/.gitignore new file mode 100644 index 00000000..f6409f90 --- /dev/null +++ b/vendor/github.com/go-co-op/gocron/.gitignore @@ -0,0 +1,19 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test +local_testing + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include 
it) +vendor/ + +# IDE project files +.idea diff --git a/vendor/github.com/go-co-op/gocron/.golangci.yaml b/vendor/github.com/go-co-op/gocron/.golangci.yaml new file mode 100644 index 00000000..611fb365 --- /dev/null +++ b/vendor/github.com/go-co-op/gocron/.golangci.yaml @@ -0,0 +1,49 @@ +run: + timeout: 2m + issues-exit-code: 1 + tests: true + +issues: + max-same-issues: 100 + exclude-rules: + - path: _test\.go + linters: + - bodyclose + - errcheck + - gosec + +linters: + enable: + - bodyclose + - deadcode + - errcheck + - gofmt + - revive + - gosec + - gosimple + - govet + - ineffassign + - misspell + - staticcheck + - structcheck + - typecheck + - unused + - varcheck + +output: + # colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number" + format: colored-line-number + # print lines of code with issue, default is true + print-issued-lines: true + # print linter name in the end of issue text, default is true + print-linter-name: true + # make issues output unique by line, default is true + uniq-by-line: true + # add a prefix to the output file references; default is no prefix + path-prefix: "" + # sorts results by: filepath, line and column + sort-results: true + +linters-settings: + golint: + min-confidence: 0.8 diff --git a/vendor/github.com/go-co-op/gocron/CODE_OF_CONDUCT.md b/vendor/github.com/go-co-op/gocron/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..7d913b55 --- /dev/null +++ b/vendor/github.com/go-co-op/gocron/CODE_OF_CONDUCT.md @@ -0,0 +1,73 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone. And we mean everyone! 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and kind language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. 
+ +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team initially on Slack to coordinate private communication. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html + +[homepage]: https://www.contributor-covenant.org + +For answers to common questions about this code of conduct, see +https://www.contributor-covenant.org/faq diff --git a/vendor/github.com/go-co-op/gocron/CONTRIBUTING.md b/vendor/github.com/go-co-op/gocron/CONTRIBUTING.md new file mode 100644 index 00000000..b2d3be83 --- /dev/null +++ b/vendor/github.com/go-co-op/gocron/CONTRIBUTING.md @@ -0,0 +1,40 @@ +# Contributing to gocron + +Thank you for coming to contribute to gocron! We welcome new ideas, PRs and general feedback. 
+ +## Reporting Bugs + +If you find a bug then please let the project know by opening an issue after doing the following: + +- Do a quick search of the existing issues to make sure the bug isn't already reported +- Try and make a minimal list of steps that can reliably reproduce the bug you are experiencing +- Collect as much information as you can to help identify what the issue is (project version, configuration files, etc) + +## Suggesting Enhancements + +If you have a use case that you don't see a way to support yet, we would welcome the feedback in an issue. Before opening the issue, please consider: + +- Is this a common use case? +- Is it simple to understand? + +You can help us out by doing the following before raising a new issue: + +- Check that the feature hasn't been requested already by searching existing issues +- Try and reduce your enhancement into a single, concise and deliverable request, rather than a general idea +- Explain your own use cases as the basis of the request + +## Adding Features + +Pull requests are always welcome. However, before going through the trouble of implementing a change it's worth creating a bug or feature request issue. +This allows us to discuss the changes and make sure they are a good fit for the project. + +Please always make sure a pull request has been: + +- Unit tested with `make test` +- Linted with `make lint` +- Vetted with `make vet` +- Formatted with `make fmt` or validated with `make check-fmt` + +## Writing Tests + +Tests should follow the [table driven test pattern](https://dave.cheney.net/2013/06/09/writing-table-driven-tests-in-go). See other tests in the code base for additional examples. 
diff --git a/vendor/github.com/go-co-op/gocron/LICENSE b/vendor/github.com/go-co-op/gocron/LICENSE new file mode 100644 index 00000000..3357d57d --- /dev/null +++ b/vendor/github.com/go-co-op/gocron/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2014, 辣椒面 + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/go-co-op/gocron/Makefile b/vendor/github.com/go-co-op/gocron/Makefile new file mode 100644 index 00000000..81a8a69d --- /dev/null +++ b/vendor/github.com/go-co-op/gocron/Makefile @@ -0,0 +1,12 @@ +.PHONY: fmt check-fmt lint vet test + +GO_PKGS := $(shell go list -f {{.Dir}} ./...) + +fmt: + @go list -f {{.Dir}} ./... 
| xargs -I{} gofmt -w -s {} + +lint: + @golangci-lint run + +test: + @go test -race -v $(GO_FLAGS) -count=1 $(GO_PKGS) diff --git a/vendor/github.com/go-co-op/gocron/README.md b/vendor/github.com/go-co-op/gocron/README.md new file mode 100644 index 00000000..7805fc0a --- /dev/null +++ b/vendor/github.com/go-co-op/gocron/README.md @@ -0,0 +1,132 @@ +# gocron: A Golang Job Scheduling Package. + +[![CI State](https://github.com/go-co-op/gocron/workflows/Go%20Test/badge.svg)](https://github.com/go-co-op/gocron/actions?query=workflow%3A"lint") ![Go Report Card](https://goreportcard.com/badge/github.com/go-co-op/gocron) [![Go Doc](https://godoc.org/github.com/go-co-op/gocron?status.svg)](https://pkg.go.dev/github.com/go-co-op/gocron) + +gocron is a job scheduling package which lets you run Go functions at pre-determined intervals using a simple, human-friendly syntax. + +gocron is a Golang scheduler implementation similar to the Ruby module [clockwork](https://github.com/tomykaira/clockwork) and the Python job scheduling package [schedule](https://github.com/dbader/schedule). + +See also these two great articles that were used for design input: + +- [Rethinking Cron](http://adam.herokuapp.com/past/2010/4/13/rethinking_cron/) +- [Replace Cron with Clockwork](http://adam.herokuapp.com/past/2010/6/30/replace_cron_with_clockwork/) + +If you want to chat, you can find us at Slack! [](https://gophers.slack.com/archives/CQ7T0T1FW) + +## Concepts + +- **Scheduler**: The scheduler tracks all the jobs assigned to it and makes sure they are passed to the executor when ready to be run. The scheduler is able to manage overall aspects of job behavior like limiting how many jobs are running at one time. +- **Job**: The job is simply aware of the task (go function) it's provided and is therefore only able to perform actions related to that task like preventing itself from overrunning a previous task that is taking a long time. 
+- **Executor**: The executor, as it's name suggests, is simply responsible for calling the task (go function) that the job hands to it when sent by the scheduler. + +## Examples + +```golang +s := gocron.NewScheduler(time.UTC) + +s.Every(5).Seconds().Do(func(){ ... }) + +// strings parse to duration +s.Every("5m").Do(func(){ ... }) + +s.Every(5).Days().Do(func(){ ... }) + +s.Every(1).Month(1, 2, 3).Do(func(){ ... }) + +// set time +s.Every(1).Day().At("10:30").Do(func(){ ... }) + +// set multiple times +s.Every(1).Day().At("10:30;08:00").Do(func(){ ... }) + +s.Every(1).Day().At("10:30").At("08:00").Do(func(){ ... }) + +// Schedule each last day of the month +s.Every(1).MonthLastDay().Do(func(){ ... }) + +// Or each last day of every other month +s.Every(2).MonthLastDay().Do(func(){ ... }) + +// cron expressions supported +s.Cron("*/1 * * * *").Do(task) // every minute + +// you can start running the scheduler in two different ways: +// starts the scheduler asynchronously +s.StartAsync() +// starts the scheduler and blocks current execution path +s.StartBlocking() +``` + +For more examples, take a look in our [go docs](https://pkg.go.dev/github.com/go-co-op/gocron#pkg-examples) + +## Options + +| Interval | Supported schedule options | +| ------------ | ------------------------------------------------------------------- | +| sub-second | `StartAt()` | +| milliseconds | `StartAt()` | +| seconds | `StartAt()` | +| minutes | `StartAt()` | +| hours | `StartAt()` | +| days | `StartAt()`, `At()` | +| weeks | `StartAt()`, `At()`, `Weekday()` (and all week day named functions) | +| months | `StartAt()`, `At()` | + +There are several options available to restrict how jobs run: + +| Mode | Function | Behavior | +| --------------- | ------------------------ | ------------------------------------------------------------------------------- | +| Default | | jobs are rescheduled at every interval | +| Job singleton | `SingletonMode()` | a long running job will not be rescheduled 
until the current run is completed | +| Scheduler limit | `SetMaxConcurrentJobs()` | set a collective maximum number of concurrent jobs running across the scheduler | + +## Tags + +Jobs may have arbitrary tags added which can be useful when tracking many jobs. +The scheduler supports both enforcing tags to be unique and when not unique, +running all jobs with a given tag. + +```golang +s := gocron.NewScheduler(time.UTC) +s.TagsUnique() + +_, _ = s.Every(1).Week().Tag("foo").Do(task) +_, err := s.Every(1).Week().Tag("foo").Do(task) +// error!!! + +s := gocron.NewScheduler(time.UTC) + +s.Every(2).Day().Tag("tag").At("10:00").Do(task) +s.Every(1).Minute().Tag("tag").Do(task) +s.RunByTag("tag") +// both jobs will run +``` + +## FAQ + +- Q: I'm running multiple pods on a distributed environment. How can I make a job not run once per pod causing duplication? + - A: We recommend using your own lock solution within the jobs themselves (you could use [Redis](https://redis.io/topics/distlock), for example) + +- Q: I've removed my job from the scheduler, but how can I stop a long-running job that has already been triggered? + - A: We recommend using a means of canceling your job, e.g. a `context.WithCancel()`. + +--- + +Looking to contribute? Try to follow these guidelines: + +- Use issues for everything +- For a small change, just send a PR! +- For bigger changes, please open an issue for discussion before sending a PR. +- PRs should have: tests, documentation and examples (if it makes sense) +- You can also contribute by: + - Reporting issues + - Suggesting new features or enhancements + - Improving/fixing documentation + +--- + +## Design + +![design-diagram](https://user-images.githubusercontent.com/19351306/110375142-2ba88680-8017-11eb-80c3-554cc746b165.png) + +[Jetbrains](https://www.jetbrains.com/?from=gocron) supports this project with GoLand licenses. We appreciate their support for free and open source software! 
diff --git a/vendor/github.com/go-co-op/gocron/SECURITY.md b/vendor/github.com/go-co-op/gocron/SECURITY.md new file mode 100644 index 00000000..6b986412 --- /dev/null +++ b/vendor/github.com/go-co-op/gocron/SECURITY.md @@ -0,0 +1,15 @@ +# Security Policy + +## Supported Versions + +The current plan is to maintain version 1 as long as possible incorporating any necessary security patches. + +| Version | Supported | +| ------- | ------------------ | +| 1.x.x | :white_check_mark: | + +## Reporting a Vulnerability + +Vulnerabilities can be reported by [opening an issue](https://github.com/go-co-op/gocron/issues/new/choose) or reaching out on Slack: [](https://gophers.slack.com/archives/CQ7T0T1FW) + +We will do our best to address any vulnerabilities in an expeditious manner. diff --git a/vendor/github.com/go-co-op/gocron/executor.go b/vendor/github.com/go-co-op/gocron/executor.go new file mode 100644 index 00000000..cf87ff21 --- /dev/null +++ b/vendor/github.com/go-co-op/gocron/executor.go @@ -0,0 +1,127 @@ +package gocron + +import ( + "context" + "sync" + + "golang.org/x/sync/semaphore" +) + +const ( + // RescheduleMode - the default is that if a limit on maximum + // concurrent jobs is set and the limit is reached, a job will + // skip it's run and try again on the next occurrence in the schedule + RescheduleMode limitMode = iota + + // WaitMode - if a limit on maximum concurrent jobs is set + // and the limit is reached, a job will wait to try and run + // until a spot in the limit is freed up. + // + // Note: this mode can produce unpredictable results as + // job execution order isn't guaranteed. For example, a job that + // executes frequently may pile up in the wait queue and be executed + // many times back to back when the queue opens. 
+ WaitMode +) + +type executor struct { + jobFunctions chan jobFunction + stopCh chan struct{} + stoppedCh chan struct{} + limitMode limitMode + maxRunningJobs *semaphore.Weighted +} + +func newExecutor() executor { + return executor{ + jobFunctions: make(chan jobFunction, 1), + stopCh: make(chan struct{}), + stoppedCh: make(chan struct{}), + } +} + +func (e *executor) start() { + stopCtx, cancel := context.WithCancel(context.Background()) + runningJobsWg := sync.WaitGroup{} + + for { + select { + case f := <-e.jobFunctions: + runningJobsWg.Add(1) + go func() { + defer runningJobsWg.Done() + + panicHandlerMutex.RLock() + defer panicHandlerMutex.RUnlock() + + if panicHandler != nil { + defer func() { + if r := recover(); r != any(nil) { + panicHandler(f.name, r) + } + }() + } + + if e.maxRunningJobs != nil { + if !e.maxRunningJobs.TryAcquire(1) { + + switch e.limitMode { + case RescheduleMode: + return + case WaitMode: + select { + case <-stopCtx.Done(): + return + case <-f.ctx.Done(): + return + default: + } + + if err := e.maxRunningJobs.Acquire(f.ctx, 1); err != nil { + break + + } + } + } + + defer e.maxRunningJobs.Release(1) + } + + runJob := func() { + f.incrementRunState() + callJobFunc(f.eventListeners.onBeforeJobExecution) + callJobFuncWithParams(f.function, f.parameters) + callJobFunc(f.eventListeners.onAfterJobExecution) + f.decrementRunState() + } + + switch f.runConfig.mode { + case defaultMode: + runJob() + case singletonMode: + _, _, _ = f.limiter.Do("main", func() (any, error) { + select { + case <-stopCtx.Done(): + return nil, nil + case <-f.ctx.Done(): + return nil, nil + default: + } + runJob() + return nil, nil + }) + } + }() + case <-e.stopCh: + cancel() + runningJobsWg.Wait() + close(e.stoppedCh) + return + } + } +} + +func (e *executor) stop() { + close(e.stopCh) + <-e.stoppedCh +} diff --git a/vendor/github.com/go-co-op/gocron/gocron.go b/vendor/github.com/go-co-op/gocron/gocron.go new file mode 100644 index 00000000..c512ca85 --- /dev/null 
+++ b/vendor/github.com/go-co-op/gocron/gocron.go @@ -0,0 +1,129 @@ +// Package gocron : A Golang Job Scheduling Package. +// +// An in-process scheduler for periodic jobs that uses the builder pattern +// for configuration. gocron lets you run Golang functions periodically +// at pre-determined intervals using a simple, human-friendly syntax. +package gocron + +import ( + "errors" + "fmt" + "reflect" + "regexp" + "runtime" + "sync" + "time" +) + +// PanicHandlerFunc represents a type that can be set to handle panics occurring +// during job execution. +type PanicHandlerFunc func(jobName string, recoverData any) + +// The global panic handler +var ( + panicHandler PanicHandlerFunc + panicHandlerMutex = sync.RWMutex{} +) + +// SetPanicHandler sets the global panicHandler to the given function. +// Leaving it nil or setting it to nil disables automatic panic handling. +// If the panicHandler is not nil, any panic that occurs during executing a job will be recovered +// and the panicHandlerFunc will be called with the job's name and the recover data. +func SetPanicHandler(handler PanicHandlerFunc) { + panicHandlerMutex.Lock() + defer panicHandlerMutex.Unlock() + panicHandler = handler +} + +// Error declarations for gocron related errors +var ( + ErrNotAFunction = errors.New("only functions can be scheduled into the job queue") + ErrNotScheduledWeekday = errors.New("job not scheduled weekly on a weekday") + ErrJobNotFoundWithTag = errors.New("no jobs found with given tag") + ErrUnsupportedTimeFormat = errors.New("the given time format is not supported") + ErrInvalidInterval = errors.New(".Every() interval must be greater than 0") + ErrInvalidIntervalType = errors.New(".Every() interval must be int, time.Duration, or string") + ErrInvalidIntervalUnitsSelection = errors.New(".Every(time.Duration) and .Cron() cannot be used with units (e.g. 
.Seconds())") + ErrInvalidFunctionParameters = errors.New("length of function parameters must match job function parameters") + + ErrAtTimeNotSupported = errors.New("the At() method is not supported for this time unit") + ErrWeekdayNotSupported = errors.New("weekday is not supported for time unit") + ErrInvalidDayOfMonthEntry = errors.New("only days 1 through 28 are allowed for monthly schedules") + ErrTagsUnique = func(tag string) error { return fmt.Errorf("a non-unique tag was set on the job: %s", tag) } + ErrWrongParams = errors.New("wrong list of params") + ErrDoWithJobDetails = errors.New("DoWithJobDetails expects a function whose last parameter is a gocron.Job") + ErrUpdateCalledWithoutJob = errors.New("a call to Scheduler.Update() requires a call to Scheduler.Job() first") + ErrCronParseFailure = errors.New("cron expression failed to be parsed") + ErrInvalidDaysOfMonthDuplicateValue = errors.New("duplicate days of month is not allowed in Month() and Months() methods") +) + +func wrapOrError(toWrap error, err error) error { + var returnErr error + if toWrap != nil && !errors.Is(err, toWrap) { + returnErr = fmt.Errorf("%s: %w", err, toWrap) + } else { + returnErr = err + } + return returnErr +} + +// regex patterns for supported time formats +var ( + timeWithSeconds = regexp.MustCompile(`(?m)^\d{1,2}:\d\d:\d\d$`) + timeWithoutSeconds = regexp.MustCompile(`(?m)^\d{1,2}:\d\d$`) +) + +type schedulingUnit int + +const ( + // default unit is seconds + milliseconds schedulingUnit = iota + seconds + minutes + hours + days + weeks + months + duration + crontab +) + +func callJobFunc(jobFunc any) { + if jobFunc != nil { + reflect.ValueOf(jobFunc).Call([]reflect.Value{}) + } +} + +func callJobFuncWithParams(jobFunc any, params []any) { + f := reflect.ValueOf(jobFunc) + if len(params) != f.Type().NumIn() { + return + } + in := make([]reflect.Value, len(params)) + for k, param := range params { + in[k] = reflect.ValueOf(param) + } + f.Call(in) +} + +func 
getFunctionName(fn any) string { + return runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name() +} + +func parseTime(t string) (hour, min, sec int, err error) { + var timeLayout string + switch { + case timeWithSeconds.Match([]byte(t)): + timeLayout = "15:04:05" + case timeWithoutSeconds.Match([]byte(t)): + timeLayout = "15:04" + default: + return 0, 0, 0, ErrUnsupportedTimeFormat + } + + parsedTime, err := time.Parse(timeLayout, t) + if err != nil { + return 0, 0, 0, ErrUnsupportedTimeFormat + } + return parsedTime.Hour(), parsedTime.Minute(), parsedTime.Second(), nil +} diff --git a/vendor/github.com/go-co-op/gocron/job.go b/vendor/github.com/go-co-op/gocron/job.go new file mode 100644 index 00000000..085fe8ed --- /dev/null +++ b/vendor/github.com/go-co-op/gocron/job.go @@ -0,0 +1,480 @@ +package gocron + +import ( + "context" + "fmt" + "math/rand" + "sort" + "sync" + "sync/atomic" + "time" + + "github.com/robfig/cron/v3" + "golang.org/x/sync/singleflight" +) + +// Job struct stores the information necessary to run a Job +type Job struct { + mu *jobMutex + jobFunction + interval int // interval * unit between runs + random // details for randomness + duration time.Duration // time duration between runs + unit schedulingUnit // time units, e.g. 'minutes', 'hours'... 
+ startsImmediately bool // if the Job should run upon scheduler start + atTimes []time.Duration // optional time(s) at which this Job runs when interval is day + startAtTime time.Time // optional time at which the Job starts + error error // error related to Job + lastRun time.Time // datetime of last run + nextRun time.Time // datetime of next run + scheduledWeekdays []time.Weekday // Specific days of the week to start on + daysOfTheMonth []int // Specific days of the month to run the job + tags []string // allow the user to tag Jobs with certain labels + runCount int // number of times the job ran + timer *time.Timer // handles running tasks at specific time + cronSchedule cron.Schedule // stores the schedule when a task uses cron + runWithDetails bool // when true the job is passed as the last arg of the jobFunc +} + +type random struct { + rand *rand.Rand + randomizeInterval bool // whether the interval is random + randomIntervalRange [2]int // random interval range +} + +type jobFunction struct { + eventListeners // additional functions to allow run 'em during job performing + function any // task's function + parameters []any // task's function parameters + parametersLen int // length of the passed parameters + name string // nolint the function name to run + runConfig runConfig // configuration for how many times to run the job + limiter *singleflight.Group // limits inflight runs of job to one + ctx context.Context // for cancellation + cancel context.CancelFunc // for cancellation + runState *int64 // will be non-zero when jobs are running +} + +type eventListeners struct { + onBeforeJobExecution any // performs before job executing + onAfterJobExecution any // performs after job executing +} + +type jobMutex struct { + sync.RWMutex +} + +func (jf *jobFunction) incrementRunState() { + if jf.runState != nil { + atomic.AddInt64(jf.runState, 1) + } +} + +func (jf *jobFunction) decrementRunState() { + if jf.runState != nil { + atomic.AddInt64(jf.runState, -1) 
+ } +} + +func (jf *jobFunction) copy() jobFunction { + cp := jobFunction{ + eventListeners: jf.eventListeners, + function: jf.function, + parameters: nil, + parametersLen: jf.parametersLen, + name: jf.name, + runConfig: jf.runConfig, + limiter: jf.limiter, + ctx: jf.ctx, + cancel: jf.cancel, + runState: jf.runState, + } + cp.parameters = append(cp.parameters, jf.parameters...) + return cp +} + +type runConfig struct { + finiteRuns bool + maxRuns int + mode mode +} + +// mode is the Job's running mode +type mode int8 + +const ( + // defaultMode disable any mode + defaultMode mode = iota + + // singletonMode switch to single job mode + singletonMode +) + +// newJob creates a new Job with the provided interval +func newJob(interval int, startImmediately bool, singletonMode bool) *Job { + ctx, cancel := context.WithCancel(context.Background()) + var zero int64 + job := &Job{ + mu: &jobMutex{}, + interval: interval, + unit: seconds, + lastRun: time.Time{}, + nextRun: time.Time{}, + jobFunction: jobFunction{ + ctx: ctx, + cancel: cancel, + runState: &zero, + }, + tags: []string{}, + startsImmediately: startImmediately, + } + if singletonMode { + job.SingletonMode() + } + return job +} + +func (j *Job) setRandomInterval(a, b int) { + j.random.rand = rand.New(rand.NewSource(time.Now().UnixNano())) // nolint + + j.random.randomizeInterval = true + if a < b { + j.random.randomIntervalRange[0] = a + j.random.randomIntervalRange[1] = b + 1 + } else { + j.random.randomIntervalRange[0] = b + j.random.randomIntervalRange[1] = a + 1 + } +} + +func (j *Job) getRandomInterval() int { + randNum := j.rand.Intn(j.randomIntervalRange[1] - j.randomIntervalRange[0]) + return j.randomIntervalRange[0] + randNum +} + +func (j *Job) getInterval() int { + if j.randomizeInterval { + return j.getRandomInterval() + } + return j.interval +} + +func (j *Job) neverRan() bool { + jobLastRun := j.LastRun() + return jobLastRun.IsZero() +} + +func (j *Job) getStartsImmediately() bool { + return 
j.startsImmediately +} + +func (j *Job) setStartsImmediately(b bool) { + j.startsImmediately = b +} + +func (j *Job) setTimer(t *time.Timer) { + j.mu.Lock() + defer j.mu.Unlock() + j.timer = t +} + +func (j *Job) getFirstAtTime() time.Duration { + var t time.Duration + if len(j.atTimes) > 0 { + t = j.atTimes[0] + } + + return t +} + +func (j *Job) getAtTime(lastRun time.Time) time.Duration { + var r time.Duration + if len(j.atTimes) == 0 { + return r + } + + if len(j.atTimes) == 1 { + return j.atTimes[0] + } + + if lastRun.IsZero() { + r = j.atTimes[0] + } else { + for _, d := range j.atTimes { + nt := time.Date(lastRun.Year(), lastRun.Month(), lastRun.Day(), 0, 0, 0, 0, lastRun.Location()).Add(d) + if nt.After(lastRun) { + r = d + break + } + } + } + + return r +} + +func (j *Job) addAtTime(t time.Duration) { + if len(j.atTimes) == 0 { + j.atTimes = append(j.atTimes, t) + return + } + exist := false + index := sort.Search(len(j.atTimes), func(i int) bool { + atTime := j.atTimes[i] + b := atTime >= t + if b { + exist = atTime == t + } + return b + }) + + // ignore if present + if exist { + return + } + + j.atTimes = append(j.atTimes, time.Duration(0)) + copy(j.atTimes[index+1:], j.atTimes[index:]) + j.atTimes[index] = t +} + +func (j *Job) getStartAtTime() time.Time { + return j.startAtTime +} + +func (j *Job) setStartAtTime(t time.Time) { + j.startAtTime = t +} + +func (j *Job) getUnit() schedulingUnit { + j.mu.RLock() + defer j.mu.RUnlock() + return j.unit +} + +func (j *Job) setUnit(t schedulingUnit) { + j.mu.Lock() + defer j.mu.Unlock() + j.unit = t +} + +func (j *Job) getDuration() time.Duration { + j.mu.RLock() + defer j.mu.RUnlock() + return j.duration +} + +func (j *Job) setDuration(t time.Duration) { + j.mu.Lock() + defer j.mu.Unlock() + j.duration = t +} + +// hasTags returns true if all tags are matched on this Job +func (j *Job) hasTags(tags ...string) bool { + // Build map of all Job tags for easy comparison + jobTags := map[string]int{} + for _, tag 
:= range j.tags { + jobTags[tag] = 0 + } + + // Loop through required tags and if one doesn't exist, return false + for _, tag := range tags { + _, ok := jobTags[tag] + if !ok { + return false + } + } + return true +} + +// Error returns an error if one occurred while creating the Job. +// If multiple errors occurred, they will be wrapped and can be +// checked using the standard unwrap options. +func (j *Job) Error() error { + return j.error +} + +// Tag allows you to add arbitrary labels to a Job that do not +// impact the functionality of the Job +func (j *Job) Tag(tags ...string) { + j.tags = append(j.tags, tags...) +} + +// Untag removes a tag from a Job +func (j *Job) Untag(t string) { + var newTags []string + for _, tag := range j.tags { + if t != tag { + newTags = append(newTags, tag) + } + } + + j.tags = newTags +} + +// Tags returns the tags attached to the Job +func (j *Job) Tags() []string { + return j.tags +} + +// SetEventListeners accepts two functions that will be called, one before and one after the job is run +func (j *Job) SetEventListeners(onBeforeJobExecution any, onAfterJobExecution any) { + j.eventListeners = eventListeners{ + onBeforeJobExecution: onBeforeJobExecution, + onAfterJobExecution: onAfterJobExecution, + } +} + +// ScheduledTime returns the time of the Job's next scheduled run +func (j *Job) ScheduledTime() time.Time { + j.mu.RLock() + defer j.mu.RUnlock() + return j.nextRun +} + +// ScheduledAtTime returns the specific time of day the Job will run at. +// If multiple times are set, the earliest time will be returned. 
+func (j *Job) ScheduledAtTime() string { + if len(j.atTimes) == 0 { + return "00:00" + } + + return fmt.Sprintf("%02d:%02d", j.getFirstAtTime()/time.Hour, (j.getFirstAtTime()%time.Hour)/time.Minute) +} + +// ScheduledAtTimes returns the specific times of day the Job will run at +func (j *Job) ScheduledAtTimes() []string { + r := make([]string, len(j.atTimes)) + for i, t := range j.atTimes { + r[i] = fmt.Sprintf("%02d:%02d", t/time.Hour, (t%time.Hour)/time.Minute) + } + + return r +} + +// Weekday returns which day of the week the Job will run on and +// will return an error if the Job is not scheduled weekly +func (j *Job) Weekday() (time.Weekday, error) { + if len(j.scheduledWeekdays) == 0 { + return time.Sunday, ErrNotScheduledWeekday + } + return j.scheduledWeekdays[0], nil +} + +// Weekdays returns a slice of time.Weekday that the Job will run in a week and +// will return an error if the Job is not scheduled weekly +func (j *Job) Weekdays() []time.Weekday { + // appending on j.scheduledWeekdays may cause a side effect + if len(j.scheduledWeekdays) == 0 { + return []time.Weekday{time.Sunday} + } + + return j.scheduledWeekdays +} + +// LimitRunsTo limits the number of executions of this job to n. +// Upon reaching the limit, the job is removed from the scheduler. +// +// Note: If a job is added to a running scheduler and this method is then used +// you may see the job run more than the set limit as job is scheduled immediately +// by default upon being added to the scheduler. It is recommended to use the +// LimitRunsTo() func on the scheduler chain when scheduling the job. 
+// For example: scheduler.LimitRunsTo(1).Do() +func (j *Job) LimitRunsTo(n int) { + j.mu.Lock() + defer j.mu.Unlock() + j.runConfig.finiteRuns = true + j.runConfig.maxRuns = n +} + +// SingletonMode prevents a new job from starting if the prior job has not yet +// completed it's run +// Note: If a job is added to a running scheduler and this method is then used +// you may see the job run overrun itself as job is scheduled immediately +// by default upon being added to the scheduler. It is recommended to use the +// SingletonMode() func on the scheduler chain when scheduling the job. +func (j *Job) SingletonMode() { + j.mu.Lock() + defer j.mu.Unlock() + j.runConfig.mode = singletonMode + j.jobFunction.limiter = &singleflight.Group{} +} + +// shouldRun evaluates if this job should run again +// based on the runConfig +func (j *Job) shouldRun() bool { + j.mu.RLock() + defer j.mu.RUnlock() + return !j.runConfig.finiteRuns || j.runCount < j.runConfig.maxRuns +} + +// LastRun returns the time the job was run last +func (j *Job) LastRun() time.Time { + j.mu.RLock() + defer j.mu.RUnlock() + return j.lastRun +} + +func (j *Job) setLastRun(t time.Time) { + j.lastRun = t +} + +// NextRun returns the time the job will run next +func (j *Job) NextRun() time.Time { + j.mu.RLock() + defer j.mu.RUnlock() + return j.nextRun +} + +func (j *Job) setNextRun(t time.Time) { + j.mu.Lock() + defer j.mu.Unlock() + j.nextRun = t +} + +// RunCount returns the number of time the job ran so far +func (j *Job) RunCount() int { + j.mu.Lock() + defer j.mu.Unlock() + return j.runCount +} + +func (j *Job) stop() { + j.mu.Lock() + defer j.mu.Unlock() + if j.timer != nil { + j.timer.Stop() + } + if j.cancel != nil { + j.cancel() + } +} + +// IsRunning reports whether any instances of the job function are currently running +func (j *Job) IsRunning() bool { + return atomic.LoadInt64(j.runState) != 0 +} + +// you must lock the job before calling copy +func (j *Job) copy() Job { + return Job{ + mu: 
&jobMutex{}, + jobFunction: j.jobFunction, + interval: j.interval, + duration: j.duration, + unit: j.unit, + startsImmediately: j.startsImmediately, + atTimes: j.atTimes, + startAtTime: j.startAtTime, + error: j.error, + lastRun: j.lastRun, + nextRun: j.nextRun, + scheduledWeekdays: j.scheduledWeekdays, + daysOfTheMonth: j.daysOfTheMonth, + tags: j.tags, + runCount: j.runCount, + timer: j.timer, + cronSchedule: j.cronSchedule, + runWithDetails: j.runWithDetails, + } +} diff --git a/vendor/github.com/go-co-op/gocron/scheduler.go b/vendor/github.com/go-co-op/gocron/scheduler.go new file mode 100644 index 00000000..ffc588f7 --- /dev/null +++ b/vendor/github.com/go-co-op/gocron/scheduler.go @@ -0,0 +1,1339 @@ +package gocron + +import ( + "context" + "fmt" + "reflect" + "sort" + "strings" + "sync" + "time" + + "github.com/robfig/cron/v3" + "golang.org/x/sync/semaphore" +) + +type limitMode int8 + +// Scheduler struct stores a list of Jobs and the location of time used by the Scheduler, +// and implements the sort. 
any for sorting Jobs, by the time of nextRun +type Scheduler struct { + jobsMutex sync.RWMutex + jobs []*Job + + locationMutex sync.RWMutex + location *time.Location + runningMutex sync.RWMutex + running bool // represents if the scheduler is running at the moment or not + + time TimeWrapper // wrapper around time.Time + timer func(d time.Duration, f func()) *time.Timer + executor *executor // executes jobs passed via chan + + tags sync.Map // for storing tags when unique tags is set + + tagsUnique bool // defines whether tags should be unique + updateJob bool // so the scheduler knows to create a new job or update the current + waitForInterval bool // defaults jobs to waiting for first interval to start + singletonMode bool // defaults all jobs to use SingletonMode() + jobCreated bool // so the scheduler knows a job was created prior to calling Every or Cron + + startBlockingStopChanMutex sync.Mutex + startBlockingStopChan chan struct{} // stops the scheduler +} + +// days in a week +const allWeekDays = 7 + +// NewScheduler creates a new Scheduler +func NewScheduler(loc *time.Location) *Scheduler { + executor := newExecutor() + + return &Scheduler{ + jobs: make([]*Job, 0), + location: loc, + running: false, + time: &trueTime{}, + executor: &executor, + tagsUnique: false, + timer: afterFunc, + } +} + +// SetMaxConcurrentJobs limits how many jobs can be running at the same time. +// This is useful when running resource intensive jobs and a precise start time is not critical. +func (s *Scheduler) SetMaxConcurrentJobs(n int, mode limitMode) { + s.executor.maxRunningJobs = semaphore.NewWeighted(int64(n)) + s.executor.limitMode = mode +} + +// StartBlocking starts all jobs and blocks the current thread. +// This blocking method can be stopped with Stop() from a separate goroutine. 
+func (s *Scheduler) StartBlocking() { + s.StartAsync() + s.startBlockingStopChanMutex.Lock() + s.startBlockingStopChan = make(chan struct{}, 1) + s.startBlockingStopChanMutex.Unlock() + <-s.startBlockingStopChan +} + +// StartAsync starts all jobs without blocking the current thread +func (s *Scheduler) StartAsync() { + if !s.IsRunning() { + s.start() + } +} + +// start starts the scheduler, scheduling and running jobs +func (s *Scheduler) start() { + go s.executor.start() + s.setRunning(true) + s.runJobs(s.Jobs()) +} + +func (s *Scheduler) runJobs(jobs []*Job) { + for _, job := range jobs { + s.runContinuous(job) + } +} + +func (s *Scheduler) setRunning(b bool) { + s.runningMutex.Lock() + defer s.runningMutex.Unlock() + s.running = b +} + +// IsRunning returns true if the scheduler is running +func (s *Scheduler) IsRunning() bool { + s.runningMutex.RLock() + defer s.runningMutex.RUnlock() + return s.running +} + +// Jobs returns the list of Jobs from the Scheduler +func (s *Scheduler) Jobs() []*Job { + s.jobsMutex.RLock() + defer s.jobsMutex.RUnlock() + return s.jobs +} + +func (s *Scheduler) setJobs(jobs []*Job) { + s.jobsMutex.Lock() + defer s.jobsMutex.Unlock() + s.jobs = jobs +} + +// Len returns the number of Jobs in the Scheduler - implemented for sort +func (s *Scheduler) Len() int { + s.jobsMutex.RLock() + defer s.jobsMutex.RUnlock() + return len(s.jobs) +} + +// Swap places each job into the other job's position given +// the provided job indexes. +func (s *Scheduler) Swap(i, j int) { + s.jobsMutex.Lock() + defer s.jobsMutex.Unlock() + s.jobs[i], s.jobs[j] = s.jobs[j], s.jobs[i] +} + +// Less compares the next run of jobs based on their index. +// Returns true if the second job is after the first. 
+func (s *Scheduler) Less(first, second int) bool { + return s.Jobs()[second].NextRun().Unix() >= s.Jobs()[first].NextRun().Unix() +} + +// ChangeLocation changes the default time location +func (s *Scheduler) ChangeLocation(newLocation *time.Location) { + s.locationMutex.Lock() + defer s.locationMutex.Unlock() + s.location = newLocation +} + +// Location provides the current location set on the scheduler +func (s *Scheduler) Location() *time.Location { + s.locationMutex.RLock() + defer s.locationMutex.RUnlock() + return s.location +} + +type nextRun struct { + duration time.Duration + dateTime time.Time +} + +// scheduleNextRun Compute the instant when this Job should run next +func (s *Scheduler) scheduleNextRun(job *Job) (bool, nextRun) { + now := s.now() + if !s.jobPresent(job) { + return false, nextRun{} + } + + lastRun := now + + if job.neverRan() { + // Increment startAtTime to the future + if !job.startAtTime.IsZero() && job.startAtTime.Before(now) { + duration := s.durationToNextRun(job.startAtTime, job).duration + job.startAtTime = job.startAtTime.Add(duration) + if job.startAtTime.Before(now) { + diff := now.Sub(job.startAtTime) + duration := s.durationToNextRun(job.startAtTime, job).duration + count := diff / duration + if diff%duration != 0 { + count++ + } + job.startAtTime = job.startAtTime.Add(duration * count) + } + } + } else { + lastRun = job.LastRun() + } + + if !job.shouldRun() { + s.RemoveByReference(job) + return false, nextRun{} + } + + next := s.durationToNextRun(lastRun, job) + + job.setLastRun(job.NextRun()) + if next.dateTime.IsZero() { + next.dateTime = lastRun.Add(next.duration) + job.setNextRun(next.dateTime) + } else { + job.setNextRun(next.dateTime) + } + return true, next +} + +// durationToNextRun calculate how much time to the next run, depending on unit +func (s *Scheduler) durationToNextRun(lastRun time.Time, job *Job) nextRun { + // job can be scheduled with .StartAt() + if job.getStartAtTime().After(lastRun) { + return 
nextRun{duration: job.getStartAtTime().Sub(s.now()), dateTime: job.getStartAtTime()} + } + + var next nextRun + switch job.getUnit() { + case milliseconds, seconds, minutes, hours: + next.duration = s.calculateDuration(job) + case days: + next = s.calculateDays(job, lastRun) + case weeks: + if len(job.scheduledWeekdays) != 0 { // weekday selected, Every().Monday(), for example + next = s.calculateWeekday(job, lastRun) + } else { + next = s.calculateWeeks(job, lastRun) + } + case months: + next = s.calculateMonths(job, lastRun) + case duration: + next.duration = job.getDuration() + case crontab: + next.dateTime = job.cronSchedule.Next(lastRun) + next.duration = next.dateTime.Sub(lastRun) + } + return next +} + +func (s *Scheduler) calculateMonths(job *Job, lastRun time.Time) nextRun { + // Special case: the last day of the month + if len(job.daysOfTheMonth) == 1 && job.daysOfTheMonth[0] == -1 { + return calculateNextRunForLastDayOfMonth(s, job, lastRun) + } + + if len(job.daysOfTheMonth) != 0 { // calculate days to job.daysOfTheMonth + + nextRunDateMap := make(map[int]nextRun) + for _, day := range job.daysOfTheMonth { + nextRunDateMap[day] = calculateNextRunForMonth(s, job, lastRun, day) + } + + nextRunResult := nextRun{} + for _, val := range nextRunDateMap { + if nextRunResult.dateTime.IsZero() { + nextRunResult = val + } else if nextRunResult.dateTime.Sub(val.dateTime).Milliseconds() > 0 { + nextRunResult = val + } + } + + return nextRunResult + } + next := s.roundToMidnightAndAddDSTAware(lastRun, job.getFirstAtTime()).AddDate(0, job.getInterval(), 0) + return nextRun{duration: until(lastRun, next), dateTime: next} +} + +func calculateNextRunForLastDayOfMonth(s *Scheduler, job *Job, lastRun time.Time) nextRun { + // Calculate the last day of the next month, by adding job.interval+1 months (i.e. the + // first day of the month after the next month), and subtracting one day, unless the + // last run occurred before the end of the month. 
+ addMonth := job.getInterval() + atTime := job.getAtTime(lastRun) + if testDate := lastRun.AddDate(0, 0, 1); testDate.Month() != lastRun.Month() && + !s.roundToMidnightAndAddDSTAware(lastRun, atTime).After(lastRun) { + // Our last run was on the last day of this month. + addMonth++ + atTime = job.getFirstAtTime() + } + + next := time.Date(lastRun.Year(), lastRun.Month(), 1, 0, 0, 0, 0, s.Location()). + Add(atTime). + AddDate(0, addMonth, 0). + AddDate(0, 0, -1) + return nextRun{duration: until(lastRun, next), dateTime: next} +} + +func calculateNextRunForMonth(s *Scheduler, job *Job, lastRun time.Time, dayOfMonth int) nextRun { + atTime := job.getAtTime(lastRun) + natTime := atTime + + hours, minutes, seconds := s.deconstructDuration(atTime) + jobDay := time.Date(lastRun.Year(), lastRun.Month(), dayOfMonth, hours, minutes, seconds, 0, s.Location()) + + difference := absDuration(lastRun.Sub(jobDay)) + next := lastRun + if jobDay.Before(lastRun) { // shouldn't run this month; schedule for next interval minus day difference + next = next.AddDate(0, job.getInterval(), -0) + next = next.Add(-difference) + natTime = job.getFirstAtTime() + } else { + if job.getInterval() == 1 && !jobDay.Equal(lastRun) { // every month counts current month + next = next.AddDate(0, job.getInterval()-1, 0) + } else { // should run next month interval + next = next.AddDate(0, job.getInterval(), 0) + natTime = job.getFirstAtTime() + } + next = next.Add(difference) + } + if atTime != natTime { + next = next.Add(-atTime).Add(natTime) + } + return nextRun{duration: until(lastRun, next), dateTime: next} +} + +func (s *Scheduler) calculateWeekday(job *Job, lastRun time.Time) nextRun { + daysToWeekday := s.remainingDaysToWeekday(lastRun, job) + totalDaysDifference := s.calculateTotalDaysDifference(lastRun, daysToWeekday, job) + acTime := job.getAtTime(lastRun) + if totalDaysDifference > 0 { + acTime = job.getFirstAtTime() + } + next := s.roundToMidnightAndAddDSTAware(lastRun, acTime).AddDate(0, 0, 
totalDaysDifference) + return nextRun{duration: until(lastRun, next), dateTime: next} +} + +func (s *Scheduler) calculateWeeks(job *Job, lastRun time.Time) nextRun { + totalDaysDifference := int(job.getInterval()) * 7 + next := s.roundToMidnightAndAddDSTAware(lastRun, job.getFirstAtTime()).AddDate(0, 0, totalDaysDifference) + return nextRun{duration: until(lastRun, next), dateTime: next} +} + +func (s *Scheduler) calculateTotalDaysDifference(lastRun time.Time, daysToWeekday int, job *Job) int { + if job.getInterval() > 1 { + // just count weeks after the first jobs were done + if job.RunCount() < len(job.Weekdays()) { + return daysToWeekday + } + if daysToWeekday > 0 { + return int(job.getInterval())*7 - (allWeekDays - daysToWeekday) + } + return int(job.getInterval()) * 7 + } + + if daysToWeekday == 0 { // today, at future time or already passed + lastRunAtTime := time.Date(lastRun.Year(), lastRun.Month(), lastRun.Day(), 0, 0, 0, 0, s.Location()).Add(job.getAtTime(lastRun)) + if lastRun.Before(lastRunAtTime) { + return 0 + } + return 7 + } + return daysToWeekday +} + +func (s *Scheduler) calculateDays(job *Job, lastRun time.Time) nextRun { + if job.getInterval() == 1 { + lastRunDayPlusJobAtTime := s.roundToMidnightAndAddDSTAware(lastRun, job.getAtTime(lastRun)) + + if shouldRunToday(lastRun, lastRunDayPlusJobAtTime) { + return nextRun{duration: until(lastRun, lastRunDayPlusJobAtTime), dateTime: lastRunDayPlusJobAtTime} + } + } + + nextRunAtTime := s.roundToMidnightAndAddDSTAware(lastRun, job.getFirstAtTime()).AddDate(0, 0, job.getInterval()).In(s.Location()) + return nextRun{duration: until(lastRun, nextRunAtTime), dateTime: nextRunAtTime} +} + +func until(from time.Time, until time.Time) time.Duration { + return until.Sub(from) +} + +func shouldRunToday(lastRun time.Time, atTime time.Time) bool { + return lastRun.Before(atTime) +} + +func in(scheduleWeekdays []time.Weekday, weekday time.Weekday) bool { + in := false + + for _, weekdayInSchedule := range 
scheduleWeekdays { + if int(weekdayInSchedule) == int(weekday) { + in = true + break + } + } + return in +} + +func (s *Scheduler) calculateDuration(job *Job) time.Duration { + interval := job.getInterval() + switch job.getUnit() { + case milliseconds: + return time.Duration(interval) * time.Millisecond + case seconds: + return time.Duration(interval) * time.Second + case minutes: + return time.Duration(interval) * time.Minute + default: + return time.Duration(interval) * time.Hour + } +} + +func (s *Scheduler) remainingDaysToWeekday(lastRun time.Time, job *Job) int { + weekDays := job.Weekdays() + sort.Slice(weekDays, func(i, j int) bool { + return weekDays[i] < weekDays[j] + }) + + equals := false + lastRunWeekday := lastRun.Weekday() + index := sort.Search(len(weekDays), func(i int) bool { + b := weekDays[i] >= lastRunWeekday + if b { + equals = weekDays[i] == lastRunWeekday + } + return b + }) + // check atTime + if equals { + if s.roundToMidnightAndAddDSTAware(lastRun, job.getAtTime(lastRun)).After(lastRun) { + return 0 + } + index++ + } + + if index < len(weekDays) { + return int(weekDays[index] - lastRunWeekday) + } + + return int(weekDays[0]) + allWeekDays - int(lastRunWeekday) +} + +// absDuration returns the abs time difference +func absDuration(a time.Duration) time.Duration { + if a >= 0 { + return a + } + return -a +} + +func (s *Scheduler) deconstructDuration(d time.Duration) (hours int, minutes int, seconds int) { + hours = int(d.Seconds()) / int(time.Hour/time.Second) + minutes = (int(d.Seconds()) % int(time.Hour/time.Second)) / int(time.Minute/time.Second) + seconds = int(d.Seconds()) % int(time.Minute/time.Second) + return +} + +// roundToMidnightAndAddDSTAware truncates time to midnight and "adds" duration in a DST aware manner +func (s *Scheduler) roundToMidnightAndAddDSTAware(t time.Time, d time.Duration) time.Time { + hours, minutes, seconds := s.deconstructDuration(d) + return time.Date(t.Year(), t.Month(), t.Day(), hours, minutes, seconds, 
0, s.Location()) +} + +// NextRun datetime when the next Job should run. +func (s *Scheduler) NextRun() (*Job, time.Time) { + if len(s.Jobs()) <= 0 { + return nil, s.now() + } + + sort.Sort(s) + + return s.Jobs()[0], s.Jobs()[0].NextRun() +} + +// EveryRandom schedules a new period Job that runs at random intervals +// between the provided lower (inclusive) and upper (inclusive) bounds. +// The default unit is Seconds(). Call a different unit in the chain +// if you would like to change that. For example, Minutes(), Hours(), etc. +func (s *Scheduler) EveryRandom(lower, upper int) *Scheduler { + job := s.newJob(0) + if s.updateJob || s.jobCreated { + job = s.getCurrentJob() + } + + job.setRandomInterval(lower, upper) + + if s.updateJob || s.jobCreated { + s.setJobs(append(s.Jobs()[:len(s.Jobs())-1], job)) + if s.jobCreated { + s.jobCreated = false + } + } else { + s.setJobs(append(s.Jobs(), job)) + } + + return s +} + +// Every schedules a new periodic Job with an interval. +// Interval can be an int, time.Duration or a string that +// parses with time.ParseDuration(). +// Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". 
+func (s *Scheduler) Every(interval any) *Scheduler { + job := s.newJob(0) + if s.updateJob || s.jobCreated { + job = s.getCurrentJob() + } + + switch interval := interval.(type) { + case int: + job.interval = interval + if interval <= 0 { + job.error = wrapOrError(job.error, ErrInvalidInterval) + } + case time.Duration: + job.interval = 0 + job.setDuration(interval) + job.setUnit(duration) + case string: + d, err := time.ParseDuration(interval) + if err != nil { + job.error = wrapOrError(job.error, err) + } + job.setDuration(d) + job.setUnit(duration) + default: + job.error = wrapOrError(job.error, ErrInvalidIntervalType) + } + + if s.updateJob || s.jobCreated { + s.setJobs(append(s.Jobs()[:len(s.Jobs())-1], job)) + if s.jobCreated { + s.jobCreated = false + } + } else { + s.setJobs(append(s.Jobs(), job)) + } + + return s +} + +func (s *Scheduler) run(job *Job) { + if !s.IsRunning() { + return + } + + job.mu.Lock() + + if job.function == nil { + job.mu.Unlock() + s.Remove(job) + return + } + + defer job.mu.Unlock() + + if job.runWithDetails { + switch len(job.parameters) { + case job.parametersLen: + job.parameters = append(job.parameters, job.copy()) + case job.parametersLen + 1: + job.parameters[job.parametersLen] = job.copy() + default: + // something is really wrong and we should never get here + job.error = wrapOrError(job.error, ErrInvalidFunctionParameters) + return + } + } + + s.executor.jobFunctions <- job.jobFunction.copy() + job.runCount++ +} + +func (s *Scheduler) runContinuous(job *Job) { + shouldRun, next := s.scheduleNextRun(job) + if !shouldRun { + return + } + + if !job.getStartsImmediately() { + job.setStartsImmediately(true) + } else { + s.run(job) + } + + nextRun := next.dateTime.Sub(s.now()) + if nextRun < 0 { + time.Sleep(absDuration(nextRun)) + shouldRun, next := s.scheduleNextRun(job) + if !shouldRun { + return + } + nextRun = next.dateTime.Sub(s.now()) + } + + job.setTimer(s.timer(nextRun, func() { + if !next.dateTime.IsZero() { + for { + 
n := s.now().UnixNano() - next.dateTime.UnixNano() + if n >= 0 { + break + } + s.time.Sleep(time.Duration(n)) + } + } + s.runContinuous(job) + })) +} + +// RunAll run all Jobs regardless if they are scheduled to run or not +func (s *Scheduler) RunAll() { + s.RunAllWithDelay(0) +} + +// RunAllWithDelay runs all jobs with the provided delay in between each job +func (s *Scheduler) RunAllWithDelay(d time.Duration) { + for _, job := range s.Jobs() { + s.run(job) + s.time.Sleep(d) + } +} + +// RunByTag runs all the jobs containing a specific tag +// regardless of whether they are scheduled to run or not +func (s *Scheduler) RunByTag(tag string) error { + return s.RunByTagWithDelay(tag, 0) +} + +// RunByTagWithDelay is same as RunByTag but introduces a delay between +// each job execution +func (s *Scheduler) RunByTagWithDelay(tag string, d time.Duration) error { + jobs, err := s.FindJobsByTag(tag) + if err != nil { + return err + } + for _, job := range jobs { + s.run(job) + s.time.Sleep(d) + } + return nil +} + +// Remove specific Job by function +// +// Removing a job stops that job's timer. However, if a job has already +// been started by by the job's timer before being removed, there is no way to stop +// it through gocron as https://pkg.go.dev/time#Timer.Stop explains. +// The job function would need to have implemented a means of +// stopping, e.g. using a context.WithCancel(). 
+func (s *Scheduler) Remove(job any) { + fName := getFunctionName(job) + j := s.findJobByTaskName(fName) + s.removeJobsUniqueTags(j) + s.removeByCondition(func(someJob *Job) bool { + return someJob.name == fName + }) +} + +// RemoveByReference removes specific Job by reference +func (s *Scheduler) RemoveByReference(job *Job) { + s.removeJobsUniqueTags(job) + s.removeByCondition(func(someJob *Job) bool { + job.mu.RLock() + defer job.mu.RUnlock() + return someJob == job + }) +} + +func (s *Scheduler) findJobByTaskName(name string) *Job { + for _, job := range s.Jobs() { + if job.name == name { + return job + } + } + return nil +} + +func (s *Scheduler) removeJobsUniqueTags(job *Job) { + if job == nil { + return + } + if s.tagsUnique && len(job.tags) > 0 { + for _, tag := range job.tags { + s.tags.Delete(tag) + } + } +} + +func (s *Scheduler) removeByCondition(shouldRemove func(*Job) bool) { + retainedJobs := make([]*Job, 0) + for _, job := range s.Jobs() { + if !shouldRemove(job) { + retainedJobs = append(retainedJobs, job) + } else { + job.stop() + } + } + s.setJobs(retainedJobs) +} + +// RemoveByTag will remove Jobs that match the given tag. +func (s *Scheduler) RemoveByTag(tag string) error { + return s.RemoveByTags(tag) +} + +// RemoveByTags will remove Jobs that match all given tags. +func (s *Scheduler) RemoveByTags(tags ...string) error { + jobs, err := s.FindJobsByTag(tags...) + if err != nil { + return err + } + + for _, job := range jobs { + s.RemoveByReference(job) + } + return nil +} + +// RemoveByTagsAny will remove Jobs that match any one of the given tags. 
+func (s *Scheduler) RemoveByTagsAny(tags ...string) error { + var errs error + mJob := make(map[*Job]struct{}) + for _, tag := range tags { + jobs, err := s.FindJobsByTag(tag) + if err != nil { + errs = wrapOrError(errs, fmt.Errorf("%s: %s", err.Error(), tag)) + } + for _, job := range jobs { + mJob[job] = struct{}{} + } + } + + for job := range mJob { + s.RemoveByReference(job) + } + + return errs +} + +// FindJobsByTag will return a slice of Jobs that match all given tags +func (s *Scheduler) FindJobsByTag(tags ...string) ([]*Job, error) { + var jobs []*Job + +Jobs: + for _, job := range s.Jobs() { + if job.hasTags(tags...) { + jobs = append(jobs, job) + continue Jobs + } + } + + if len(jobs) > 0 { + return jobs, nil + } + return nil, ErrJobNotFoundWithTag +} + +// MonthFirstWeekday sets the job to run the first specified weekday of the month +func (s *Scheduler) MonthFirstWeekday(weekday time.Weekday) *Scheduler { + _, month, day := s.time.Now(time.UTC).Date() + + if day < 7 { + return s.Cron(fmt.Sprintf("0 0 %d %d %d", day, month, weekday)) + } + + return s.Cron(fmt.Sprintf("0 0 %d %d %d", day, month+1, weekday)) +} + +// LimitRunsTo limits the number of executions of this job to n. +// Upon reaching the limit, the job is removed from the scheduler. +func (s *Scheduler) LimitRunsTo(i int) *Scheduler { + job := s.getCurrentJob() + job.LimitRunsTo(i) + return s +} + +// SingletonMode prevents a new job from starting if the prior job has not yet +// completed its run +func (s *Scheduler) SingletonMode() *Scheduler { + job := s.getCurrentJob() + job.SingletonMode() + return s +} + +// SingletonModeAll prevents new jobs from starting if the prior instance of the +// particular job has not yet completed its run +func (s *Scheduler) SingletonModeAll() { + s.singletonMode = true +} + +// TaskPresent checks if specific job's function was added to the scheduler. 
+func (s *Scheduler) TaskPresent(j any) bool { + for _, job := range s.Jobs() { + if job.name == getFunctionName(j) { + return true + } + } + return false +} + +// To avoid the recursive read lock on s.Jobs() and this function, +// creating this new function and distributing the lock between jobPresent, _jobPresent +func (s *Scheduler) _jobPresent(j *Job, jobs []*Job) bool { + s.jobsMutex.RLock() + defer s.jobsMutex.RUnlock() + for _, job := range jobs { + if job == j { + return true + } + } + return false +} + +func (s *Scheduler) jobPresent(j *Job) bool { + return s._jobPresent(j, s.Jobs()) +} + +// Clear clears all Jobs from this scheduler +func (s *Scheduler) Clear() { + for _, job := range s.Jobs() { + job.stop() + } + s.setJobs(make([]*Job, 0)) + // If unique tags was enabled, delete all the tags loaded in the tags sync.Map + if s.tagsUnique { + s.tags.Range(func(key any, value any) bool { + s.tags.Delete(key) + return true + }) + } +} + +// Stop stops the scheduler. This is a no-op if the scheduler is already stopped. +// It waits for all running jobs to finish before returning, so it is safe to assume that running jobs will finish when calling this. 
+func (s *Scheduler) Stop() { + if s.IsRunning() { + s.stop() + } +} + +func (s *Scheduler) stop() { + s.setRunning(false) + s.stopJobs(s.jobs) + s.executor.stop() + s.StopBlockingChan() +} + +func (s *Scheduler) stopJobs(jobs []*Job) { + for _, job := range jobs { + job.stop() + } +} + +func (s *Scheduler) doCommon(jobFun any, params ...any) (*Job, error) { + job := s.getCurrentJob() + + jobUnit := job.getUnit() + jobLastRun := job.LastRun() + if job.getAtTime(jobLastRun) != 0 && (jobUnit <= hours || jobUnit >= duration) { + job.error = wrapOrError(job.error, ErrAtTimeNotSupported) + } + + if len(job.scheduledWeekdays) != 0 && jobUnit != weeks { + job.error = wrapOrError(job.error, ErrWeekdayNotSupported) + } + + if job.unit != crontab && job.getInterval() == 0 { + if job.unit != duration { + job.error = wrapOrError(job.error, ErrInvalidInterval) + } + } + + if job.error != nil { + // delete the job from the scheduler as this job + // cannot be executed + s.RemoveByReference(job) + return nil, job.error + } + + typ := reflect.TypeOf(jobFun) + if typ.Kind() != reflect.Func { + // delete the job for the same reason as above + s.RemoveByReference(job) + return nil, ErrNotAFunction + } + + fname := getFunctionName(jobFun) + if job.name != fname { + job.function = jobFun + job.parameters = params + job.name = fname + } + + f := reflect.ValueOf(jobFun) + expectedParamLength := f.Type().NumIn() + if job.runWithDetails { + expectedParamLength-- + } + + if len(params) != expectedParamLength { + s.RemoveByReference(job) + job.error = wrapOrError(job.error, ErrWrongParams) + return nil, job.error + } + + if job.runWithDetails && f.Type().In(len(params)).Kind() != reflect.ValueOf(*job).Kind() { + s.RemoveByReference(job) + job.error = wrapOrError(job.error, ErrDoWithJobDetails) + return nil, job.error + } + + // we should not schedule if not running since we can't foresee how long it will take for the scheduler to start + if s.IsRunning() { + s.runContinuous(job) + } + + 
return job, nil +} + +// Do specifies the jobFunc that should be called every time the Job runs +func (s *Scheduler) Do(jobFun any, params ...any) (*Job, error) { + return s.doCommon(jobFun, params...) +} + +// DoWithJobDetails specifies the jobFunc that should be called every time the Job runs +// and additionally passes the details of the current job to the jobFunc. +// The last argument of the function must be a gocron.Job that will be passed by +// the scheduler when the function is called. +func (s *Scheduler) DoWithJobDetails(jobFun any, params ...any) (*Job, error) { + job := s.getCurrentJob() + job.runWithDetails = true + job.parametersLen = len(params) + return s.doCommon(jobFun, params...) +} + +// At schedules the Job at a specific time of day in the form "HH:MM:SS" or "HH:MM" +// or time.Time (note that only the hours, minutes, seconds and nanos are used). +func (s *Scheduler) At(i any) *Scheduler { + job := s.getCurrentJob() + + switch t := i.(type) { + case string: + for _, tt := range strings.Split(t, ";") { + hour, min, sec, err := parseTime(tt) + if err != nil { + job.error = wrapOrError(job.error, err) + return s + } + // save atTime start as duration from midnight + job.addAtTime(time.Duration(hour)*time.Hour + time.Duration(min)*time.Minute + time.Duration(sec)*time.Second) + } + case time.Time: + job.addAtTime(time.Duration(t.Hour())*time.Hour + time.Duration(t.Minute())*time.Minute + time.Duration(t.Second())*time.Second + time.Duration(t.Nanosecond())*time.Nanosecond) + default: + job.error = wrapOrError(job.error, ErrUnsupportedTimeFormat) + } + job.startsImmediately = false + return s +} + +// Tag will add a tag when creating a job. +func (s *Scheduler) Tag(t ...string) *Scheduler { + job := s.getCurrentJob() + + if s.tagsUnique { + for _, tag := range t { + if _, ok := s.tags.Load(tag); ok { + job.error = wrapOrError(job.error, ErrTagsUnique(tag)) + return s + } + s.tags.Store(tag, struct{}{}) + } + } + + job.tags = append(job.tags, t...) 
+ return s +} + +// StartAt schedules the next run of the Job. If this time is in the past, the configured interval will be used +// to calculate the next future time +func (s *Scheduler) StartAt(t time.Time) *Scheduler { + job := s.getCurrentJob() + job.setStartAtTime(t) + job.startsImmediately = false + return s +} + +// setUnit sets the unit type +func (s *Scheduler) setUnit(unit schedulingUnit) { + job := s.getCurrentJob() + currentUnit := job.getUnit() + if currentUnit == duration || currentUnit == crontab { + job.error = wrapOrError(job.error, ErrInvalidIntervalUnitsSelection) + return + } + job.setUnit(unit) +} + +// Millisecond sets the unit with seconds +func (s *Scheduler) Millisecond() *Scheduler { + return s.Milliseconds() +} + +// Milliseconds sets the unit with seconds +func (s *Scheduler) Milliseconds() *Scheduler { + s.setUnit(milliseconds) + return s +} + +// Second sets the unit with seconds +func (s *Scheduler) Second() *Scheduler { + return s.Seconds() +} + +// Seconds sets the unit with seconds +func (s *Scheduler) Seconds() *Scheduler { + s.setUnit(seconds) + return s +} + +// Minute sets the unit with minutes +func (s *Scheduler) Minute() *Scheduler { + return s.Minutes() +} + +// Minutes sets the unit with minutes +func (s *Scheduler) Minutes() *Scheduler { + s.setUnit(minutes) + return s +} + +// Hour sets the unit with hours +func (s *Scheduler) Hour() *Scheduler { + return s.Hours() +} + +// Hours sets the unit with hours +func (s *Scheduler) Hours() *Scheduler { + s.setUnit(hours) + return s +} + +// Day sets the unit with days +func (s *Scheduler) Day() *Scheduler { + s.setUnit(days) + return s +} + +// Days set the unit with days +func (s *Scheduler) Days() *Scheduler { + s.setUnit(days) + return s +} + +// Week sets the unit with weeks +func (s *Scheduler) Week() *Scheduler { + s.setUnit(weeks) + return s +} + +// Weeks sets the unit with weeks +func (s *Scheduler) Weeks() *Scheduler { + s.setUnit(weeks) + return s +} + +// Month sets 
the unit with months
func (s *Scheduler) Month(daysOfMonth ...int) *Scheduler {
	return s.Months(daysOfMonth...)
}

// MonthLastDay sets the unit with months at every last day of the month
func (s *Scheduler) MonthLastDay() *Scheduler {
	return s.Months(-1)
}

// Months sets the unit with months
// Note: Only days 1 through 28 are allowed for monthly schedules
// Note: Multiple add same days of month cannot be allowed
// Note: -1 is a special value and can only occur as single argument
func (s *Scheduler) Months(daysOfTheMonth ...int) *Scheduler {
	job := s.getCurrentJob()

	switch {
	case len(daysOfTheMonth) == 0:
		job.error = wrapOrError(job.error, ErrInvalidDayOfMonthEntry)
	case len(daysOfTheMonth) == 1:
		// a single day may also be the special last-day marker -1
		d := daysOfTheMonth[0]
		if d != -1 && (d < 1 || d > 28) {
			job.error = wrapOrError(job.error, ErrInvalidDayOfMonthEntry)
		}
	default:
		seen := make(map[int]struct{})
		for _, d := range daysOfTheMonth {
			if d < 1 || d > 28 {
				job.error = wrapOrError(job.error, ErrInvalidDayOfMonthEntry)
				break
			}

			// reject days already configured on the job
			for _, existing := range job.daysOfTheMonth {
				if existing == d {
					job.error = wrapOrError(job.error, ErrInvalidDaysOfMonthDuplicateValue)
					break
				}
			}

			// reject duplicates within this call
			if _, dup := seen[d]; dup {
				job.error = wrapOrError(job.error, ErrInvalidDaysOfMonthDuplicateValue)
				break
			}
			seen[d] = struct{}{}
		}
	}

	if job.daysOfTheMonth == nil {
		job.daysOfTheMonth = make([]int, 0)
	}
	job.daysOfTheMonth = append(job.daysOfTheMonth, daysOfTheMonth...)
	job.startsImmediately = false
	s.setUnit(months)
	return s
}

// NOTE: If the dayOfTheMonth for the above two functions is
// more than the number of days in that month, the extra day(s)
// spill over to the next month.
Similarly, if it's less than 0,
// it will go back to the month before

// Weekday sets the scheduledWeekdays with a specific weekday
func (s *Scheduler) Weekday(weekDay time.Weekday) *Scheduler {
	job := s.getCurrentJob()

	// only record each weekday once
	if !in(job.scheduledWeekdays, weekDay) {
		job.scheduledWeekdays = append(job.scheduledWeekdays, weekDay)
	}

	job.startsImmediately = false
	s.setUnit(weeks)
	return s
}

// Midday schedules the job at 12:00.
func (s *Scheduler) Midday() *Scheduler {
	return s.At("12:00")
}

// Monday sets the start day as Monday
func (s *Scheduler) Monday() *Scheduler {
	return s.Weekday(time.Monday)
}

// Tuesday sets the start day as Tuesday
func (s *Scheduler) Tuesday() *Scheduler {
	return s.Weekday(time.Tuesday)
}

// Wednesday sets the start day as Wednesday
func (s *Scheduler) Wednesday() *Scheduler {
	return s.Weekday(time.Wednesday)
}

// Thursday sets the start day as Thursday
func (s *Scheduler) Thursday() *Scheduler {
	return s.Weekday(time.Thursday)
}

// Friday sets the start day as Friday
func (s *Scheduler) Friday() *Scheduler {
	return s.Weekday(time.Friday)
}

// Saturday sets the start day as Saturday
func (s *Scheduler) Saturday() *Scheduler {
	return s.Weekday(time.Saturday)
}

// Sunday sets the start day as Sunday
func (s *Scheduler) Sunday() *Scheduler {
	return s.Weekday(time.Sunday)
}

// getCurrentJob returns the job currently being configured by the
// fluent chain, creating one lazily when the scheduler has none yet.
func (s *Scheduler) getCurrentJob() *Job {
	if len(s.Jobs()) == 0 {
		s.setJobs([]*Job{s.newJob(0)})
		s.jobCreated = true
	}

	s.jobsMutex.RLock()
	defer s.jobsMutex.RUnlock()
	return s.jobs[len(s.jobs)-1]
}

func (s *Scheduler) now() time.Time {
	return s.time.Now(s.Location())
}

// TagsUnique forces job tags to be unique across the scheduler
// when adding tags with (s *Scheduler) Tag().
+// This does not enforce uniqueness on tags added via +// (j *Job) Tag() +func (s *Scheduler) TagsUnique() { + s.tagsUnique = true +} + +// Job puts the provided job in focus for the purpose +// of making changes to the job with the scheduler chain +// and finalized by calling Update() +func (s *Scheduler) Job(j *Job) *Scheduler { + jobs := s.Jobs() + for index, job := range jobs { + if job == j { + // the current job is always last, so put this job there + s.Swap(len(jobs)-1, index) + } + } + s.updateJob = true + return s +} + +// Update stops the job (if running) and starts it with any updates +// that were made to the job in the scheduler chain. Job() must be +// called first to put the given job in focus. +func (s *Scheduler) Update() (*Job, error) { + job := s.getCurrentJob() + + if !s.updateJob { + return job, wrapOrError(job.error, ErrUpdateCalledWithoutJob) + } + s.updateJob = false + job.stop() + job.ctx, job.cancel = context.WithCancel(context.Background()) + job.setStartsImmediately(false) + + if job.runWithDetails { + return s.DoWithJobDetails(job.function, job.parameters...) + } + + return s.Do(job.function, job.parameters...) 
+} + +func (s *Scheduler) Cron(cronExpression string) *Scheduler { + return s.cron(cronExpression, false) +} + +func (s *Scheduler) CronWithSeconds(cronExpression string) *Scheduler { + return s.cron(cronExpression, true) +} + +func (s *Scheduler) cron(cronExpression string, withSeconds bool) *Scheduler { + job := s.newJob(0) + if s.updateJob || s.jobCreated { + job = s.getCurrentJob() + } + + var withLocation string + if strings.HasPrefix(cronExpression, "TZ=") || strings.HasPrefix(cronExpression, "CRON_TZ=") { + withLocation = cronExpression + } else { + withLocation = fmt.Sprintf("CRON_TZ=%s %s", s.location.String(), cronExpression) + } + + var ( + cronSchedule cron.Schedule + err error + ) + + if withSeconds { + p := cron.NewParser(cron.Second | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor) + cronSchedule, err = p.Parse(withLocation) + } else { + cronSchedule, err = cron.ParseStandard(withLocation) + } + + if err != nil { + job.error = wrapOrError(err, ErrCronParseFailure) + } + + job.cronSchedule = cronSchedule + job.setUnit(crontab) + job.startsImmediately = false + + if s.updateJob || s.jobCreated { + s.setJobs(append(s.Jobs()[:len(s.Jobs())-1], job)) + s.jobCreated = false + } else { + s.setJobs(append(s.Jobs(), job)) + } + return s +} + +func (s *Scheduler) newJob(interval int) *Job { + return newJob(interval, !s.waitForInterval, s.singletonMode) +} + +// WaitForScheduleAll defaults the scheduler to create all +// new jobs with the WaitForSchedule option as true. +// The jobs will not start immediately but rather will +// wait until their first scheduled interval. +func (s *Scheduler) WaitForScheduleAll() { + s.waitForInterval = true +} + +// WaitForSchedule sets the job to not start immediately +// but rather wait until the first scheduled interval. 
+func (s *Scheduler) WaitForSchedule() *Scheduler { + job := s.getCurrentJob() + job.startsImmediately = false + return s +} + +// StartImmediately sets the job to run immediately upon +// starting the scheduler or adding the job to a running +// scheduler. This overrides the jobs start status of any +// previously called methods in the chain. +// +// Note: This is the default behavior of the scheduler +// for most jobs, but is useful for overriding the default +// behavior of Cron scheduled jobs which default to +// WaitForSchedule. +func (s *Scheduler) StartImmediately() *Scheduler { + job := s.getCurrentJob() + job.startsImmediately = true + return s +} + +// CustomTime takes an in a struct that implements the TimeWrapper interface +// allowing the caller to mock the time used by the scheduler. This is useful +// for tests relying on gocron. +func (s *Scheduler) CustomTime(customTimeWrapper TimeWrapper) { + s.time = customTimeWrapper +} + +// CustomTimer takes in a function that mirrors the time.AfterFunc +// This is used to mock the time.AfterFunc function used by the scheduler +// for testing long intervals in a short amount of time. +func (s *Scheduler) CustomTimer(customTimer func(d time.Duration, f func()) *time.Timer) { + s.timer = customTimer +} + +func (s *Scheduler) StopBlockingChan() { + s.startBlockingStopChanMutex.Lock() + if s.startBlockingStopChan != nil { + s.startBlockingStopChan <- struct{}{} + } + s.startBlockingStopChanMutex.Unlock() +} diff --git a/vendor/github.com/go-co-op/gocron/timeHelper.go b/vendor/github.com/go-co-op/gocron/timeHelper.go new file mode 100644 index 00000000..487a7a2a --- /dev/null +++ b/vendor/github.com/go-co-op/gocron/timeHelper.go @@ -0,0 +1,33 @@ +package gocron + +import "time" + +var _ TimeWrapper = (*trueTime)(nil) + +// TimeWrapper is an interface that wraps the Now, Sleep, and Unix methods of the time package. +// This allows the library and users to mock the time package for testing. 
+type TimeWrapper interface { + Now(*time.Location) time.Time + Unix(int64, int64) time.Time + Sleep(time.Duration) +} + +type trueTime struct{} + +func (t *trueTime) Now(location *time.Location) time.Time { + return time.Now().In(location) +} + +func (t *trueTime) Unix(sec int64, nsec int64) time.Time { + return time.Unix(sec, nsec) +} + +func (t *trueTime) Sleep(d time.Duration) { + time.Sleep(d) +} + +// afterFunc proxies the time.AfterFunc function. +// This allows it to be mocked for testing. +func afterFunc(d time.Duration, f func()) *time.Timer { + return time.AfterFunc(d, f) +} diff --git a/vendor/github.com/go-logr/logr/.golangci.yaml b/vendor/github.com/go-logr/logr/.golangci.yaml new file mode 100644 index 00000000..94ff801d --- /dev/null +++ b/vendor/github.com/go-logr/logr/.golangci.yaml @@ -0,0 +1,29 @@ +run: + timeout: 1m + tests: true + +linters: + disable-all: true + enable: + - asciicheck + - deadcode + - errcheck + - forcetypeassert + - gocritic + - gofmt + - goimports + - gosimple + - govet + - ineffassign + - misspell + - revive + - staticcheck + - structcheck + - typecheck + - unused + - varcheck + +issues: + exclude-use-default: false + max-issues-per-linter: 0 + max-same-issues: 10 diff --git a/vendor/github.com/go-logr/logr/CHANGELOG.md b/vendor/github.com/go-logr/logr/CHANGELOG.md new file mode 100644 index 00000000..c3569600 --- /dev/null +++ b/vendor/github.com/go-logr/logr/CHANGELOG.md @@ -0,0 +1,6 @@ +# CHANGELOG + +## v1.0.0-rc1 + +This is the first logged release. Major changes (including breaking changes) +have occurred since earlier tags. diff --git a/vendor/github.com/go-logr/logr/CONTRIBUTING.md b/vendor/github.com/go-logr/logr/CONTRIBUTING.md new file mode 100644 index 00000000..5d37e294 --- /dev/null +++ b/vendor/github.com/go-logr/logr/CONTRIBUTING.md @@ -0,0 +1,17 @@ +# Contributing + +Logr is open to pull-requests, provided they fit within the intended scope of +the project. 
Specifically, this library aims to be VERY small and minimalist, +with no external dependencies. + +## Compatibility + +This project intends to follow [semantic versioning](http://semver.org) and +is very strict about compatibility. Any proposed changes MUST follow those +rules. + +## Performance + +As a logging library, logr must be as light-weight as possible. Any proposed +code change must include results of running the [benchmark](./benchmark) +before and after the change. diff --git a/vendor/github.com/go-logr/logr/LICENSE b/vendor/github.com/go-logr/logr/LICENSE new file mode 100644 index 00000000..8dada3ed --- /dev/null +++ b/vendor/github.com/go-logr/logr/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md new file mode 100644 index 00000000..ab593118 --- /dev/null +++ b/vendor/github.com/go-logr/logr/README.md @@ -0,0 +1,282 @@ +# A minimal logging API for Go + +[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/logr.svg)](https://pkg.go.dev/github.com/go-logr/logr) + +logr offers an(other) opinion on how Go programs and libraries can do logging +without becoming coupled to a particular logging implementation. This is not +an implementation of logging - it is an API. In fact it is two APIs with two +different sets of users. + +The `Logger` type is intended for application and library authors. It provides +a relatively small API which can be used everywhere you want to emit logs. It +defers the actual act of writing logs (to files, to stdout, or whatever) to the +`LogSink` interface. + +The `LogSink` interface is intended for logging library implementers. It is a +pure interface which can be implemented by logging frameworks to provide the actual logging +functionality. + +This decoupling allows application and library developers to write code in +terms of `logr.Logger` (which has very low dependency fan-out) while the +implementation of logging is managed "up stack" (e.g. in or near `main()`.) +Application developers can then switch out implementations as necessary. 
+ +Many people assert that libraries should not be logging, and as such efforts +like this are pointless. Those people are welcome to convince the authors of +the tens-of-thousands of libraries that *DO* write logs that they are all +wrong. In the meantime, logr takes a more practical approach. + +## Typical usage + +Somewhere, early in an application's life, it will make a decision about which +logging library (implementation) it actually wants to use. Something like: + +``` + func main() { + // ... other setup code ... + + // Create the "root" logger. We have chosen the "logimpl" implementation, + // which takes some initial parameters and returns a logr.Logger. + logger := logimpl.New(param1, param2) + + // ... other setup code ... +``` + +Most apps will call into other libraries, create structures to govern the flow, +etc. The `logr.Logger` object can be passed to these other libraries, stored +in structs, or even used as a package-global variable, if needed. For example: + +``` + app := createTheAppObject(logger) + app.Run() +``` + +Outside of this early setup, no other packages need to know about the choice of +implementation. They write logs in terms of the `logr.Logger` that they +received: + +``` + type appObject struct { + // ... other fields ... + logger logr.Logger + // ... other fields ... + } + + func (app *appObject) Run() { + app.logger.Info("starting up", "timestamp", time.Now()) + + // ... app code ... +``` + +## Background + +If the Go standard library had defined an interface for logging, this project +probably would not be needed. Alas, here we are. + +### Inspiration + +Before you consider this package, please read [this blog post by the +inimitable Dave Cheney][warning-makes-no-sense]. We really appreciate what +he has to say, and it largely aligns with our own experiences. + +### Differences from Dave's ideas + +The main differences are: + +1. Dave basically proposes doing away with the notion of a logging API in favor +of `fmt.Printf()`. 
We disagree, especially when you consider things like output +locations, timestamps, file and line decorations, and structured logging. This +package restricts the logging API to just 2 types of logs: info and error. + +Info logs are things you want to tell the user which are not errors. Error +logs are, well, errors. If your code receives an `error` from a subordinate +function call and is logging that `error` *and not returning it*, use error +logs. + +2. Verbosity-levels on info logs. This gives developers a chance to indicate +arbitrary grades of importance for info logs, without assigning names with +semantic meaning such as "warning", "trace", and "debug." Superficially this +may feel very similar, but the primary difference is the lack of semantics. +Because verbosity is a numerical value, it's safe to assume that an app running +with higher verbosity means more (and less important) logs will be generated. + +## Implementations (non-exhaustive) + +There are implementations for the following logging libraries: + +- **a function** (can bridge to non-structured libraries): [funcr](https://github.com/go-logr/logr/tree/master/funcr) +- **a testing.T** (for use in Go tests, with JSON-like output): [testr](https://github.com/go-logr/logr/tree/master/testr) +- **github.com/google/glog**: [glogr](https://github.com/go-logr/glogr) +- **k8s.io/klog** (for Kubernetes): [klogr](https://git.k8s.io/klog/klogr) +- **a testing.T** (with klog-like text output): [ktesting](https://git.k8s.io/klog/ktesting) +- **go.uber.org/zap**: [zapr](https://github.com/go-logr/zapr) +- **log** (the Go standard library logger): [stdr](https://github.com/go-logr/stdr) +- **github.com/sirupsen/logrus**: [logrusr](https://github.com/bombsimon/logrusr) +- **github.com/wojas/genericr**: [genericr](https://github.com/wojas/genericr) (makes it easy to implement your own backend) +- **logfmt** (Heroku style [logging](https://www.brandur.org/logfmt)): [logfmtr](https://github.com/iand/logfmtr) +- 
**github.com/rs/zerolog**: [zerologr](https://github.com/go-logr/zerologr) +- **github.com/go-kit/log**: [gokitlogr](https://github.com/tonglil/gokitlogr) (also compatible with github.com/go-kit/kit/log since v0.12.0) +- **bytes.Buffer** (writing to a buffer): [bufrlogr](https://github.com/tonglil/buflogr) (useful for ensuring values were logged, like during testing) + +## FAQ + +### Conceptual + +#### Why structured logging? + +- **Structured logs are more easily queryable**: Since you've got + key-value pairs, it's much easier to query your structured logs for + particular values by filtering on the contents of a particular key -- + think searching request logs for error codes, Kubernetes reconcilers for + the name and namespace of the reconciled object, etc. + +- **Structured logging makes it easier to have cross-referenceable logs**: + Similarly to searchability, if you maintain conventions around your + keys, it becomes easy to gather all log lines related to a particular + concept. + +- **Structured logs allow better dimensions of filtering**: if you have + structure to your logs, you've got more precise control over how much + information is logged -- you might choose in a particular configuration + to log certain keys but not others, only log lines where a certain key + matches a certain value, etc., instead of just having v-levels and names + to key off of. + +- **Structured logs better represent structured data**: sometimes, the + data that you want to log is inherently structured (think tuple-link + objects.) Structured logs allow you to preserve that structure when + outputting. + +#### Why V-levels? + +**V-levels give operators an easy way to control the chattiness of log +operations**. V-levels provide a way for a given package to distinguish +the relative importance or verbosity of a given log message. Then, if +a particular logger or package is logging too many messages, the user +of the package can simply change the v-levels for that library. 
+ +#### Why not named levels, like Info/Warning/Error? + +Read [Dave Cheney's post][warning-makes-no-sense]. Then read [Differences +from Dave's ideas](#differences-from-daves-ideas). + +#### Why not allow format strings, too? + +**Format strings negate many of the benefits of structured logs**: + +- They're not easily searchable without resorting to fuzzy searching, + regular expressions, etc. + +- They don't store structured data well, since contents are flattened into + a string. + +- They're not cross-referenceable. + +- They don't compress easily, since the message is not constant. + +(Unless you turn positional parameters into key-value pairs with numerical +keys, at which point you've gotten key-value logging with meaningless +keys.) + +### Practical + +#### Why key-value pairs, and not a map? + +Key-value pairs are *much* easier to optimize, especially around +allocations. Zap (a structured logger that inspired logr's interface) has +[performance measurements](https://github.com/uber-go/zap#performance) +that show this quite nicely. + +While the interface ends up being a little less obvious, you get +potentially better performance, plus avoid making users type +`map[string]string{}` every time they want to log. + +#### What if my V-levels differ between libraries? + +That's fine. Control your V-levels on a per-logger basis, and use the +`WithName` method to pass different loggers to different libraries. + +Generally, you should take care to ensure that you have relatively +consistent V-levels within a given logger, however, as this makes deciding +on what verbosity of logs to request easier. + +#### But I really want to use a format string! + +That's not actually a question. Assuming your question is "how do +I convert my mental model of logging with format strings to logging with +constant messages": + +1. Figure out what the error actually is, as you'd write in a TL;DR style, + and use that as a message. + +2. 
For every place you'd write a format specifier, look to the word before + it, and add that as a key value pair. + +For instance, consider the following examples (all taken from spots in the +Kubernetes codebase): + +- `klog.V(4).Infof("Client is returning errors: code %v, error %v", + responseCode, err)` becomes `logger.Error(err, "client returned an + error", "code", responseCode)` + +- `klog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v", + seconds, retries, url)` becomes `logger.V(4).Info("got a retry-after + response when requesting url", "attempt", retries, "after + seconds", seconds, "url", url)` + +If you *really* must use a format string, use it in a key's value, and +call `fmt.Sprintf` yourself. For instance: `log.Printf("unable to +reflect over type %T")` becomes `logger.Info("unable to reflect over +type", "type", fmt.Sprintf("%T"))`. In general though, the cases where +this is necessary should be few and far between. + +#### How do I choose my V-levels? + +This is basically the only hard constraint: increase V-levels to denote +more verbose or more debug-y logs. + +Otherwise, you can start out with `0` as "you always want to see this", +`1` as "common logging that you might *possibly* want to turn off", and +`10` as "I would like to performance-test your log collection stack." + +Then gradually choose levels in between as you need them, working your way +down from 10 (for debug and trace style logs) and up from 1 (for chattier +info-type logs.) + +#### How do I choose my keys? + +Keys are fairly flexible, and can hold more or less any string +value. For best compatibility with implementations and consistency +with existing code in other projects, there are a few conventions you +should consider. + +- Make your keys human-readable. +- Constant keys are generally a good idea. +- Be consistent across your codebase. +- Keys should naturally match parts of the message string. 
+- Use lower case for simple keys and + [lowerCamelCase](https://en.wiktionary.org/wiki/lowerCamelCase) for + more complex ones. Kubernetes is one example of a project that has + [adopted that + convention](https://github.com/kubernetes/community/blob/HEAD/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments). + +While key names are mostly unrestricted (and spaces are acceptable), +it's generally a good idea to stick to printable ascii characters, or at +least match the general character set of your log lines. + +#### Why should keys be constant values? + +The point of structured logging is to make later log processing easier. Your +keys are, effectively, the schema of each log message. If you use different +keys across instances of the same log line, you will make your structured logs +much harder to use. `Sprintf()` is for values, not for keys! + +#### Why is this not a pure interface? + +The Logger type is implemented as a struct in order to allow the Go compiler to +optimize things like high-V `Info` logs that are not triggered. Not all of +these implementations are implemented yet, but this structure was suggested as +a way to ensure they *can* be implemented. All of the real work is behind the +`LogSink` interface. + +[warning-makes-no-sense]: http://dave.cheney.net/2015/11/05/lets-talk-about-logging diff --git a/vendor/github.com/go-logr/logr/discard.go b/vendor/github.com/go-logr/logr/discard.go new file mode 100644 index 00000000..9d92a38f --- /dev/null +++ b/vendor/github.com/go-logr/logr/discard.go @@ -0,0 +1,54 @@ +/* +Copyright 2020 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logr + +// Discard returns a Logger that discards all messages logged to it. It can be +// used whenever the caller is not interested in the logs. Logger instances +// produced by this function always compare as equal. +func Discard() Logger { + return Logger{ + level: 0, + sink: discardLogSink{}, + } +} + +// discardLogSink is a LogSink that discards all messages. +type discardLogSink struct{} + +// Verify that it actually implements the interface +var _ LogSink = discardLogSink{} + +func (l discardLogSink) Init(RuntimeInfo) { +} + +func (l discardLogSink) Enabled(int) bool { + return false +} + +func (l discardLogSink) Info(int, string, ...interface{}) { +} + +func (l discardLogSink) Error(error, string, ...interface{}) { +} + +func (l discardLogSink) WithValues(...interface{}) LogSink { + return l +} + +func (l discardLogSink) WithName(string) LogSink { + return l +} diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go new file mode 100644 index 00000000..7accdb0c --- /dev/null +++ b/vendor/github.com/go-logr/logr/funcr/funcr.go @@ -0,0 +1,787 @@ +/* +Copyright 2021 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package funcr implements formatting of structured log messages and +// optionally captures the call site and timestamp. +// +// The simplest way to use it is via its implementation of a +// github.com/go-logr/logr.LogSink with output through an arbitrary +// "write" function. See New and NewJSON for details. +// +// Custom LogSinks +// +// For users who need more control, a funcr.Formatter can be embedded inside +// your own custom LogSink implementation. This is useful when the LogSink +// needs to implement additional methods, for example. +// +// Formatting +// +// This will respect logr.Marshaler, fmt.Stringer, and error interfaces for +// values which are being logged. When rendering a struct, funcr will use Go's +// standard JSON tags (all except "string"). +package funcr + +import ( + "bytes" + "encoding" + "fmt" + "path/filepath" + "reflect" + "runtime" + "strconv" + "strings" + "time" + + "github.com/go-logr/logr" +) + +// New returns a logr.Logger which is implemented by an arbitrary function. +func New(fn func(prefix, args string), opts Options) logr.Logger { + return logr.New(newSink(fn, NewFormatter(opts))) +} + +// NewJSON returns a logr.Logger which is implemented by an arbitrary function +// and produces JSON output. +func NewJSON(fn func(obj string), opts Options) logr.Logger { + fnWrapper := func(_, obj string) { + fn(obj) + } + return logr.New(newSink(fnWrapper, NewFormatterJSON(opts))) +} + +// Underlier exposes access to the underlying logging function. 
Since +// callers only have a logr.Logger, they have to know which +// implementation is in use, so this interface is less of an +// abstraction and more of a way to test type conversion. +type Underlier interface { + GetUnderlying() func(prefix, args string) +} + +func newSink(fn func(prefix, args string), formatter Formatter) logr.LogSink { + l := &fnlogger{ + Formatter: formatter, + write: fn, + } + // For skipping fnlogger.Info and fnlogger.Error. + l.Formatter.AddCallDepth(1) + return l +} + +// Options carries parameters which influence the way logs are generated. +type Options struct { + // LogCaller tells funcr to add a "caller" key to some or all log lines. + // This has some overhead, so some users might not want it. + LogCaller MessageClass + + // LogCallerFunc tells funcr to also log the calling function name. This + // has no effect if caller logging is not enabled (see Options.LogCaller). + LogCallerFunc bool + + // LogTimestamp tells funcr to add a "ts" key to log lines. This has some + // overhead, so some users might not want it. + LogTimestamp bool + + // TimestampFormat tells funcr how to render timestamps when LogTimestamp + // is enabled. If not specified, a default format will be used. For more + // details, see docs for Go's time.Layout. + TimestampFormat string + + // Verbosity tells funcr which V logs to produce. Higher values enable + // more logs. Info logs at or below this level will be written, while logs + // above this level will be discarded. + Verbosity int + + // RenderBuiltinsHook allows users to mutate the list of key-value pairs + // while a log line is being rendered. The kvList argument follows logr + // conventions - each pair of slice elements is comprised of a string key + // and an arbitrary value (verified and sanitized before calling this + // hook). The value returned must follow the same conventions. This hook + // can be used to audit or modify logged data. 
For example, you might want + // to prefix all of funcr's built-in keys with some string. This hook is + // only called for built-in (provided by funcr itself) key-value pairs. + // Equivalent hooks are offered for key-value pairs saved via + // logr.Logger.WithValues or Formatter.AddValues (see RenderValuesHook) and + // for user-provided pairs (see RenderArgsHook). + RenderBuiltinsHook func(kvList []interface{}) []interface{} + + // RenderValuesHook is the same as RenderBuiltinsHook, except that it is + // only called for key-value pairs saved via logr.Logger.WithValues. See + // RenderBuiltinsHook for more details. + RenderValuesHook func(kvList []interface{}) []interface{} + + // RenderArgsHook is the same as RenderBuiltinsHook, except that it is only + // called for key-value pairs passed directly to Info and Error. See + // RenderBuiltinsHook for more details. + RenderArgsHook func(kvList []interface{}) []interface{} + + // MaxLogDepth tells funcr how many levels of nested fields (e.g. a struct + // that contains a struct, etc.) it may log. Every time it finds a struct, + // slice, array, or map the depth is increased by one. When the maximum is + // reached, the value will be converted to a string indicating that the max + // depth has been exceeded. If this field is not specified, a default + // value will be used. + MaxLogDepth int +} + +// MessageClass indicates which category or categories of messages to consider. +type MessageClass int + +const ( + // None ignores all message classes. + None MessageClass = iota + // All considers all message classes. + All + // Info only considers info messages. + Info + // Error only considers error messages. + Error +) + +// fnlogger inherits some of its LogSink implementation from Formatter +// and just needs to add some glue code. 
+type fnlogger struct { + Formatter + write func(prefix, args string) +} + +func (l fnlogger) WithName(name string) logr.LogSink { + l.Formatter.AddName(name) + return &l +} + +func (l fnlogger) WithValues(kvList ...interface{}) logr.LogSink { + l.Formatter.AddValues(kvList) + return &l +} + +func (l fnlogger) WithCallDepth(depth int) logr.LogSink { + l.Formatter.AddCallDepth(depth) + return &l +} + +func (l fnlogger) Info(level int, msg string, kvList ...interface{}) { + prefix, args := l.FormatInfo(level, msg, kvList) + l.write(prefix, args) +} + +func (l fnlogger) Error(err error, msg string, kvList ...interface{}) { + prefix, args := l.FormatError(err, msg, kvList) + l.write(prefix, args) +} + +func (l fnlogger) GetUnderlying() func(prefix, args string) { + return l.write +} + +// Assert conformance to the interfaces. +var _ logr.LogSink = &fnlogger{} +var _ logr.CallDepthLogSink = &fnlogger{} +var _ Underlier = &fnlogger{} + +// NewFormatter constructs a Formatter which emits a JSON-like key=value format. +func NewFormatter(opts Options) Formatter { + return newFormatter(opts, outputKeyValue) +} + +// NewFormatterJSON constructs a Formatter which emits strict JSON. +func NewFormatterJSON(opts Options) Formatter { + return newFormatter(opts, outputJSON) +} + +// Defaults for Options. +const defaultTimestampFormat = "2006-01-02 15:04:05.000000" +const defaultMaxLogDepth = 16 + +func newFormatter(opts Options, outfmt outputFormat) Formatter { + if opts.TimestampFormat == "" { + opts.TimestampFormat = defaultTimestampFormat + } + if opts.MaxLogDepth == 0 { + opts.MaxLogDepth = defaultMaxLogDepth + } + f := Formatter{ + outputFormat: outfmt, + prefix: "", + values: nil, + depth: 0, + opts: opts, + } + return f +} + +// Formatter is an opaque struct which can be embedded in a LogSink +// implementation. It should be constructed with NewFormatter. Some of +// its methods directly implement logr.LogSink. 
+type Formatter struct { + outputFormat outputFormat + prefix string + values []interface{} + valuesStr string + depth int + opts Options +} + +// outputFormat indicates which outputFormat to use. +type outputFormat int + +const ( + // outputKeyValue emits a JSON-like key=value format, but not strict JSON. + outputKeyValue outputFormat = iota + // outputJSON emits strict JSON. + outputJSON +) + +// PseudoStruct is a list of key-value pairs that gets logged as a struct. +type PseudoStruct []interface{} + +// render produces a log line, ready to use. +func (f Formatter) render(builtins, args []interface{}) string { + // Empirically bytes.Buffer is faster than strings.Builder for this. + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + if f.outputFormat == outputJSON { + buf.WriteByte('{') + } + vals := builtins + if hook := f.opts.RenderBuiltinsHook; hook != nil { + vals = hook(f.sanitize(vals)) + } + f.flatten(buf, vals, false, false) // keys are ours, no need to escape + continuing := len(builtins) > 0 + if len(f.valuesStr) > 0 { + if continuing { + if f.outputFormat == outputJSON { + buf.WriteByte(',') + } else { + buf.WriteByte(' ') + } + } + continuing = true + buf.WriteString(f.valuesStr) + } + vals = args + if hook := f.opts.RenderArgsHook; hook != nil { + vals = hook(f.sanitize(vals)) + } + f.flatten(buf, vals, continuing, true) // escape user-provided keys + if f.outputFormat == outputJSON { + buf.WriteByte('}') + } + return buf.String() +} + +// flatten renders a list of key-value pairs into a buffer. If continuing is +// true, it assumes that the buffer has previous values and will emit a +// separator (which depends on the output format) before the first pair it +// writes. If escapeKeys is true, the keys are assumed to have +// non-JSON-compatible characters in them and must be evaluated for escapes. 
+//
+// This function returns a potentially modified version of kvList, which
+// ensures that there is a value for every key (adding a value if needed) and
+// that each key is a string (substituting a key if needed).
+func (f Formatter) flatten(buf *bytes.Buffer, kvList []interface{}, continuing bool, escapeKeys bool) []interface{} {
+	// This logic overlaps with sanitize() but saves one type-cast per key,
+	// which can be measurable.
+	if len(kvList)%2 != 0 {
+		kvList = append(kvList, noValue)
+	}
+	for i := 0; i < len(kvList); i += 2 {
+		k, ok := kvList[i].(string)
+		if !ok {
+			k = f.nonStringKey(kvList[i])
+			kvList[i] = k
+		}
+		v := kvList[i+1]
+
+		if i > 0 || continuing {
+			if f.outputFormat == outputJSON {
+				buf.WriteByte(',')
+			} else {
+				// In theory the format could be something we don't understand. In
+				// practice, we control it, so it won't be.
+				buf.WriteByte(' ')
+			}
+		}
+
+		if escapeKeys {
+			buf.WriteString(prettyString(k))
+		} else {
+			// this is faster
+			buf.WriteByte('"')
+			buf.WriteString(k)
+			buf.WriteByte('"')
+		}
+		if f.outputFormat == outputJSON {
+			buf.WriteByte(':')
+		} else {
+			buf.WriteByte('=')
+		}
+		buf.WriteString(f.pretty(v))
+	}
+	return kvList
+}
+
+func (f Formatter) pretty(value interface{}) string {
+	return f.prettyWithFlags(value, 0, 0)
+}
+
+const (
+	flagRawStruct = 0x1 // do not print braces on structs
+)
+
+// TODO: This is not fast. Most of the overhead goes here.
+func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) string {
+	if depth > f.opts.MaxLogDepth {
+		return `"<max-log-depth-exceeded>"`
+	}
+
+	// Handle types that take full control of logging.
+	if v, ok := value.(logr.Marshaler); ok {
+		// Replace the value with what the type wants to get logged.
+		// That then gets handled below via reflection.
+		value = invokeMarshaler(v)
+	}
+
+	// Handle types that want to format themselves.
+ switch v := value.(type) { + case fmt.Stringer: + value = invokeStringer(v) + case error: + value = invokeError(v) + } + + // Handling the most common types without reflect is a small perf win. + switch v := value.(type) { + case bool: + return strconv.FormatBool(v) + case string: + return prettyString(v) + case int: + return strconv.FormatInt(int64(v), 10) + case int8: + return strconv.FormatInt(int64(v), 10) + case int16: + return strconv.FormatInt(int64(v), 10) + case int32: + return strconv.FormatInt(int64(v), 10) + case int64: + return strconv.FormatInt(int64(v), 10) + case uint: + return strconv.FormatUint(uint64(v), 10) + case uint8: + return strconv.FormatUint(uint64(v), 10) + case uint16: + return strconv.FormatUint(uint64(v), 10) + case uint32: + return strconv.FormatUint(uint64(v), 10) + case uint64: + return strconv.FormatUint(v, 10) + case uintptr: + return strconv.FormatUint(uint64(v), 10) + case float32: + return strconv.FormatFloat(float64(v), 'f', -1, 32) + case float64: + return strconv.FormatFloat(v, 'f', -1, 64) + case complex64: + return `"` + strconv.FormatComplex(complex128(v), 'f', -1, 64) + `"` + case complex128: + return `"` + strconv.FormatComplex(v, 'f', -1, 128) + `"` + case PseudoStruct: + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + v = f.sanitize(v) + if flags&flagRawStruct == 0 { + buf.WriteByte('{') + } + for i := 0; i < len(v); i += 2 { + if i > 0 { + buf.WriteByte(',') + } + k, _ := v[i].(string) // sanitize() above means no need to check success + // arbitrary keys might need escaping + buf.WriteString(prettyString(k)) + buf.WriteByte(':') + buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1)) + } + if flags&flagRawStruct == 0 { + buf.WriteByte('}') + } + return buf.String() + } + + buf := bytes.NewBuffer(make([]byte, 0, 256)) + t := reflect.TypeOf(value) + if t == nil { + return "null" + } + v := reflect.ValueOf(value) + switch t.Kind() { + case reflect.Bool: + return strconv.FormatBool(v.Bool()) + case reflect.String: + 
return prettyString(v.String()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return strconv.FormatInt(int64(v.Int()), 10) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return strconv.FormatUint(uint64(v.Uint()), 10) + case reflect.Float32: + return strconv.FormatFloat(float64(v.Float()), 'f', -1, 32) + case reflect.Float64: + return strconv.FormatFloat(v.Float(), 'f', -1, 64) + case reflect.Complex64: + return `"` + strconv.FormatComplex(complex128(v.Complex()), 'f', -1, 64) + `"` + case reflect.Complex128: + return `"` + strconv.FormatComplex(v.Complex(), 'f', -1, 128) + `"` + case reflect.Struct: + if flags&flagRawStruct == 0 { + buf.WriteByte('{') + } + for i := 0; i < t.NumField(); i++ { + fld := t.Field(i) + if fld.PkgPath != "" { + // reflect says this field is only defined for non-exported fields. + continue + } + if !v.Field(i).CanInterface() { + // reflect isn't clear exactly what this means, but we can't use it. 
+				continue
+			}
+			name := ""
+			omitempty := false
+			if tag, found := fld.Tag.Lookup("json"); found {
+				if tag == "-" {
+					continue
+				}
+				if comma := strings.Index(tag, ","); comma != -1 {
+					if n := tag[:comma]; n != "" {
+						name = n
+					}
+					rest := tag[comma:]
+					if strings.Contains(rest, ",omitempty,") || strings.HasSuffix(rest, ",omitempty") {
+						omitempty = true
+					}
+				} else {
+					name = tag
+				}
+			}
+			if omitempty && isEmpty(v.Field(i)) {
+				continue
+			}
+			if i > 0 {
+				buf.WriteByte(',')
+			}
+			if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" {
+				buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), flags|flagRawStruct, depth+1))
+				continue
+			}
+			if name == "" {
+				name = fld.Name
+			}
+			// field names can't contain characters which need escaping
+			buf.WriteByte('"')
+			buf.WriteString(name)
+			buf.WriteByte('"')
+			buf.WriteByte(':')
+			buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1))
+		}
+		if flags&flagRawStruct == 0 {
+			buf.WriteByte('}')
+		}
+		return buf.String()
+	case reflect.Slice, reflect.Array:
+		buf.WriteByte('[')
+		for i := 0; i < v.Len(); i++ {
+			if i > 0 {
+				buf.WriteByte(',')
+			}
+			e := v.Index(i)
+			buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1))
+		}
+		buf.WriteByte(']')
+		return buf.String()
+	case reflect.Map:
+		buf.WriteByte('{')
+		// This does not sort the map keys, for best perf.
+		it := v.MapRange()
+		i := 0
+		for it.Next() {
+			if i > 0 {
+				buf.WriteByte(',')
+			}
+			// If a map key supports TextMarshaler, use it.
+			keystr := ""
+			if m, ok := it.Key().Interface().(encoding.TextMarshaler); ok {
+				txt, err := m.MarshalText()
+				if err != nil {
+					keystr = fmt.Sprintf("<error-MarshalText: %s>", err.Error())
+				} else {
+					keystr = string(txt)
+				}
+				keystr = prettyString(keystr)
+			} else {
+				// prettyWithFlags will produce already-escaped values
+				keystr = f.prettyWithFlags(it.Key().Interface(), 0, depth+1)
+				if t.Key().Kind() != reflect.String {
+					// JSON only does string keys. Unlike Go's standard JSON, we'll
+					// convert just about anything to a string.
+					keystr = prettyString(keystr)
+				}
+			}
+			buf.WriteString(keystr)
+			buf.WriteByte(':')
+			buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1))
+			i++
+		}
+		buf.WriteByte('}')
+		return buf.String()
+	case reflect.Ptr, reflect.Interface:
+		if v.IsNil() {
+			return "null"
+		}
+		return f.prettyWithFlags(v.Elem().Interface(), 0, depth)
+	}
+	return fmt.Sprintf(`"<unhandled-%s>"`, t.Kind().String())
+}
+
+func prettyString(s string) string {
+	// Avoid escaping (which does allocations) if we can.
+	if needsEscape(s) {
+		return strconv.Quote(s)
+	}
+	b := bytes.NewBuffer(make([]byte, 0, 1024))
+	b.WriteByte('"')
+	b.WriteString(s)
+	b.WriteByte('"')
+	return b.String()
+}
+
+// needsEscape determines whether the input string needs to be escaped or not,
+// without doing any allocations.
+func needsEscape(s string) bool {
+	for _, r := range s {
+		if !strconv.IsPrint(r) || r == '\\' || r == '"' {
+			return true
+		}
+	}
+	return false
+}
+
+func isEmpty(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+		return v.Len() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Complex64, reflect.Complex128:
+		return v.Complex() == 0
+	case reflect.Interface, reflect.Ptr:
+		return v.IsNil()
+	}
+	return false
+}
+
+func invokeMarshaler(m logr.Marshaler) (ret interface{}) {
+	defer func() {
+		if r := recover(); r != nil {
+			ret = fmt.Sprintf("<panic: %s>", r)
+		}
+	}()
+	return m.MarshalLog()
+}
+
+func invokeStringer(s fmt.Stringer) (ret string) {
+	defer func() {
+		if r := recover(); r != nil {
+			ret = fmt.Sprintf("<panic: %s>", r)
+		}
+	}()
+	return s.String()
+}
+
+func invokeError(e error) (ret string) {
+	defer func() {
+		if r := recover(); r != nil {
+			ret = fmt.Sprintf("<panic: %s>", r)
+		}
+	}()
+	return e.Error()
+}
+
+// Caller represents the original call site for a log line, after considering
+// logr.Logger.WithCallDepth and logr.Logger.WithCallStackHelper. The File and
+// Line fields will always be provided, while the Func field is optional.
+// Users can set the render hook fields in Options to examine logged key-value
+// pairs, one of which will be {"caller", Caller} if the Options.LogCaller
+// field is enabled for the given MessageClass.
+type Caller struct {
+	// File is the basename of the file for this call site.
+	File string `json:"file"`
+	// Line is the line number in the file for this call site.
+	Line int `json:"line"`
+	// Func is the function name for this call site, or empty if
+	// Options.LogCallerFunc is not enabled.
+	Func string `json:"function,omitempty"`
+}
+
+func (f Formatter) caller() Caller {
+	// +1 for this frame, +1 for Info/Error.
+	pc, file, line, ok := runtime.Caller(f.depth + 2)
+	if !ok {
+		return Caller{"", 0, ""}
+	}
+	fn := ""
+	if f.opts.LogCallerFunc {
+		if fp := runtime.FuncForPC(pc); fp != nil {
+			fn = fp.Name()
+		}
+	}
+
+	return Caller{filepath.Base(file), line, fn}
+}
+
+const noValue = "<no-value>"
+
+func (f Formatter) nonStringKey(v interface{}) string {
+	return fmt.Sprintf("<non-string-key: %s>", f.snippet(v))
+}
+
+// snippet produces a short snippet string of an arbitrary value.
+func (f Formatter) snippet(v interface{}) string {
+	const snipLen = 16
+
+	snip := f.pretty(v)
+	if len(snip) > snipLen {
+		snip = snip[:snipLen]
+	}
+	return snip
+}
+
+// sanitize ensures that a list of key-value pairs has a value for every key
+// (adding a value if needed) and that each key is a string (substituting a key
+// if needed).
+func (f Formatter) sanitize(kvList []interface{}) []interface{} { + if len(kvList)%2 != 0 { + kvList = append(kvList, noValue) + } + for i := 0; i < len(kvList); i += 2 { + _, ok := kvList[i].(string) + if !ok { + kvList[i] = f.nonStringKey(kvList[i]) + } + } + return kvList +} + +// Init configures this Formatter from runtime info, such as the call depth +// imposed by logr itself. +// Note that this receiver is a pointer, so depth can be saved. +func (f *Formatter) Init(info logr.RuntimeInfo) { + f.depth += info.CallDepth +} + +// Enabled checks whether an info message at the given level should be logged. +func (f Formatter) Enabled(level int) bool { + return level <= f.opts.Verbosity +} + +// GetDepth returns the current depth of this Formatter. This is useful for +// implementations which do their own caller attribution. +func (f Formatter) GetDepth() int { + return f.depth +} + +// FormatInfo renders an Info log message into strings. The prefix will be +// empty when no names were set (via AddNames), or when the output is +// configured for JSON. +func (f Formatter) FormatInfo(level int, msg string, kvList []interface{}) (prefix, argsStr string) { + args := make([]interface{}, 0, 64) // using a constant here impacts perf + prefix = f.prefix + if f.outputFormat == outputJSON { + args = append(args, "logger", prefix) + prefix = "" + } + if f.opts.LogTimestamp { + args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat)) + } + if policy := f.opts.LogCaller; policy == All || policy == Info { + args = append(args, "caller", f.caller()) + } + args = append(args, "level", level, "msg", msg) + return prefix, f.render(args, kvList) +} + +// FormatError renders an Error log message into strings. The prefix will be +// empty when no names were set (via AddNames), or when the output is +// configured for JSON. 
+func (f Formatter) FormatError(err error, msg string, kvList []interface{}) (prefix, argsStr string) { + args := make([]interface{}, 0, 64) // using a constant here impacts perf + prefix = f.prefix + if f.outputFormat == outputJSON { + args = append(args, "logger", prefix) + prefix = "" + } + if f.opts.LogTimestamp { + args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat)) + } + if policy := f.opts.LogCaller; policy == All || policy == Error { + args = append(args, "caller", f.caller()) + } + args = append(args, "msg", msg) + var loggableErr interface{} + if err != nil { + loggableErr = err.Error() + } + args = append(args, "error", loggableErr) + return f.prefix, f.render(args, kvList) +} + +// AddName appends the specified name. funcr uses '/' characters to separate +// name elements. Callers should not pass '/' in the provided name string, but +// this library does not actually enforce that. +func (f *Formatter) AddName(name string) { + if len(f.prefix) > 0 { + f.prefix += "/" + } + f.prefix += name +} + +// AddValues adds key-value pairs to the set of saved values to be logged with +// each log line. +func (f *Formatter) AddValues(kvList []interface{}) { + // Three slice args forces a copy. + n := len(f.values) + f.values = append(f.values[:n:n], kvList...) + + vals := f.values + if hook := f.opts.RenderValuesHook; hook != nil { + vals = hook(f.sanitize(vals)) + } + + // Pre-render values, so we don't have to do it on each Info/Error call. + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + f.flatten(buf, vals, false, true) // escape user-provided keys + f.valuesStr = buf.String() +} + +// AddCallDepth increases the number of stack-frames to skip when attributing +// the log line to a file and line. 
+func (f *Formatter) AddCallDepth(depth int) { + f.depth += depth +} diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go new file mode 100644 index 00000000..c3b56b3d --- /dev/null +++ b/vendor/github.com/go-logr/logr/logr.go @@ -0,0 +1,510 @@ +/* +Copyright 2019 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This design derives from Dave Cheney's blog: +// http://dave.cheney.net/2015/11/05/lets-talk-about-logging + +// Package logr defines a general-purpose logging API and abstract interfaces +// to back that API. Packages in the Go ecosystem can depend on this package, +// while callers can implement logging with whatever backend is appropriate. +// +// Usage +// +// Logging is done using a Logger instance. Logger is a concrete type with +// methods, which defers the actual logging to a LogSink interface. The main +// methods of Logger are Info() and Error(). Arguments to Info() and Error() +// are key/value pairs rather than printf-style formatted strings, emphasizing +// "structured logging". +// +// With Go's standard log package, we might write: +// log.Printf("setting target value %s", targetValue) +// +// With logr's structured logging, we'd write: +// logger.Info("setting target", "value", targetValue) +// +// Errors are much the same. 
Instead of: +// log.Printf("failed to open the pod bay door for user %s: %v", user, err) +// +// We'd write: +// logger.Error(err, "failed to open the pod bay door", "user", user) +// +// Info() and Error() are very similar, but they are separate methods so that +// LogSink implementations can choose to do things like attach additional +// information (such as stack traces) on calls to Error(). Error() messages are +// always logged, regardless of the current verbosity. If there is no error +// instance available, passing nil is valid. +// +// Verbosity +// +// Often we want to log information only when the application in "verbose +// mode". To write log lines that are more verbose, Logger has a V() method. +// The higher the V-level of a log line, the less critical it is considered. +// Log-lines with V-levels that are not enabled (as per the LogSink) will not +// be written. Level V(0) is the default, and logger.V(0).Info() has the same +// meaning as logger.Info(). Negative V-levels have the same meaning as V(0). +// Error messages do not have a verbosity level and are always logged. +// +// Where we might have written: +// if flVerbose >= 2 { +// log.Printf("an unusual thing happened") +// } +// +// We can write: +// logger.V(2).Info("an unusual thing happened") +// +// Logger Names +// +// Logger instances can have name strings so that all messages logged through +// that instance have additional context. For example, you might want to add +// a subsystem name: +// +// logger.WithName("compactor").Info("started", "time", time.Now()) +// +// The WithName() method returns a new Logger, which can be passed to +// constructors or other functions for further use. Repeated use of WithName() +// will accumulate name "segments". These name segments will be joined in some +// way by the LogSink implementation. 
It is strongly recommended that name +// segments contain simple identifiers (letters, digits, and hyphen), and do +// not contain characters that could muddle the log output or confuse the +// joining operation (e.g. whitespace, commas, periods, slashes, brackets, +// quotes, etc). +// +// Saved Values +// +// Logger instances can store any number of key/value pairs, which will be +// logged alongside all messages logged through that instance. For example, +// you might want to create a Logger instance per managed object: +// +// With the standard log package, we might write: +// log.Printf("decided to set field foo to value %q for object %s/%s", +// targetValue, object.Namespace, object.Name) +// +// With logr we'd write: +// // Elsewhere: set up the logger to log the object name. +// obj.logger = mainLogger.WithValues( +// "name", obj.name, "namespace", obj.namespace) +// +// // later on... +// obj.logger.Info("setting foo", "value", targetValue) +// +// Best Practices +// +// Logger has very few hard rules, with the goal that LogSink implementations +// might have a lot of freedom to differentiate. There are, however, some +// things to consider. +// +// The log message consists of a constant message attached to the log line. +// This should generally be a simple description of what's occurring, and should +// never be a format string. Variable information can then be attached using +// named values. +// +// Keys are arbitrary strings, but should generally be constant values. Values +// may be any Go value, but how the value is formatted is determined by the +// LogSink implementation. +// +// Logger instances are meant to be passed around by value. Code that receives +// such a value can call its methods without having to check whether the +// instance is ready for use. +// +// Calling methods with the null logger (Logger{}) as instance will crash +// because it has no LogSink. Therefore this null logger should never be passed +// around. 
For cases where passing a logger is optional, a pointer to Logger +// should be used. +// +// Key Naming Conventions +// +// Keys are not strictly required to conform to any specification or regex, but +// it is recommended that they: +// * be human-readable and meaningful (not auto-generated or simple ordinals) +// * be constant (not dependent on input data) +// * contain only printable characters +// * not contain whitespace or punctuation +// * use lower case for simple keys and lowerCamelCase for more complex ones +// +// These guidelines help ensure that log data is processed properly regardless +// of the log implementation. For example, log implementations will try to +// output JSON data or will store data for later database (e.g. SQL) queries. +// +// While users are generally free to use key names of their choice, it's +// generally best to avoid using the following keys, as they're frequently used +// by implementations: +// * "caller": the calling information (file/line) of a particular log line +// * "error": the underlying error value in the `Error` method +// * "level": the log level +// * "logger": the name of the associated logger +// * "msg": the log message +// * "stacktrace": the stack trace associated with a particular log line or +// error (often from the `Error` message) +// * "ts": the timestamp for a log line +// +// Implementations are encouraged to make use of these keys to represent the +// above concepts, when necessary (for example, in a pure-JSON output form, it +// would be necessary to represent at least message and timestamp as ordinary +// named values). +// +// Break Glass +// +// Implementations may choose to give callers access to the underlying +// logging implementation. The recommended pattern for this is: +// // Underlier exposes access to the underlying logging implementation. 
+// // Since callers only have a logr.Logger, they have to know which +// // implementation is in use, so this interface is less of an abstraction +// // and more of way to test type conversion. +// type Underlier interface { +// GetUnderlying() +// } +// +// Logger grants access to the sink to enable type assertions like this: +// func DoSomethingWithImpl(log logr.Logger) { +// if underlier, ok := log.GetSink()(impl.Underlier) { +// implLogger := underlier.GetUnderlying() +// ... +// } +// } +// +// Custom `With*` functions can be implemented by copying the complete +// Logger struct and replacing the sink in the copy: +// // WithFooBar changes the foobar parameter in the log sink and returns a +// // new logger with that modified sink. It does nothing for loggers where +// // the sink doesn't support that parameter. +// func WithFoobar(log logr.Logger, foobar int) logr.Logger { +// if foobarLogSink, ok := log.GetSink()(FoobarSink); ok { +// log = log.WithSink(foobarLogSink.WithFooBar(foobar)) +// } +// return log +// } +// +// Don't use New to construct a new Logger with a LogSink retrieved from an +// existing Logger. Source code attribution might not work correctly and +// unexported fields in Logger get lost. +// +// Beware that the same LogSink instance may be shared by different logger +// instances. Calling functions that modify the LogSink will affect all of +// those. +package logr + +import ( + "context" +) + +// New returns a new Logger instance. This is primarily used by libraries +// implementing LogSink, rather than end users. +func New(sink LogSink) Logger { + logger := Logger{} + logger.setSink(sink) + sink.Init(runtimeInfo) + return logger +} + +// setSink stores the sink and updates any related fields. It mutates the +// logger and thus is only safe to use for loggers that are not currently being +// used concurrently. +func (l *Logger) setSink(sink LogSink) { + l.sink = sink +} + +// GetSink returns the stored sink. 
+func (l Logger) GetSink() LogSink { + return l.sink +} + +// WithSink returns a copy of the logger with the new sink. +func (l Logger) WithSink(sink LogSink) Logger { + l.setSink(sink) + return l +} + +// Logger is an interface to an abstract logging implementation. This is a +// concrete type for performance reasons, but all the real work is passed on to +// a LogSink. Implementations of LogSink should provide their own constructors +// that return Logger, not LogSink. +// +// The underlying sink can be accessed through GetSink and be modified through +// WithSink. This enables the implementation of custom extensions (see "Break +// Glass" in the package documentation). Normally the sink should be used only +// indirectly. +type Logger struct { + sink LogSink + level int +} + +// Enabled tests whether this Logger is enabled. For example, commandline +// flags might be used to set the logging verbosity and disable some info logs. +func (l Logger) Enabled() bool { + return l.sink.Enabled(l.level) +} + +// Info logs a non-error message with the given key/value pairs as context. +// +// The msg argument should be used to add some constant description to the log +// line. The key/value pairs can then be used to add additional variable +// information. The key/value pairs must alternate string keys and arbitrary +// values. +func (l Logger) Info(msg string, keysAndValues ...interface{}) { + if l.Enabled() { + if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { + withHelper.GetCallStackHelper()() + } + l.sink.Info(l.level, msg, keysAndValues...) + } +} + +// Error logs an error, with the given message and key/value pairs as context. +// It functions similarly to Info, but may have unique behavior, and should be +// preferred for logging errors (see the package documentations for more +// information). The log message will always be emitted, regardless of +// verbosity level. 
+// +// The msg argument should be used to add context to any underlying error, +// while the err argument should be used to attach the actual error that +// triggered this log line, if present. The err parameter is optional +// and nil may be passed instead of an error instance. +func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) { + if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { + withHelper.GetCallStackHelper()() + } + l.sink.Error(err, msg, keysAndValues...) +} + +// V returns a new Logger instance for a specific verbosity level, relative to +// this Logger. In other words, V-levels are additive. A higher verbosity +// level means a log message is less important. Negative V-levels are treated +// as 0. +func (l Logger) V(level int) Logger { + if level < 0 { + level = 0 + } + l.level += level + return l +} + +// WithValues returns a new Logger instance with additional key/value pairs. +// See Info for documentation on how key/value pairs work. +func (l Logger) WithValues(keysAndValues ...interface{}) Logger { + l.setSink(l.sink.WithValues(keysAndValues...)) + return l +} + +// WithName returns a new Logger instance with the specified name element added +// to the Logger's name. Successive calls with WithName append additional +// suffixes to the Logger's name. It's strongly recommended that name segments +// contain only letters, digits, and hyphens (see the package documentation for +// more information). +func (l Logger) WithName(name string) Logger { + l.setSink(l.sink.WithName(name)) + return l +} + +// WithCallDepth returns a Logger instance that offsets the call stack by the +// specified number of frames when logging call site information, if possible. +// This is useful for users who have helper functions between the "real" call +// site and the actual calls to Logger methods. If depth is 0 the attribution +// should be to the direct caller of this function. 
If depth is 1 the +// attribution should skip 1 call frame, and so on. Successive calls to this +// are additive. +// +// If the underlying log implementation supports a WithCallDepth(int) method, +// it will be called and the result returned. If the implementation does not +// support CallDepthLogSink, the original Logger will be returned. +// +// To skip one level, WithCallStackHelper() should be used instead of +// WithCallDepth(1) because it works with implementions that support the +// CallDepthLogSink and/or CallStackHelperLogSink interfaces. +func (l Logger) WithCallDepth(depth int) Logger { + if withCallDepth, ok := l.sink.(CallDepthLogSink); ok { + l.setSink(withCallDepth.WithCallDepth(depth)) + } + return l +} + +// WithCallStackHelper returns a new Logger instance that skips the direct +// caller when logging call site information, if possible. This is useful for +// users who have helper functions between the "real" call site and the actual +// calls to Logger methods and want to support loggers which depend on marking +// each individual helper function, like loggers based on testing.T. +// +// In addition to using that new logger instance, callers also must call the +// returned function. +// +// If the underlying log implementation supports a WithCallDepth(int) method, +// WithCallDepth(1) will be called to produce a new logger. If it supports a +// WithCallStackHelper() method, that will be also called. If the +// implementation does not support either of these, the original Logger will be +// returned. +func (l Logger) WithCallStackHelper() (func(), Logger) { + var helper func() + if withCallDepth, ok := l.sink.(CallDepthLogSink); ok { + l.setSink(withCallDepth.WithCallDepth(1)) + } + if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { + helper = withHelper.GetCallStackHelper() + } else { + helper = func() {} + } + return helper, l +} + +// contextKey is how we find Loggers in a context.Context. 
+type contextKey struct{} + +// FromContext returns a Logger from ctx or an error if no Logger is found. +func FromContext(ctx context.Context) (Logger, error) { + if v, ok := ctx.Value(contextKey{}).(Logger); ok { + return v, nil + } + + return Logger{}, notFoundError{} +} + +// notFoundError exists to carry an IsNotFound method. +type notFoundError struct{} + +func (notFoundError) Error() string { + return "no logr.Logger was present" +} + +func (notFoundError) IsNotFound() bool { + return true +} + +// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this +// returns a Logger that discards all log messages. +func FromContextOrDiscard(ctx context.Context) Logger { + if v, ok := ctx.Value(contextKey{}).(Logger); ok { + return v + } + + return Discard() +} + +// NewContext returns a new Context, derived from ctx, which carries the +// provided Logger. +func NewContext(ctx context.Context, logger Logger) context.Context { + return context.WithValue(ctx, contextKey{}, logger) +} + +// RuntimeInfo holds information that the logr "core" library knows which +// LogSinks might want to know. +type RuntimeInfo struct { + // CallDepth is the number of call frames the logr library adds between the + // end-user and the LogSink. LogSink implementations which choose to print + // the original logging site (e.g. file & line) should climb this many + // additional frames to find it. + CallDepth int +} + +// runtimeInfo is a static global. It must not be changed at run time. +var runtimeInfo = RuntimeInfo{ + CallDepth: 1, +} + +// LogSink represents a logging implementation. End-users will generally not +// interact with this type. +type LogSink interface { + // Init receives optional information about the logr library for LogSink + // implementations that need it. + Init(info RuntimeInfo) + + // Enabled tests whether this LogSink is enabled at the specified V-level. 
+ // For example, commandline flags might be used to set the logging + // verbosity and disable some info logs. + Enabled(level int) bool + + // Info logs a non-error message with the given key/value pairs as context. + // The level argument is provided for optional logging. This method will + // only be called when Enabled(level) is true. See Logger.Info for more + // details. + Info(level int, msg string, keysAndValues ...interface{}) + + // Error logs an error, with the given message and key/value pairs as + // context. See Logger.Error for more details. + Error(err error, msg string, keysAndValues ...interface{}) + + // WithValues returns a new LogSink with additional key/value pairs. See + // Logger.WithValues for more details. + WithValues(keysAndValues ...interface{}) LogSink + + // WithName returns a new LogSink with the specified name appended. See + // Logger.WithName for more details. + WithName(name string) LogSink +} + +// CallDepthLogSink represents a Logger that knows how to climb the call stack +// to identify the original call site and can offset the depth by a specified +// number of frames. This is useful for users who have helper functions +// between the "real" call site and the actual calls to Logger methods. +// Implementations that log information about the call site (such as file, +// function, or line) would otherwise log information about the intermediate +// helper functions. +// +// This is an optional interface and implementations are not required to +// support it. +type CallDepthLogSink interface { + // WithCallDepth returns a LogSink that will offset the call + // stack by the specified number of frames when logging call + // site information. + // + // If depth is 0, the LogSink should skip exactly the number + // of call frames defined in RuntimeInfo.CallDepth when Info + // or Error are called, i.e. the attribution should be to the + // direct caller of Logger.Info or Logger.Error. 
+ // + // If depth is 1 the attribution should skip 1 call frame, and so on. + // Successive calls to this are additive. + WithCallDepth(depth int) LogSink +} + +// CallStackHelperLogSink represents a Logger that knows how to climb +// the call stack to identify the original call site and can skip +// intermediate helper functions if they mark themselves as +// helper. Go's testing package uses that approach. +// +// This is useful for users who have helper functions between the +// "real" call site and the actual calls to Logger methods. +// Implementations that log information about the call site (such as +// file, function, or line) would otherwise log information about the +// intermediate helper functions. +// +// This is an optional interface and implementations are not required +// to support it. Implementations that choose to support this must not +// simply implement it as WithCallDepth(1), because +// Logger.WithCallStackHelper will call both methods if they are +// present. This should only be implemented for LogSinks that actually +// need it, as with testing.T. +type CallStackHelperLogSink interface { + // GetCallStackHelper returns a function that must be called + // to mark the direct caller as helper function when logging + // call site information. + GetCallStackHelper() func() +} + +// Marshaler is an optional interface that logged values may choose to +// implement. Loggers with structured output, such as JSON, should +// log the object return by the MarshalLog method instead of the +// original value. 
+type Marshaler interface { + // MarshalLog can be used to: + // - ensure that structs are not logged as strings when the original + // value has a String method: return a different type without a + // String method + // - select which fields of a complex type should get logged: + // return a simpler struct with fewer fields + // - log unexported fields: return a different struct + // with exported fields + // + // It may return any value of any type. + MarshalLog() interface{} +} diff --git a/vendor/github.com/go-logr/stdr/LICENSE b/vendor/github.com/go-logr/stdr/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/github.com/go-logr/stdr/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-logr/stdr/README.md b/vendor/github.com/go-logr/stdr/README.md new file mode 100644 index 00000000..51586678 --- /dev/null +++ b/vendor/github.com/go-logr/stdr/README.md @@ -0,0 +1,6 @@ +# Minimal Go logging using logr and Go's standard library + +[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/stdr.svg)](https://pkg.go.dev/github.com/go-logr/stdr) + +This package implements the [logr interface](https://github.com/go-logr/logr) +in terms of Go's standard log package(https://pkg.go.dev/log). diff --git a/vendor/github.com/go-logr/stdr/stdr.go b/vendor/github.com/go-logr/stdr/stdr.go new file mode 100644 index 00000000..93a8aab5 --- /dev/null +++ b/vendor/github.com/go-logr/stdr/stdr.go @@ -0,0 +1,170 @@ +/* +Copyright 2019 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package stdr implements github.com/go-logr/logr.Logger in terms of +// Go's standard log package. +package stdr + +import ( + "log" + "os" + + "github.com/go-logr/logr" + "github.com/go-logr/logr/funcr" +) + +// The global verbosity level. See SetVerbosity(). +var globalVerbosity int + +// SetVerbosity sets the global level against which all info logs will be +// compared. If this is greater than or equal to the "V" of the logger, the +// message will be logged. A higher value here means more logs will be written. +// The previous verbosity value is returned. This is not concurrent-safe - +// callers must be sure to call it from only one goroutine. +func SetVerbosity(v int) int { + old := globalVerbosity + globalVerbosity = v + return old +} + +// New returns a logr.Logger which is implemented by Go's standard log package, +// or something like it. If std is nil, this will use a default logger +// instead. +// +// Example: stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))) +func New(std StdLogger) logr.Logger { + return NewWithOptions(std, Options{}) +} + +// NewWithOptions returns a logr.Logger which is implemented by Go's standard +// log package, or something like it. See New for details. +func NewWithOptions(std StdLogger, opts Options) logr.Logger { + if std == nil { + // Go's log.Default() is only available in 1.16 and higher. + std = log.New(os.Stderr, "", log.LstdFlags) + } + + if opts.Depth < 0 { + opts.Depth = 0 + } + + fopts := funcr.Options{ + LogCaller: funcr.MessageClass(opts.LogCaller), + } + + sl := &logger{ + Formatter: funcr.NewFormatter(fopts), + std: std, + } + + // For skipping our own logger.Info/Error. + sl.Formatter.AddCallDepth(1 + opts.Depth) + + return logr.New(sl) +} + +// Options carries parameters which influence the way logs are generated. +type Options struct { + // Depth biases the assumed number of call frames to the "true" caller. 
+ // This is useful when the calling code calls a function which then calls + // stdr (e.g. a logging shim to another API). Values less than zero will + // be treated as zero. + Depth int + + // LogCaller tells stdr to add a "caller" key to some or all log lines. + // Go's log package has options to log this natively, too. + LogCaller MessageClass + + // TODO: add an option to log the date/time +} + +// MessageClass indicates which category or categories of messages to consider. +type MessageClass int + +const ( + // None ignores all message classes. + None MessageClass = iota + // All considers all message classes. + All + // Info only considers info messages. + Info + // Error only considers error messages. + Error +) + +// StdLogger is the subset of the Go stdlib log.Logger API that is needed for +// this adapter. +type StdLogger interface { + // Output is the same as log.Output and log.Logger.Output. + Output(calldepth int, logline string) error +} + +type logger struct { + funcr.Formatter + std StdLogger +} + +var _ logr.LogSink = &logger{} +var _ logr.CallDepthLogSink = &logger{} + +func (l logger) Enabled(level int) bool { + return globalVerbosity >= level +} + +func (l logger) Info(level int, msg string, kvList ...interface{}) { + prefix, args := l.FormatInfo(level, msg, kvList) + if prefix != "" { + args = prefix + ": " + args + } + _ = l.std.Output(l.Formatter.GetDepth()+1, args) +} + +func (l logger) Error(err error, msg string, kvList ...interface{}) { + prefix, args := l.FormatError(err, msg, kvList) + if prefix != "" { + args = prefix + ": " + args + } + _ = l.std.Output(l.Formatter.GetDepth()+1, args) +} + +func (l logger) WithName(name string) logr.LogSink { + l.Formatter.AddName(name) + return &l +} + +func (l logger) WithValues(kvList ...interface{}) logr.LogSink { + l.Formatter.AddValues(kvList) + return &l +} + +func (l logger) WithCallDepth(depth int) logr.LogSink { + l.Formatter.AddCallDepth(depth) + return &l +} + +// Underlier exposes access 
to the underlying logging implementation. Since +// callers only have a logr.Logger, they have to know which implementation is +// in use, so this interface is less of an abstraction and more of way to test +// type conversion. +type Underlier interface { + GetUnderlying() StdLogger +} + +// GetUnderlying returns the StdLogger underneath this logger. Since StdLogger +// is itself an interface, the result may or may not be a Go log.Logger. +func (l logger) GetUnderlying() StdLogger { + return l.std +} diff --git a/vendor/github.com/gogf/gf/v2/LICENSE b/vendor/github.com/gogf/gf/v2/LICENSE new file mode 100644 index 00000000..0c20e2aa --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017 john@goframe.org https://goframe.org + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/gogf/gf/v2/container/garray/garray.go b/vendor/github.com/gogf/gf/v2/container/garray/garray.go new file mode 100644 index 00000000..08e9ece8 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/container/garray/garray.go @@ -0,0 +1,8 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +// Package garray provides most commonly used array containers which also support concurrent-safe/unsafe switch feature. +package garray diff --git a/vendor/github.com/gogf/gf/v2/container/garray/garray_func.go b/vendor/github.com/gogf/gf/v2/container/garray/garray_func.go new file mode 100644 index 00000000..155cca0d --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/container/garray/garray_func.go @@ -0,0 +1,69 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package garray + +import "strings" + +// defaultComparatorInt for int comparison. +func defaultComparatorInt(a, b int) int { + if a < b { + return -1 + } + if a > b { + return 1 + } + return 0 +} + +// defaultComparatorStr for string comparison. +func defaultComparatorStr(a, b string) int { + return strings.Compare(a, b) +} + +// quickSortInt is the quick-sorting algorithm implements for int. 
+func quickSortInt(values []int, comparator func(a, b int) int) { + if len(values) <= 1 { + return + } + mid, i := values[0], 1 + head, tail := 0, len(values)-1 + for head < tail { + if comparator(values[i], mid) > 0 { + values[i], values[tail] = values[tail], values[i] + tail-- + } else { + values[i], values[head] = values[head], values[i] + head++ + i++ + } + } + values[head] = mid + quickSortInt(values[:head], comparator) + quickSortInt(values[head+1:], comparator) +} + +// quickSortStr is the quick-sorting algorithm implements for string. +func quickSortStr(values []string, comparator func(a, b string) int) { + if len(values) <= 1 { + return + } + mid, i := values[0], 1 + head, tail := 0, len(values)-1 + for head < tail { + if comparator(values[i], mid) > 0 { + values[i], values[tail] = values[tail], values[i] + tail-- + } else { + values[i], values[head] = values[head], values[i] + head++ + i++ + } + } + values[head] = mid + quickSortStr(values[:head], comparator) + quickSortStr(values[head+1:], comparator) +} diff --git a/vendor/github.com/gogf/gf/v2/container/garray/garray_normal_any.go b/vendor/github.com/gogf/gf/v2/container/garray/garray_normal_any.go new file mode 100644 index 00000000..78d7ae4a --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/container/garray/garray_normal_any.go @@ -0,0 +1,837 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. 
+ +package garray + +import ( + "bytes" + "fmt" + "math" + "sort" + + "github.com/gogf/gf/v2/errors/gcode" + "github.com/gogf/gf/v2/errors/gerror" + "github.com/gogf/gf/v2/internal/deepcopy" + "github.com/gogf/gf/v2/internal/empty" + "github.com/gogf/gf/v2/internal/json" + "github.com/gogf/gf/v2/internal/rwmutex" + "github.com/gogf/gf/v2/text/gstr" + "github.com/gogf/gf/v2/util/gconv" + "github.com/gogf/gf/v2/util/grand" +) + +// Array is a golang array with rich features. +// It contains a concurrent-safe/unsafe switch, which should be set +// when its initialization and cannot be changed then. +type Array struct { + mu rwmutex.RWMutex + array []interface{} +} + +// New creates and returns an empty array. +// The parameter `safe` is used to specify whether using array in concurrent-safety, +// which is false in default. +func New(safe ...bool) *Array { + return NewArraySize(0, 0, safe...) +} + +// NewArray is alias of New, please see New. +func NewArray(safe ...bool) *Array { + return NewArraySize(0, 0, safe...) +} + +// NewArraySize create and returns an array with given size and cap. +// The parameter `safe` is used to specify whether using array in concurrent-safety, +// which is false in default. +func NewArraySize(size int, cap int, safe ...bool) *Array { + return &Array{ + mu: rwmutex.Create(safe...), + array: make([]interface{}, size, cap), + } +} + +// NewArrayRange creates and returns an array by a range from `start` to `end` +// with step value `step`. +func NewArrayRange(start, end, step int, safe ...bool) *Array { + if step == 0 { + panic(fmt.Sprintf(`invalid step value: %d`, step)) + } + slice := make([]interface{}, 0) + index := 0 + for i := start; i <= end; i += step { + slice = append(slice, i) + index++ + } + return NewArrayFrom(slice, safe...) +} + +// NewFrom is alias of NewArrayFrom. +// See NewArrayFrom. +func NewFrom(array []interface{}, safe ...bool) *Array { + return NewArrayFrom(array, safe...) 
+} + +// NewFromCopy is alias of NewArrayFromCopy. +// See NewArrayFromCopy. +func NewFromCopy(array []interface{}, safe ...bool) *Array { + return NewArrayFromCopy(array, safe...) +} + +// NewArrayFrom creates and returns an array with given slice `array`. +// The parameter `safe` is used to specify whether using array in concurrent-safety, +// which is false in default. +func NewArrayFrom(array []interface{}, safe ...bool) *Array { + return &Array{ + mu: rwmutex.Create(safe...), + array: array, + } +} + +// NewArrayFromCopy creates and returns an array from a copy of given slice `array`. +// The parameter `safe` is used to specify whether using array in concurrent-safety, +// which is false in default. +func NewArrayFromCopy(array []interface{}, safe ...bool) *Array { + newArray := make([]interface{}, len(array)) + copy(newArray, array) + return &Array{ + mu: rwmutex.Create(safe...), + array: newArray, + } +} + +// At returns the value by the specified index. +// If the given `index` is out of range of the array, it returns `nil`. +func (a *Array) At(index int) (value interface{}) { + value, _ = a.Get(index) + return +} + +// Get returns the value by the specified index. +// If the given `index` is out of range of the array, the `found` is false. +func (a *Array) Get(index int) (value interface{}, found bool) { + a.mu.RLock() + defer a.mu.RUnlock() + if index < 0 || index >= len(a.array) { + return nil, false + } + return a.array[index], true +} + +// Set sets value to specified index. +func (a *Array) Set(index int, value interface{}) error { + a.mu.Lock() + defer a.mu.Unlock() + if index < 0 || index >= len(a.array) { + return gerror.NewCodef(gcode.CodeInvalidParameter, "index %d out of array range %d", index, len(a.array)) + } + a.array[index] = value + return nil +} + +// SetArray sets the underlying slice array with the given `array`. 
+func (a *Array) SetArray(array []interface{}) *Array { + a.mu.Lock() + defer a.mu.Unlock() + a.array = array + return a +} + +// Replace replaces the array items by given `array` from the beginning of array. +func (a *Array) Replace(array []interface{}) *Array { + a.mu.Lock() + defer a.mu.Unlock() + max := len(array) + if max > len(a.array) { + max = len(a.array) + } + for i := 0; i < max; i++ { + a.array[i] = array[i] + } + return a +} + +// Sum returns the sum of values in an array. +func (a *Array) Sum() (sum int) { + a.mu.RLock() + defer a.mu.RUnlock() + for _, v := range a.array { + sum += gconv.Int(v) + } + return +} + +// SortFunc sorts the array by custom function `less`. +func (a *Array) SortFunc(less func(v1, v2 interface{}) bool) *Array { + a.mu.Lock() + defer a.mu.Unlock() + sort.Slice(a.array, func(i, j int) bool { + return less(a.array[i], a.array[j]) + }) + return a +} + +// InsertBefore inserts the `values` to the front of `index`. +func (a *Array) InsertBefore(index int, values ...interface{}) error { + a.mu.Lock() + defer a.mu.Unlock() + if index < 0 || index >= len(a.array) { + return gerror.NewCodef(gcode.CodeInvalidParameter, "index %d out of array range %d", index, len(a.array)) + } + rear := append([]interface{}{}, a.array[index:]...) + a.array = append(a.array[0:index], values...) + a.array = append(a.array, rear...) + return nil +} + +// InsertAfter inserts the `values` to the back of `index`. +func (a *Array) InsertAfter(index int, values ...interface{}) error { + a.mu.Lock() + defer a.mu.Unlock() + if index < 0 || index >= len(a.array) { + return gerror.NewCodef(gcode.CodeInvalidParameter, "index %d out of array range %d", index, len(a.array)) + } + rear := append([]interface{}{}, a.array[index+1:]...) + a.array = append(a.array[0:index+1], values...) + a.array = append(a.array, rear...) + return nil +} + +// Remove removes an item by index. +// If the given `index` is out of range of the array, the `found` is false. 
+func (a *Array) Remove(index int) (value interface{}, found bool) { + a.mu.Lock() + defer a.mu.Unlock() + return a.doRemoveWithoutLock(index) +} + +// doRemoveWithoutLock removes an item by index without lock. +func (a *Array) doRemoveWithoutLock(index int) (value interface{}, found bool) { + if index < 0 || index >= len(a.array) { + return nil, false + } + // Determine array boundaries when deleting to improve deletion efficiency. + if index == 0 { + value := a.array[0] + a.array = a.array[1:] + return value, true + } else if index == len(a.array)-1 { + value := a.array[index] + a.array = a.array[:index] + return value, true + } + // If it is a non-boundary delete, + // it will involve the creation of an array, + // then the deletion is less efficient. + value = a.array[index] + a.array = append(a.array[:index], a.array[index+1:]...) + return value, true +} + +// RemoveValue removes an item by value. +// It returns true if value is found in the array, or else false if not found. +func (a *Array) RemoveValue(value interface{}) bool { + if i := a.Search(value); i != -1 { + a.Remove(i) + return true + } + return false +} + +// PushLeft pushes one or multiple items to the beginning of array. +func (a *Array) PushLeft(value ...interface{}) *Array { + a.mu.Lock() + a.array = append(value, a.array...) + a.mu.Unlock() + return a +} + +// PushRight pushes one or multiple items to the end of array. +// It equals to Append. +func (a *Array) PushRight(value ...interface{}) *Array { + a.mu.Lock() + a.array = append(a.array, value...) + a.mu.Unlock() + return a +} + +// PopRand randomly pops and return an item out of array. +// Note that if the array is empty, the `found` is false. +func (a *Array) PopRand() (value interface{}, found bool) { + a.mu.Lock() + defer a.mu.Unlock() + return a.doRemoveWithoutLock(grand.Intn(len(a.array))) +} + +// PopRands randomly pops and returns `size` items out of array. 
+func (a *Array) PopRands(size int) []interface{} { + a.mu.Lock() + defer a.mu.Unlock() + if size <= 0 || len(a.array) == 0 { + return nil + } + if size >= len(a.array) { + size = len(a.array) + } + array := make([]interface{}, size) + for i := 0; i < size; i++ { + array[i], _ = a.doRemoveWithoutLock(grand.Intn(len(a.array))) + } + return array +} + +// PopLeft pops and returns an item from the beginning of array. +// Note that if the array is empty, the `found` is false. +func (a *Array) PopLeft() (value interface{}, found bool) { + a.mu.Lock() + defer a.mu.Unlock() + if len(a.array) == 0 { + return nil, false + } + value = a.array[0] + a.array = a.array[1:] + return value, true +} + +// PopRight pops and returns an item from the end of array. +// Note that if the array is empty, the `found` is false. +func (a *Array) PopRight() (value interface{}, found bool) { + a.mu.Lock() + defer a.mu.Unlock() + index := len(a.array) - 1 + if index < 0 { + return nil, false + } + value = a.array[index] + a.array = a.array[:index] + return value, true +} + +// PopLefts pops and returns `size` items from the beginning of array. +func (a *Array) PopLefts(size int) []interface{} { + a.mu.Lock() + defer a.mu.Unlock() + if size <= 0 || len(a.array) == 0 { + return nil + } + if size >= len(a.array) { + array := a.array + a.array = a.array[:0] + return array + } + value := a.array[0:size] + a.array = a.array[size:] + return value +} + +// PopRights pops and returns `size` items from the end of array. +func (a *Array) PopRights(size int) []interface{} { + a.mu.Lock() + defer a.mu.Unlock() + if size <= 0 || len(a.array) == 0 { + return nil + } + index := len(a.array) - size + if index <= 0 { + array := a.array + a.array = a.array[:0] + return array + } + value := a.array[index:] + a.array = a.array[:index] + return value +} + +// Range picks and returns items by range, like array[start:end]. 
+// Notice, if in concurrent-safe usage, it returns a copy of slice; +// else a pointer to the underlying data. +// +// If `end` is negative, then the offset will start from the end of array. +// If `end` is omitted, then the sequence will have everything from start up +// until the end of the array. +func (a *Array) Range(start int, end ...int) []interface{} { + a.mu.RLock() + defer a.mu.RUnlock() + offsetEnd := len(a.array) + if len(end) > 0 && end[0] < offsetEnd { + offsetEnd = end[0] + } + if start > offsetEnd { + return nil + } + if start < 0 { + start = 0 + } + array := ([]interface{})(nil) + if a.mu.IsSafe() { + array = make([]interface{}, offsetEnd-start) + copy(array, a.array[start:offsetEnd]) + } else { + array = a.array[start:offsetEnd] + } + return array +} + +// SubSlice returns a slice of elements from the array as specified +// by the `offset` and `size` parameters. +// If in concurrent safe usage, it returns a copy of the slice; else a pointer. +// +// If offset is non-negative, the sequence will start at that offset in the array. +// If offset is negative, the sequence will start that far from the end of the array. +// +// If length is given and is positive, then the sequence will have up to that many elements in it. +// If the array is shorter than the length, then only the available array elements will be present. +// If length is given and is negative then the sequence will stop that many elements from the end of the array. +// If it is omitted, then the sequence will have everything from offset up until the end of the array. +// +// Any possibility crossing the left border of array, it will fail. 
+func (a *Array) SubSlice(offset int, length ...int) []interface{} { + a.mu.RLock() + defer a.mu.RUnlock() + size := len(a.array) + if len(length) > 0 { + size = length[0] + } + if offset > len(a.array) { + return nil + } + if offset < 0 { + offset = len(a.array) + offset + if offset < 0 { + return nil + } + } + if size < 0 { + offset += size + size = -size + if offset < 0 { + return nil + } + } + end := offset + size + if end > len(a.array) { + end = len(a.array) + size = len(a.array) - offset + } + if a.mu.IsSafe() { + s := make([]interface{}, size) + copy(s, a.array[offset:]) + return s + } else { + return a.array[offset:end] + } +} + +// Append is alias of PushRight, please See PushRight. +func (a *Array) Append(value ...interface{}) *Array { + a.PushRight(value...) + return a +} + +// Len returns the length of array. +func (a *Array) Len() int { + a.mu.RLock() + length := len(a.array) + a.mu.RUnlock() + return length +} + +// Slice returns the underlying data of array. +// Note that, if it's in concurrent-safe usage, it returns a copy of underlying data, +// or else a pointer to the underlying data. +func (a *Array) Slice() []interface{} { + if a.mu.IsSafe() { + a.mu.RLock() + defer a.mu.RUnlock() + array := make([]interface{}, len(a.array)) + copy(array, a.array) + return array + } else { + return a.array + } +} + +// Interfaces returns current array as []interface{}. +func (a *Array) Interfaces() []interface{} { + return a.Slice() +} + +// Clone returns a new array, which is a copy of current array. +func (a *Array) Clone() (newArray *Array) { + a.mu.RLock() + array := make([]interface{}, len(a.array)) + copy(array, a.array) + a.mu.RUnlock() + return NewArrayFrom(array, a.mu.IsSafe()) +} + +// Clear deletes all items of current array. +func (a *Array) Clear() *Array { + a.mu.Lock() + if len(a.array) > 0 { + a.array = make([]interface{}, 0) + } + a.mu.Unlock() + return a +} + +// Contains checks whether a value exists in the array. 
+func (a *Array) Contains(value interface{}) bool { + return a.Search(value) != -1 +} + +// Search searches array by `value`, returns the index of `value`, +// or returns -1 if not exists. +func (a *Array) Search(value interface{}) int { + a.mu.RLock() + defer a.mu.RUnlock() + if len(a.array) == 0 { + return -1 + } + result := -1 + for index, v := range a.array { + if v == value { + result = index + break + } + } + return result +} + +// Unique uniques the array, clear repeated items. +// Example: [1,1,2,3,2] -> [1,2,3] +func (a *Array) Unique() *Array { + a.mu.Lock() + defer a.mu.Unlock() + if len(a.array) == 0 { + return a + } + var ( + ok bool + temp interface{} + uniqueSet = make(map[interface{}]struct{}) + uniqueArray = make([]interface{}, 0, len(a.array)) + ) + for i := 0; i < len(a.array); i++ { + temp = a.array[i] + if _, ok = uniqueSet[temp]; ok { + continue + } + uniqueSet[temp] = struct{}{} + uniqueArray = append(uniqueArray, temp) + } + a.array = uniqueArray + return a +} + +// LockFunc locks writing by callback function `f`. +func (a *Array) LockFunc(f func(array []interface{})) *Array { + a.mu.Lock() + defer a.mu.Unlock() + f(a.array) + return a +} + +// RLockFunc locks reading by callback function `f`. +func (a *Array) RLockFunc(f func(array []interface{})) *Array { + a.mu.RLock() + defer a.mu.RUnlock() + f(a.array) + return a +} + +// Merge merges `array` into current array. +// The parameter `array` can be any garray or slice type. +// The difference between Merge and Append is Append supports only specified slice type, +// but Merge supports more parameter types. +func (a *Array) Merge(array interface{}) *Array { + return a.Append(gconv.Interfaces(array)...) +} + +// Fill fills an array with num entries of the value `value`, +// keys starting at the `startIndex` parameter. 
+func (a *Array) Fill(startIndex int, num int, value interface{}) error { + a.mu.Lock() + defer a.mu.Unlock() + if startIndex < 0 || startIndex > len(a.array) { + return gerror.NewCodef(gcode.CodeInvalidParameter, "index %d out of array range %d", startIndex, len(a.array)) + } + for i := startIndex; i < startIndex+num; i++ { + if i > len(a.array)-1 { + a.array = append(a.array, value) + } else { + a.array[i] = value + } + } + return nil +} + +// Chunk splits an array into multiple arrays, +// the size of each array is determined by `size`. +// The last chunk may contain less than size elements. +func (a *Array) Chunk(size int) [][]interface{} { + if size < 1 { + return nil + } + a.mu.RLock() + defer a.mu.RUnlock() + length := len(a.array) + chunks := int(math.Ceil(float64(length) / float64(size))) + var n [][]interface{} + for i, end := 0, 0; chunks > 0; chunks-- { + end = (i + 1) * size + if end > length { + end = length + } + n = append(n, a.array[i*size:end]) + i++ + } + return n +} + +// Pad pads array to the specified length with `value`. +// If size is positive then the array is padded on the right, or negative on the left. +// If the absolute value of `size` is less than or equal to the length of the array +// then no padding takes place. +func (a *Array) Pad(size int, val interface{}) *Array { + a.mu.Lock() + defer a.mu.Unlock() + if size == 0 || (size > 0 && size < len(a.array)) || (size < 0 && size > -len(a.array)) { + return a + } + n := size + if size < 0 { + n = -size + } + n -= len(a.array) + tmp := make([]interface{}, n) + for i := 0; i < n; i++ { + tmp[i] = val + } + if size > 0 { + a.array = append(a.array, tmp...) + } else { + a.array = append(tmp, a.array...) + } + return a +} + +// Rand randomly returns one item from array(no deleting). 
+func (a *Array) Rand() (value interface{}, found bool) { + a.mu.RLock() + defer a.mu.RUnlock() + if len(a.array) == 0 { + return nil, false + } + return a.array[grand.Intn(len(a.array))], true +} + +// Rands randomly returns `size` items from array(no deleting). +func (a *Array) Rands(size int) []interface{} { + a.mu.RLock() + defer a.mu.RUnlock() + if size <= 0 || len(a.array) == 0 { + return nil + } + array := make([]interface{}, size) + for i := 0; i < size; i++ { + array[i] = a.array[grand.Intn(len(a.array))] + } + return array +} + +// Shuffle randomly shuffles the array. +func (a *Array) Shuffle() *Array { + a.mu.Lock() + defer a.mu.Unlock() + for i, v := range grand.Perm(len(a.array)) { + a.array[i], a.array[v] = a.array[v], a.array[i] + } + return a +} + +// Reverse makes array with elements in reverse order. +func (a *Array) Reverse() *Array { + a.mu.Lock() + defer a.mu.Unlock() + for i, j := 0, len(a.array)-1; i < j; i, j = i+1, j-1 { + a.array[i], a.array[j] = a.array[j], a.array[i] + } + return a +} + +// Join joins array elements with a string `glue`. +func (a *Array) Join(glue string) string { + a.mu.RLock() + defer a.mu.RUnlock() + if len(a.array) == 0 { + return "" + } + buffer := bytes.NewBuffer(nil) + for k, v := range a.array { + buffer.WriteString(gconv.String(v)) + if k != len(a.array)-1 { + buffer.WriteString(glue) + } + } + return buffer.String() +} + +// CountValues counts the number of occurrences of all values in the array. +func (a *Array) CountValues() map[interface{}]int { + m := make(map[interface{}]int) + a.mu.RLock() + defer a.mu.RUnlock() + for _, v := range a.array { + m[v]++ + } + return m +} + +// Iterator is alias of IteratorAsc. +func (a *Array) Iterator(f func(k int, v interface{}) bool) { + a.IteratorAsc(f) +} + +// IteratorAsc iterates the array readonly in ascending order with given callback function `f`. +// If `f` returns true, then it continues iterating; or false to stop. 
+func (a *Array) IteratorAsc(f func(k int, v interface{}) bool) { + a.mu.RLock() + defer a.mu.RUnlock() + for k, v := range a.array { + if !f(k, v) { + break + } + } +} + +// IteratorDesc iterates the array readonly in descending order with given callback function `f`. +// If `f` returns true, then it continues iterating; or false to stop. +func (a *Array) IteratorDesc(f func(k int, v interface{}) bool) { + a.mu.RLock() + defer a.mu.RUnlock() + for i := len(a.array) - 1; i >= 0; i-- { + if !f(i, a.array[i]) { + break + } + } +} + +// String returns current array as a string, which implements like json.Marshal does. +func (a *Array) String() string { + if a == nil { + return "" + } + a.mu.RLock() + defer a.mu.RUnlock() + buffer := bytes.NewBuffer(nil) + buffer.WriteByte('[') + s := "" + for k, v := range a.array { + s = gconv.String(v) + if gstr.IsNumeric(s) { + buffer.WriteString(s) + } else { + buffer.WriteString(`"` + gstr.QuoteMeta(s, `"\`) + `"`) + } + if k != len(a.array)-1 { + buffer.WriteByte(',') + } + } + buffer.WriteByte(']') + return buffer.String() +} + +// MarshalJSON implements the interface MarshalJSON for json.Marshal. +// Note that do not use pointer as its receiver here. +func (a Array) MarshalJSON() ([]byte, error) { + a.mu.RLock() + defer a.mu.RUnlock() + return json.Marshal(a.array) +} + +// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. +func (a *Array) UnmarshalJSON(b []byte) error { + if a.array == nil { + a.array = make([]interface{}, 0) + } + a.mu.Lock() + defer a.mu.Unlock() + if err := json.UnmarshalUseNumber(b, &a.array); err != nil { + return err + } + return nil +} + +// UnmarshalValue is an interface implement which sets any type of value for array. 
+func (a *Array) UnmarshalValue(value interface{}) error { + a.mu.Lock() + defer a.mu.Unlock() + switch value.(type) { + case string, []byte: + return json.UnmarshalUseNumber(gconv.Bytes(value), &a.array) + default: + a.array = gconv.SliceAny(value) + } + return nil +} + +// FilterNil removes all nil value of the array. +func (a *Array) FilterNil() *Array { + a.mu.Lock() + defer a.mu.Unlock() + for i := 0; i < len(a.array); { + if empty.IsNil(a.array[i]) { + a.array = append(a.array[:i], a.array[i+1:]...) + } else { + i++ + } + } + return a +} + +// FilterEmpty removes all empty value of the array. +// Values like: 0, nil, false, "", len(slice/map/chan) == 0 are considered empty. +func (a *Array) FilterEmpty() *Array { + a.mu.Lock() + defer a.mu.Unlock() + for i := 0; i < len(a.array); { + if empty.IsEmpty(a.array[i]) { + a.array = append(a.array[:i], a.array[i+1:]...) + } else { + i++ + } + } + return a +} + +// Walk applies a user supplied function `f` to every item of array. +func (a *Array) Walk(f func(value interface{}) interface{}) *Array { + a.mu.Lock() + defer a.mu.Unlock() + for i, v := range a.array { + a.array[i] = f(v) + } + return a +} + +// IsEmpty checks whether the array is empty. +func (a *Array) IsEmpty() bool { + return a.Len() == 0 +} + +// DeepCopy implements interface for deep copy of current type. +func (a *Array) DeepCopy() interface{} { + if a == nil { + return nil + } + a.mu.RLock() + defer a.mu.RUnlock() + newSlice := make([]interface{}, len(a.array)) + for i, v := range a.array { + newSlice[i] = deepcopy.Copy(v) + } + return NewArrayFrom(newSlice, a.mu.IsSafe()) +} diff --git a/vendor/github.com/gogf/gf/v2/container/garray/garray_normal_int.go b/vendor/github.com/gogf/gf/v2/container/garray/garray_normal_int.go new file mode 100644 index 00000000..e44ed076 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/container/garray/garray_normal_int.go @@ -0,0 +1,813 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. 
+// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package garray + +import ( + "bytes" + "fmt" + "math" + "sort" + + "github.com/gogf/gf/v2/errors/gcode" + "github.com/gogf/gf/v2/errors/gerror" + "github.com/gogf/gf/v2/internal/json" + "github.com/gogf/gf/v2/internal/rwmutex" + "github.com/gogf/gf/v2/util/gconv" + "github.com/gogf/gf/v2/util/grand" +) + +// IntArray is a golang int array with rich features. +// It contains a concurrent-safe/unsafe switch, which should be set +// when its initialization and cannot be changed then. +type IntArray struct { + mu rwmutex.RWMutex + array []int +} + +// NewIntArray creates and returns an empty array. +// The parameter `safe` is used to specify whether using array in concurrent-safety, +// which is false in default. +func NewIntArray(safe ...bool) *IntArray { + return NewIntArraySize(0, 0, safe...) +} + +// NewIntArraySize create and returns an array with given size and cap. +// The parameter `safe` is used to specify whether using array in concurrent-safety, +// which is false in default. +func NewIntArraySize(size int, cap int, safe ...bool) *IntArray { + return &IntArray{ + mu: rwmutex.Create(safe...), + array: make([]int, size, cap), + } +} + +// NewIntArrayRange creates and returns an array by a range from `start` to `end` +// with step value `step`. +func NewIntArrayRange(start, end, step int, safe ...bool) *IntArray { + if step == 0 { + panic(fmt.Sprintf(`invalid step value: %d`, step)) + } + slice := make([]int, 0) + index := 0 + for i := start; i <= end; i += step { + slice = append(slice, i) + index++ + } + return NewIntArrayFrom(slice, safe...) +} + +// NewIntArrayFrom creates and returns an array with given slice `array`. +// The parameter `safe` is used to specify whether using array in concurrent-safety, +// which is false in default. 
+func NewIntArrayFrom(array []int, safe ...bool) *IntArray { + return &IntArray{ + mu: rwmutex.Create(safe...), + array: array, + } +} + +// NewIntArrayFromCopy creates and returns an array from a copy of given slice `array`. +// The parameter `safe` is used to specify whether using array in concurrent-safety, +// which is false in default. +func NewIntArrayFromCopy(array []int, safe ...bool) *IntArray { + newArray := make([]int, len(array)) + copy(newArray, array) + return &IntArray{ + mu: rwmutex.Create(safe...), + array: newArray, + } +} + +// At returns the value by the specified index. +// If the given `index` is out of range of the array, it returns `0`. +func (a *IntArray) At(index int) (value int) { + value, _ = a.Get(index) + return +} + +// Get returns the value by the specified index. +// If the given `index` is out of range of the array, the `found` is false. +func (a *IntArray) Get(index int) (value int, found bool) { + a.mu.RLock() + defer a.mu.RUnlock() + if index < 0 || index >= len(a.array) { + return 0, false + } + return a.array[index], true +} + +// Set sets value to specified index. +func (a *IntArray) Set(index int, value int) error { + a.mu.Lock() + defer a.mu.Unlock() + if index < 0 || index >= len(a.array) { + return gerror.NewCodef(gcode.CodeInvalidParameter, "index %d out of array range %d", index, len(a.array)) + } + a.array[index] = value + return nil +} + +// SetArray sets the underlying slice array with the given `array`. +func (a *IntArray) SetArray(array []int) *IntArray { + a.mu.Lock() + defer a.mu.Unlock() + a.array = array + return a +} + +// Replace replaces the array items by given `array` from the beginning of array. +func (a *IntArray) Replace(array []int) *IntArray { + a.mu.Lock() + defer a.mu.Unlock() + max := len(array) + if max > len(a.array) { + max = len(a.array) + } + for i := 0; i < max; i++ { + a.array[i] = array[i] + } + return a +} + +// Sum returns the sum of values in an array. 
+func (a *IntArray) Sum() (sum int) { + a.mu.RLock() + defer a.mu.RUnlock() + for _, v := range a.array { + sum += v + } + return +} + +// Sort sorts the array in increasing order. +// The parameter `reverse` controls whether sort in increasing order(default) or decreasing order. +func (a *IntArray) Sort(reverse ...bool) *IntArray { + a.mu.Lock() + defer a.mu.Unlock() + if len(reverse) > 0 && reverse[0] { + sort.Slice(a.array, func(i, j int) bool { + return a.array[i] >= a.array[j] + }) + } else { + sort.Ints(a.array) + } + return a +} + +// SortFunc sorts the array by custom function `less`. +func (a *IntArray) SortFunc(less func(v1, v2 int) bool) *IntArray { + a.mu.Lock() + defer a.mu.Unlock() + sort.Slice(a.array, func(i, j int) bool { + return less(a.array[i], a.array[j]) + }) + return a +} + +// InsertBefore inserts the `values` to the front of `index`. +func (a *IntArray) InsertBefore(index int, values ...int) error { + a.mu.Lock() + defer a.mu.Unlock() + if index < 0 || index >= len(a.array) { + return gerror.NewCodef(gcode.CodeInvalidParameter, "index %d out of array range %d", index, len(a.array)) + } + rear := append([]int{}, a.array[index:]...) + a.array = append(a.array[0:index], values...) + a.array = append(a.array, rear...) + return nil +} + +// InsertAfter inserts the `value` to the back of `index`. +func (a *IntArray) InsertAfter(index int, values ...int) error { + a.mu.Lock() + defer a.mu.Unlock() + if index < 0 || index >= len(a.array) { + return gerror.NewCodef(gcode.CodeInvalidParameter, "index %d out of array range %d", index, len(a.array)) + } + rear := append([]int{}, a.array[index+1:]...) + a.array = append(a.array[0:index+1], values...) + a.array = append(a.array, rear...) + return nil +} + +// Remove removes an item by index. +// If the given `index` is out of range of the array, the `found` is false. 
+func (a *IntArray) Remove(index int) (value int, found bool) { + a.mu.Lock() + defer a.mu.Unlock() + return a.doRemoveWithoutLock(index) +} + +// doRemoveWithoutLock removes an item by index without lock. +func (a *IntArray) doRemoveWithoutLock(index int) (value int, found bool) { + if index < 0 || index >= len(a.array) { + return 0, false + } + // Determine array boundaries when deleting to improve deletion efficiency. + if index == 0 { + value := a.array[0] + a.array = a.array[1:] + return value, true + } else if index == len(a.array)-1 { + value := a.array[index] + a.array = a.array[:index] + return value, true + } + // If it is a non-boundary delete, + // it will involve the creation of an array, + // then the deletion is less efficient. + value = a.array[index] + a.array = append(a.array[:index], a.array[index+1:]...) + return value, true +} + +// RemoveValue removes an item by value. +// It returns true if value is found in the array, or else false if not found. +func (a *IntArray) RemoveValue(value int) bool { + if i := a.Search(value); i != -1 { + _, found := a.Remove(i) + return found + } + return false +} + +// PushLeft pushes one or multiple items to the beginning of array. +func (a *IntArray) PushLeft(value ...int) *IntArray { + a.mu.Lock() + a.array = append(value, a.array...) + a.mu.Unlock() + return a +} + +// PushRight pushes one or multiple items to the end of array. +// It equals to Append. +func (a *IntArray) PushRight(value ...int) *IntArray { + a.mu.Lock() + a.array = append(a.array, value...) + a.mu.Unlock() + return a +} + +// PopLeft pops and returns an item from the beginning of array. +// Note that if the array is empty, the `found` is false. +func (a *IntArray) PopLeft() (value int, found bool) { + a.mu.Lock() + defer a.mu.Unlock() + if len(a.array) == 0 { + return 0, false + } + value = a.array[0] + a.array = a.array[1:] + return value, true +} + +// PopRight pops and returns an item from the end of array. 
+// Note that if the array is empty, the `found` is false. +func (a *IntArray) PopRight() (value int, found bool) { + a.mu.Lock() + defer a.mu.Unlock() + index := len(a.array) - 1 + if index < 0 { + return 0, false + } + value = a.array[index] + a.array = a.array[:index] + return value, true +} + +// PopRand randomly pops and return an item out of array. +// Note that if the array is empty, the `found` is false. +func (a *IntArray) PopRand() (value int, found bool) { + a.mu.Lock() + defer a.mu.Unlock() + return a.doRemoveWithoutLock(grand.Intn(len(a.array))) +} + +// PopRands randomly pops and returns `size` items out of array. +// If the given `size` is greater than size of the array, it returns all elements of the array. +// Note that if given `size` <= 0 or the array is empty, it returns nil. +func (a *IntArray) PopRands(size int) []int { + a.mu.Lock() + defer a.mu.Unlock() + if size <= 0 || len(a.array) == 0 { + return nil + } + if size >= len(a.array) { + size = len(a.array) + } + array := make([]int, size) + for i := 0; i < size; i++ { + array[i], _ = a.doRemoveWithoutLock(grand.Intn(len(a.array))) + } + return array +} + +// PopLefts pops and returns `size` items from the beginning of array. +// If the given `size` is greater than size of the array, it returns all elements of the array. +// Note that if given `size` <= 0 or the array is empty, it returns nil. +func (a *IntArray) PopLefts(size int) []int { + a.mu.Lock() + defer a.mu.Unlock() + if size <= 0 || len(a.array) == 0 { + return nil + } + if size >= len(a.array) { + array := a.array + a.array = a.array[:0] + return array + } + value := a.array[0:size] + a.array = a.array[size:] + return value +} + +// PopRights pops and returns `size` items from the end of array. +// If the given `size` is greater than size of the array, it returns all elements of the array. +// Note that if given `size` <= 0 or the array is empty, it returns nil. 
+func (a *IntArray) PopRights(size int) []int { + a.mu.Lock() + defer a.mu.Unlock() + if size <= 0 || len(a.array) == 0 { + return nil + } + index := len(a.array) - size + if index <= 0 { + array := a.array + a.array = a.array[:0] + return array + } + value := a.array[index:] + a.array = a.array[:index] + return value +} + +// Range picks and returns items by range, like array[start:end]. +// Notice, if in concurrent-safe usage, it returns a copy of slice; +// else a pointer to the underlying data. +// +// If `end` is negative, then the offset will start from the end of array. +// If `end` is omitted, then the sequence will have everything from start up +// until the end of the array. +func (a *IntArray) Range(start int, end ...int) []int { + a.mu.RLock() + defer a.mu.RUnlock() + offsetEnd := len(a.array) + if len(end) > 0 && end[0] < offsetEnd { + offsetEnd = end[0] + } + if start > offsetEnd { + return nil + } + if start < 0 { + start = 0 + } + array := ([]int)(nil) + if a.mu.IsSafe() { + array = make([]int, offsetEnd-start) + copy(array, a.array[start:offsetEnd]) + } else { + array = a.array[start:offsetEnd] + } + return array +} + +// SubSlice returns a slice of elements from the array as specified +// by the `offset` and `size` parameters. +// If in concurrent safe usage, it returns a copy of the slice; else a pointer. +// +// If offset is non-negative, the sequence will start at that offset in the array. +// If offset is negative, the sequence will start that far from the end of the array. +// +// If length is given and is positive, then the sequence will have up to that many elements in it. +// If the array is shorter than the length, then only the available array elements will be present. +// If length is given and is negative then the sequence will stop that many elements from the end of the array. +// If it is omitted, then the sequence will have everything from offset up until the end of the array. 
+// +// Any possibility crossing the left border of array, it will fail. +func (a *IntArray) SubSlice(offset int, length ...int) []int { + a.mu.RLock() + defer a.mu.RUnlock() + size := len(a.array) + if len(length) > 0 { + size = length[0] + } + if offset > len(a.array) { + return nil + } + if offset < 0 { + offset = len(a.array) + offset + if offset < 0 { + return nil + } + } + if size < 0 { + offset += size + size = -size + if offset < 0 { + return nil + } + } + end := offset + size + if end > len(a.array) { + end = len(a.array) + size = len(a.array) - offset + } + if a.mu.IsSafe() { + s := make([]int, size) + copy(s, a.array[offset:]) + return s + } else { + return a.array[offset:end] + } +} + +// Append is alias of PushRight,please See PushRight. +func (a *IntArray) Append(value ...int) *IntArray { + a.mu.Lock() + a.array = append(a.array, value...) + a.mu.Unlock() + return a +} + +// Len returns the length of array. +func (a *IntArray) Len() int { + a.mu.RLock() + length := len(a.array) + a.mu.RUnlock() + return length +} + +// Slice returns the underlying data of array. +// Note that, if it's in concurrent-safe usage, it returns a copy of underlying data, +// or else a pointer to the underlying data. +func (a *IntArray) Slice() []int { + array := ([]int)(nil) + if a.mu.IsSafe() { + a.mu.RLock() + defer a.mu.RUnlock() + array = make([]int, len(a.array)) + copy(array, a.array) + } else { + array = a.array + } + return array +} + +// Interfaces returns current array as []interface{}. +func (a *IntArray) Interfaces() []interface{} { + a.mu.RLock() + defer a.mu.RUnlock() + array := make([]interface{}, len(a.array)) + for k, v := range a.array { + array[k] = v + } + return array +} + +// Clone returns a new array, which is a copy of current array. 
+func (a *IntArray) Clone() (newArray *IntArray) { + a.mu.RLock() + array := make([]int, len(a.array)) + copy(array, a.array) + a.mu.RUnlock() + return NewIntArrayFrom(array, a.mu.IsSafe()) +} + +// Clear deletes all items of current array. +func (a *IntArray) Clear() *IntArray { + a.mu.Lock() + if len(a.array) > 0 { + a.array = make([]int, 0) + } + a.mu.Unlock() + return a +} + +// Contains checks whether a value exists in the array. +func (a *IntArray) Contains(value int) bool { + return a.Search(value) != -1 +} + +// Search searches array by `value`, returns the index of `value`, +// or returns -1 if not exists. +func (a *IntArray) Search(value int) int { + a.mu.RLock() + defer a.mu.RUnlock() + if len(a.array) == 0 { + return -1 + } + result := -1 + for index, v := range a.array { + if v == value { + result = index + break + } + } + return result +} + +// Unique uniques the array, clear repeated items. +// Example: [1,1,2,3,2] -> [1,2,3] +func (a *IntArray) Unique() *IntArray { + a.mu.Lock() + defer a.mu.Unlock() + if len(a.array) == 0 { + return a + } + var ( + ok bool + temp int + uniqueSet = make(map[int]struct{}) + uniqueArray = make([]int, 0, len(a.array)) + ) + for i := 0; i < len(a.array); i++ { + temp = a.array[i] + if _, ok = uniqueSet[temp]; ok { + continue + } + uniqueSet[temp] = struct{}{} + uniqueArray = append(uniqueArray, temp) + } + a.array = uniqueArray + return a +} + +// LockFunc locks writing by callback function `f`. +func (a *IntArray) LockFunc(f func(array []int)) *IntArray { + a.mu.Lock() + defer a.mu.Unlock() + f(a.array) + return a +} + +// RLockFunc locks reading by callback function `f`. +func (a *IntArray) RLockFunc(f func(array []int)) *IntArray { + a.mu.RLock() + defer a.mu.RUnlock() + f(a.array) + return a +} + +// Merge merges `array` into current array. +// The parameter `array` can be any garray or slice type. 
+// The difference between Merge and Append is Append supports only specified slice type, +// but Merge supports more parameter types. +func (a *IntArray) Merge(array interface{}) *IntArray { + return a.Append(gconv.Ints(array)...) +} + +// Fill fills an array with num entries of the value `value`, +// keys starting at the `startIndex` parameter. +func (a *IntArray) Fill(startIndex int, num int, value int) error { + a.mu.Lock() + defer a.mu.Unlock() + if startIndex < 0 || startIndex > len(a.array) { + return gerror.NewCodef(gcode.CodeInvalidParameter, "index %d out of array range %d", startIndex, len(a.array)) + } + for i := startIndex; i < startIndex+num; i++ { + if i > len(a.array)-1 { + a.array = append(a.array, value) + } else { + a.array[i] = value + } + } + return nil +} + +// Chunk splits an array into multiple arrays, +// the size of each array is determined by `size`. +// The last chunk may contain less than size elements. +func (a *IntArray) Chunk(size int) [][]int { + if size < 1 { + return nil + } + a.mu.RLock() + defer a.mu.RUnlock() + length := len(a.array) + chunks := int(math.Ceil(float64(length) / float64(size))) + var n [][]int + for i, end := 0, 0; chunks > 0; chunks-- { + end = (i + 1) * size + if end > length { + end = length + } + n = append(n, a.array[i*size:end]) + i++ + } + return n +} + +// Pad pads array to the specified length with `value`. +// If size is positive then the array is padded on the right, or negative on the left. +// If the absolute value of `size` is less than or equal to the length of the array +// then no padding takes place. +func (a *IntArray) Pad(size int, value int) *IntArray { + a.mu.Lock() + defer a.mu.Unlock() + if size == 0 || (size > 0 && size < len(a.array)) || (size < 0 && size > -len(a.array)) { + return a + } + n := size + if size < 0 { + n = -size + } + n -= len(a.array) + tmp := make([]int, n) + for i := 0; i < n; i++ { + tmp[i] = value + } + if size > 0 { + a.array = append(a.array, tmp...) 
+ } else { + a.array = append(tmp, a.array...) + } + return a +} + +// Rand randomly returns one item from array(no deleting). +func (a *IntArray) Rand() (value int, found bool) { + a.mu.RLock() + defer a.mu.RUnlock() + if len(a.array) == 0 { + return 0, false + } + return a.array[grand.Intn(len(a.array))], true +} + +// Rands randomly returns `size` items from array(no deleting). +func (a *IntArray) Rands(size int) []int { + a.mu.RLock() + defer a.mu.RUnlock() + if size <= 0 || len(a.array) == 0 { + return nil + } + array := make([]int, size) + for i := 0; i < size; i++ { + array[i] = a.array[grand.Intn(len(a.array))] + } + return array +} + +// Shuffle randomly shuffles the array. +func (a *IntArray) Shuffle() *IntArray { + a.mu.Lock() + defer a.mu.Unlock() + for i, v := range grand.Perm(len(a.array)) { + a.array[i], a.array[v] = a.array[v], a.array[i] + } + return a +} + +// Reverse makes array with elements in reverse order. +func (a *IntArray) Reverse() *IntArray { + a.mu.Lock() + defer a.mu.Unlock() + for i, j := 0, len(a.array)-1; i < j; i, j = i+1, j-1 { + a.array[i], a.array[j] = a.array[j], a.array[i] + } + return a +} + +// Join joins array elements with a string `glue`. +func (a *IntArray) Join(glue string) string { + a.mu.RLock() + defer a.mu.RUnlock() + if len(a.array) == 0 { + return "" + } + buffer := bytes.NewBuffer(nil) + for k, v := range a.array { + buffer.WriteString(gconv.String(v)) + if k != len(a.array)-1 { + buffer.WriteString(glue) + } + } + return buffer.String() +} + +// CountValues counts the number of occurrences of all values in the array. +func (a *IntArray) CountValues() map[int]int { + m := make(map[int]int) + a.mu.RLock() + defer a.mu.RUnlock() + for _, v := range a.array { + m[v]++ + } + return m +} + +// Iterator is alias of IteratorAsc. +func (a *IntArray) Iterator(f func(k int, v int) bool) { + a.IteratorAsc(f) +} + +// IteratorAsc iterates the array readonly in ascending order with given callback function `f`. 
+// If `f` returns true, then it continues iterating; or false to stop. +func (a *IntArray) IteratorAsc(f func(k int, v int) bool) { + a.mu.RLock() + defer a.mu.RUnlock() + for k, v := range a.array { + if !f(k, v) { + break + } + } +} + +// IteratorDesc iterates the array readonly in descending order with given callback function `f`. +// If `f` returns true, then it continues iterating; or false to stop. +func (a *IntArray) IteratorDesc(f func(k int, v int) bool) { + a.mu.RLock() + defer a.mu.RUnlock() + for i := len(a.array) - 1; i >= 0; i-- { + if !f(i, a.array[i]) { + break + } + } +} + +// String returns current array as a string, which implements like json.Marshal does. +func (a *IntArray) String() string { + if a == nil { + return "" + } + return "[" + a.Join(",") + "]" +} + +// MarshalJSON implements the interface MarshalJSON for json.Marshal. +// Note that do not use pointer as its receiver here. +func (a IntArray) MarshalJSON() ([]byte, error) { + a.mu.RLock() + defer a.mu.RUnlock() + return json.Marshal(a.array) +} + +// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. +func (a *IntArray) UnmarshalJSON(b []byte) error { + if a.array == nil { + a.array = make([]int, 0) + } + a.mu.Lock() + defer a.mu.Unlock() + if err := json.UnmarshalUseNumber(b, &a.array); err != nil { + return err + } + return nil +} + +// UnmarshalValue is an interface implement which sets any type of value for array. +func (a *IntArray) UnmarshalValue(value interface{}) error { + a.mu.Lock() + defer a.mu.Unlock() + switch value.(type) { + case string, []byte: + return json.UnmarshalUseNumber(gconv.Bytes(value), &a.array) + default: + a.array = gconv.SliceInt(value) + } + return nil +} + +// FilterEmpty removes all zero value of the array. +func (a *IntArray) FilterEmpty() *IntArray { + a.mu.Lock() + defer a.mu.Unlock() + for i := 0; i < len(a.array); { + if a.array[i] == 0 { + a.array = append(a.array[:i], a.array[i+1:]...) 
+ } else { + i++ + } + } + return a +} + +// Walk applies a user supplied function `f` to every item of array. +func (a *IntArray) Walk(f func(value int) int) *IntArray { + a.mu.Lock() + defer a.mu.Unlock() + for i, v := range a.array { + a.array[i] = f(v) + } + return a +} + +// IsEmpty checks whether the array is empty. +func (a *IntArray) IsEmpty() bool { + return a.Len() == 0 +} + +// DeepCopy implements interface for deep copy of current type. +func (a *IntArray) DeepCopy() interface{} { + if a == nil { + return nil + } + a.mu.RLock() + defer a.mu.RUnlock() + newSlice := make([]int, len(a.array)) + copy(newSlice, a.array) + return NewIntArrayFrom(newSlice, a.mu.IsSafe()) +} diff --git a/vendor/github.com/gogf/gf/v2/container/garray/garray_normal_str.go b/vendor/github.com/gogf/gf/v2/container/garray/garray_normal_str.go new file mode 100644 index 00000000..8b3f8f28 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/container/garray/garray_normal_str.go @@ -0,0 +1,826 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package garray + +import ( + "bytes" + "math" + "sort" + "strings" + + "github.com/gogf/gf/v2/errors/gcode" + "github.com/gogf/gf/v2/errors/gerror" + "github.com/gogf/gf/v2/internal/json" + "github.com/gogf/gf/v2/internal/rwmutex" + "github.com/gogf/gf/v2/text/gstr" + "github.com/gogf/gf/v2/util/gconv" + "github.com/gogf/gf/v2/util/grand" +) + +// StrArray is a golang string array with rich features. +// It contains a concurrent-safe/unsafe switch, which should be set +// when its initialization and cannot be changed then. +type StrArray struct { + mu rwmutex.RWMutex + array []string +} + +// NewStrArray creates and returns an empty array. 
+// The parameter `safe` is used to specify whether using array in concurrent-safety, +// which is false in default. +func NewStrArray(safe ...bool) *StrArray { + return NewStrArraySize(0, 0, safe...) +} + +// NewStrArraySize create and returns an array with given size and cap. +// The parameter `safe` is used to specify whether using array in concurrent-safety, +// which is false in default. +func NewStrArraySize(size int, cap int, safe ...bool) *StrArray { + return &StrArray{ + mu: rwmutex.Create(safe...), + array: make([]string, size, cap), + } +} + +// NewStrArrayFrom creates and returns an array with given slice `array`. +// The parameter `safe` is used to specify whether using array in concurrent-safety, +// which is false in default. +func NewStrArrayFrom(array []string, safe ...bool) *StrArray { + return &StrArray{ + mu: rwmutex.Create(safe...), + array: array, + } +} + +// NewStrArrayFromCopy creates and returns an array from a copy of given slice `array`. +// The parameter `safe` is used to specify whether using array in concurrent-safety, +// which is false in default. +func NewStrArrayFromCopy(array []string, safe ...bool) *StrArray { + newArray := make([]string, len(array)) + copy(newArray, array) + return &StrArray{ + mu: rwmutex.Create(safe...), + array: newArray, + } +} + +// At returns the value by the specified index. +// If the given `index` is out of range of the array, it returns an empty string. +func (a *StrArray) At(index int) (value string) { + value, _ = a.Get(index) + return +} + +// Get returns the value by the specified index. +// If the given `index` is out of range of the array, the `found` is false. +func (a *StrArray) Get(index int) (value string, found bool) { + a.mu.RLock() + defer a.mu.RUnlock() + if index < 0 || index >= len(a.array) { + return "", false + } + return a.array[index], true +} + +// Set sets value to specified index. 
+func (a *StrArray) Set(index int, value string) error { + a.mu.Lock() + defer a.mu.Unlock() + if index < 0 || index >= len(a.array) { + return gerror.NewCodef(gcode.CodeInvalidParameter, "index %d out of array range %d", index, len(a.array)) + } + a.array[index] = value + return nil +} + +// SetArray sets the underlying slice array with the given `array`. +func (a *StrArray) SetArray(array []string) *StrArray { + a.mu.Lock() + defer a.mu.Unlock() + a.array = array + return a +} + +// Replace replaces the array items by given `array` from the beginning of array. +func (a *StrArray) Replace(array []string) *StrArray { + a.mu.Lock() + defer a.mu.Unlock() + max := len(array) + if max > len(a.array) { + max = len(a.array) + } + for i := 0; i < max; i++ { + a.array[i] = array[i] + } + return a +} + +// Sum returns the sum of values in an array. +func (a *StrArray) Sum() (sum int) { + a.mu.RLock() + defer a.mu.RUnlock() + for _, v := range a.array { + sum += gconv.Int(v) + } + return +} + +// Sort sorts the array in increasing order. +// The parameter `reverse` controls whether sort +// in increasing order(default) or decreasing order +func (a *StrArray) Sort(reverse ...bool) *StrArray { + a.mu.Lock() + defer a.mu.Unlock() + if len(reverse) > 0 && reverse[0] { + sort.Slice(a.array, func(i, j int) bool { + return strings.Compare(a.array[i], a.array[j]) >= 0 + }) + } else { + sort.Strings(a.array) + } + return a +} + +// SortFunc sorts the array by custom function `less`. +func (a *StrArray) SortFunc(less func(v1, v2 string) bool) *StrArray { + a.mu.Lock() + defer a.mu.Unlock() + sort.Slice(a.array, func(i, j int) bool { + return less(a.array[i], a.array[j]) + }) + return a +} + +// InsertBefore inserts the `values` to the front of `index`. 
+func (a *StrArray) InsertBefore(index int, values ...string) error { + a.mu.Lock() + defer a.mu.Unlock() + if index < 0 || index >= len(a.array) { + return gerror.NewCodef(gcode.CodeInvalidParameter, "index %d out of array range %d", index, len(a.array)) + } + rear := append([]string{}, a.array[index:]...) + a.array = append(a.array[0:index], values...) + a.array = append(a.array, rear...) + return nil +} + +// InsertAfter inserts the `values` to the back of `index`. +func (a *StrArray) InsertAfter(index int, values ...string) error { + a.mu.Lock() + defer a.mu.Unlock() + if index < 0 || index >= len(a.array) { + return gerror.NewCodef(gcode.CodeInvalidParameter, "index %d out of array range %d", index, len(a.array)) + } + rear := append([]string{}, a.array[index+1:]...) + a.array = append(a.array[0:index+1], values...) + a.array = append(a.array, rear...) + return nil +} + +// Remove removes an item by index. +// If the given `index` is out of range of the array, the `found` is false. +func (a *StrArray) Remove(index int) (value string, found bool) { + a.mu.Lock() + defer a.mu.Unlock() + return a.doRemoveWithoutLock(index) +} + +// doRemoveWithoutLock removes an item by index without lock. +func (a *StrArray) doRemoveWithoutLock(index int) (value string, found bool) { + if index < 0 || index >= len(a.array) { + return "", false + } + // Determine array boundaries when deleting to improve deletion efficiency. + if index == 0 { + value := a.array[0] + a.array = a.array[1:] + return value, true + } else if index == len(a.array)-1 { + value := a.array[index] + a.array = a.array[:index] + return value, true + } + // If it is a non-boundary delete, + // it will involve the creation of an array, + // then the deletion is less efficient. + value = a.array[index] + a.array = append(a.array[:index], a.array[index+1:]...) + return value, true +} + +// RemoveValue removes an item by value. +// It returns true if value is found in the array, or else false if not found. 
+func (a *StrArray) RemoveValue(value string) bool { + if i := a.Search(value); i != -1 { + _, found := a.Remove(i) + return found + } + return false +} + +// PushLeft pushes one or multiple items to the beginning of array. +func (a *StrArray) PushLeft(value ...string) *StrArray { + a.mu.Lock() + a.array = append(value, a.array...) + a.mu.Unlock() + return a +} + +// PushRight pushes one or multiple items to the end of array. +// It equals to Append. +func (a *StrArray) PushRight(value ...string) *StrArray { + a.mu.Lock() + a.array = append(a.array, value...) + a.mu.Unlock() + return a +} + +// PopLeft pops and returns an item from the beginning of array. +// Note that if the array is empty, the `found` is false. +func (a *StrArray) PopLeft() (value string, found bool) { + a.mu.Lock() + defer a.mu.Unlock() + if len(a.array) == 0 { + return "", false + } + value = a.array[0] + a.array = a.array[1:] + return value, true +} + +// PopRight pops and returns an item from the end of array. +// Note that if the array is empty, the `found` is false. +func (a *StrArray) PopRight() (value string, found bool) { + a.mu.Lock() + defer a.mu.Unlock() + index := len(a.array) - 1 + if index < 0 { + return "", false + } + value = a.array[index] + a.array = a.array[:index] + return value, true +} + +// PopRand randomly pops and return an item out of array. +// Note that if the array is empty, the `found` is false. +func (a *StrArray) PopRand() (value string, found bool) { + a.mu.Lock() + defer a.mu.Unlock() + return a.doRemoveWithoutLock(grand.Intn(len(a.array))) +} + +// PopRands randomly pops and returns `size` items out of array. +// If the given `size` is greater than size of the array, it returns all elements of the array. +// Note that if given `size` <= 0 or the array is empty, it returns nil. 
+func (a *StrArray) PopRands(size int) []string { + a.mu.Lock() + defer a.mu.Unlock() + if size <= 0 || len(a.array) == 0 { + return nil + } + if size >= len(a.array) { + size = len(a.array) + } + array := make([]string, size) + for i := 0; i < size; i++ { + array[i], _ = a.doRemoveWithoutLock(grand.Intn(len(a.array))) + } + return array +} + +// PopLefts pops and returns `size` items from the beginning of array. +// If the given `size` is greater than size of the array, it returns all elements of the array. +// Note that if given `size` <= 0 or the array is empty, it returns nil. +func (a *StrArray) PopLefts(size int) []string { + a.mu.Lock() + defer a.mu.Unlock() + if size <= 0 || len(a.array) == 0 { + return nil + } + if size >= len(a.array) { + array := a.array + a.array = a.array[:0] + return array + } + value := a.array[0:size] + a.array = a.array[size:] + return value +} + +// PopRights pops and returns `size` items from the end of array. +// If the given `size` is greater than size of the array, it returns all elements of the array. +// Note that if given `size` <= 0 or the array is empty, it returns nil. +func (a *StrArray) PopRights(size int) []string { + a.mu.Lock() + defer a.mu.Unlock() + if size <= 0 || len(a.array) == 0 { + return nil + } + index := len(a.array) - size + if index <= 0 { + array := a.array + a.array = a.array[:0] + return array + } + value := a.array[index:] + a.array = a.array[:index] + return value +} + +// Range picks and returns items by range, like array[start:end]. +// Notice, if in concurrent-safe usage, it returns a copy of slice; +// else a pointer to the underlying data. +// +// If `end` is negative, then the offset will start from the end of array. +// If `end` is omitted, then the sequence will have everything from start up +// until the end of the array. 
+func (a *StrArray) Range(start int, end ...int) []string { + a.mu.RLock() + defer a.mu.RUnlock() + offsetEnd := len(a.array) + if len(end) > 0 && end[0] < offsetEnd { + offsetEnd = end[0] + } + if start > offsetEnd { + return nil + } + if start < 0 { + start = 0 + } + array := ([]string)(nil) + if a.mu.IsSafe() { + array = make([]string, offsetEnd-start) + copy(array, a.array[start:offsetEnd]) + } else { + array = a.array[start:offsetEnd] + } + return array +} + +// SubSlice returns a slice of elements from the array as specified +// by the `offset` and `size` parameters. +// If in concurrent safe usage, it returns a copy of the slice; else a pointer. +// +// If offset is non-negative, the sequence will start at that offset in the array. +// If offset is negative, the sequence will start that far from the end of the array. +// +// If length is given and is positive, then the sequence will have up to that many elements in it. +// If the array is shorter than the length, then only the available array elements will be present. +// If length is given and is negative then the sequence will stop that many elements from the end of the array. +// If it is omitted, then the sequence will have everything from offset up until the end of the array. +// +// Any possibility crossing the left border of array, it will fail. 
+func (a *StrArray) SubSlice(offset int, length ...int) []string { + a.mu.RLock() + defer a.mu.RUnlock() + size := len(a.array) + if len(length) > 0 { + size = length[0] + } + if offset > len(a.array) { + return nil + } + if offset < 0 { + offset = len(a.array) + offset + if offset < 0 { + return nil + } + } + if size < 0 { + offset += size + size = -size + if offset < 0 { + return nil + } + } + end := offset + size + if end > len(a.array) { + end = len(a.array) + size = len(a.array) - offset + } + if a.mu.IsSafe() { + s := make([]string, size) + copy(s, a.array[offset:]) + return s + } + return a.array[offset:end] +} + +// Append is alias of PushRight,please See PushRight. +func (a *StrArray) Append(value ...string) *StrArray { + a.mu.Lock() + a.array = append(a.array, value...) + a.mu.Unlock() + return a +} + +// Len returns the length of array. +func (a *StrArray) Len() int { + a.mu.RLock() + length := len(a.array) + a.mu.RUnlock() + return length +} + +// Slice returns the underlying data of array. +// Note that, if it's in concurrent-safe usage, it returns a copy of underlying data, +// or else a pointer to the underlying data. +func (a *StrArray) Slice() []string { + array := ([]string)(nil) + if a.mu.IsSafe() { + a.mu.RLock() + defer a.mu.RUnlock() + array = make([]string, len(a.array)) + copy(array, a.array) + } else { + array = a.array + } + return array +} + +// Interfaces returns current array as []interface{}. +func (a *StrArray) Interfaces() []interface{} { + a.mu.RLock() + defer a.mu.RUnlock() + array := make([]interface{}, len(a.array)) + for k, v := range a.array { + array[k] = v + } + return array +} + +// Clone returns a new array, which is a copy of current array. +func (a *StrArray) Clone() (newArray *StrArray) { + a.mu.RLock() + array := make([]string, len(a.array)) + copy(array, a.array) + a.mu.RUnlock() + return NewStrArrayFrom(array, a.mu.IsSafe()) +} + +// Clear deletes all items of current array. 
+func (a *StrArray) Clear() *StrArray { + a.mu.Lock() + if len(a.array) > 0 { + a.array = make([]string, 0) + } + a.mu.Unlock() + return a +} + +// Contains checks whether a value exists in the array. +func (a *StrArray) Contains(value string) bool { + return a.Search(value) != -1 +} + +// ContainsI checks whether a value exists in the array with case-insensitively. +// Note that it internally iterates the whole array to do the comparison with case-insensitively. +func (a *StrArray) ContainsI(value string) bool { + a.mu.RLock() + defer a.mu.RUnlock() + if len(a.array) == 0 { + return false + } + for _, v := range a.array { + if strings.EqualFold(v, value) { + return true + } + } + return false +} + +// Search searches array by `value`, returns the index of `value`, +// or returns -1 if not exists. +func (a *StrArray) Search(value string) int { + a.mu.RLock() + defer a.mu.RUnlock() + if len(a.array) == 0 { + return -1 + } + result := -1 + for index, v := range a.array { + if strings.Compare(v, value) == 0 { + result = index + break + } + } + return result +} + +// Unique uniques the array, clear repeated items. +// Example: [1,1,2,3,2] -> [1,2,3] +func (a *StrArray) Unique() *StrArray { + a.mu.Lock() + defer a.mu.Unlock() + if len(a.array) == 0 { + return a + } + var ( + ok bool + temp string + uniqueSet = make(map[string]struct{}) + uniqueArray = make([]string, 0, len(a.array)) + ) + for i := 0; i < len(a.array); i++ { + temp = a.array[i] + if _, ok = uniqueSet[temp]; ok { + continue + } + uniqueSet[temp] = struct{}{} + uniqueArray = append(uniqueArray, temp) + } + a.array = uniqueArray + return a +} + +// LockFunc locks writing by callback function `f`. +func (a *StrArray) LockFunc(f func(array []string)) *StrArray { + a.mu.Lock() + defer a.mu.Unlock() + f(a.array) + return a +} + +// RLockFunc locks reading by callback function `f`. 
+func (a *StrArray) RLockFunc(f func(array []string)) *StrArray { + a.mu.RLock() + defer a.mu.RUnlock() + f(a.array) + return a +} + +// Merge merges `array` into current array. +// The parameter `array` can be any garray or slice type. +// The difference between Merge and Append is Append supports only specified slice type, +// but Merge supports more parameter types. +func (a *StrArray) Merge(array interface{}) *StrArray { + return a.Append(gconv.Strings(array)...) +} + +// Fill fills an array with num entries of the value `value`, +// keys starting at the `startIndex` parameter. +func (a *StrArray) Fill(startIndex int, num int, value string) error { + a.mu.Lock() + defer a.mu.Unlock() + if startIndex < 0 || startIndex > len(a.array) { + return gerror.NewCodef(gcode.CodeInvalidParameter, "index %d out of array range %d", startIndex, len(a.array)) + } + for i := startIndex; i < startIndex+num; i++ { + if i > len(a.array)-1 { + a.array = append(a.array, value) + } else { + a.array[i] = value + } + } + return nil +} + +// Chunk splits an array into multiple arrays, +// the size of each array is determined by `size`. +// The last chunk may contain less than size elements. +func (a *StrArray) Chunk(size int) [][]string { + if size < 1 { + return nil + } + a.mu.RLock() + defer a.mu.RUnlock() + length := len(a.array) + chunks := int(math.Ceil(float64(length) / float64(size))) + var n [][]string + for i, end := 0, 0; chunks > 0; chunks-- { + end = (i + 1) * size + if end > length { + end = length + } + n = append(n, a.array[i*size:end]) + i++ + } + return n +} + +// Pad pads array to the specified length with `value`. +// If size is positive then the array is padded on the right, or negative on the left. +// If the absolute value of `size` is less than or equal to the length of the array +// then no padding takes place. 
+func (a *StrArray) Pad(size int, value string) *StrArray { + a.mu.Lock() + defer a.mu.Unlock() + if size == 0 || (size > 0 && size < len(a.array)) || (size < 0 && size > -len(a.array)) { + return a + } + n := size + if size < 0 { + n = -size + } + n -= len(a.array) + tmp := make([]string, n) + for i := 0; i < n; i++ { + tmp[i] = value + } + if size > 0 { + a.array = append(a.array, tmp...) + } else { + a.array = append(tmp, a.array...) + } + return a +} + +// Rand randomly returns one item from array(no deleting). +func (a *StrArray) Rand() (value string, found bool) { + a.mu.RLock() + defer a.mu.RUnlock() + if len(a.array) == 0 { + return "", false + } + return a.array[grand.Intn(len(a.array))], true +} + +// Rands randomly returns `size` items from array(no deleting). +func (a *StrArray) Rands(size int) []string { + a.mu.RLock() + defer a.mu.RUnlock() + if size <= 0 || len(a.array) == 0 { + return nil + } + array := make([]string, size) + for i := 0; i < size; i++ { + array[i] = a.array[grand.Intn(len(a.array))] + } + return array +} + +// Shuffle randomly shuffles the array. +func (a *StrArray) Shuffle() *StrArray { + a.mu.Lock() + defer a.mu.Unlock() + for i, v := range grand.Perm(len(a.array)) { + a.array[i], a.array[v] = a.array[v], a.array[i] + } + return a +} + +// Reverse makes array with elements in reverse order. +func (a *StrArray) Reverse() *StrArray { + a.mu.Lock() + defer a.mu.Unlock() + for i, j := 0, len(a.array)-1; i < j; i, j = i+1, j-1 { + a.array[i], a.array[j] = a.array[j], a.array[i] + } + return a +} + +// Join joins array elements with a string `glue`. +func (a *StrArray) Join(glue string) string { + a.mu.RLock() + defer a.mu.RUnlock() + if len(a.array) == 0 { + return "" + } + buffer := bytes.NewBuffer(nil) + for k, v := range a.array { + buffer.WriteString(v) + if k != len(a.array)-1 { + buffer.WriteString(glue) + } + } + return buffer.String() +} + +// CountValues counts the number of occurrences of all values in the array. 
+func (a *StrArray) CountValues() map[string]int { + m := make(map[string]int) + a.mu.RLock() + defer a.mu.RUnlock() + for _, v := range a.array { + m[v]++ + } + return m +} + +// Iterator is alias of IteratorAsc. +func (a *StrArray) Iterator(f func(k int, v string) bool) { + a.IteratorAsc(f) +} + +// IteratorAsc iterates the array readonly in ascending order with given callback function `f`. +// If `f` returns true, then it continues iterating; or false to stop. +func (a *StrArray) IteratorAsc(f func(k int, v string) bool) { + a.mu.RLock() + defer a.mu.RUnlock() + for k, v := range a.array { + if !f(k, v) { + break + } + } +} + +// IteratorDesc iterates the array readonly in descending order with given callback function `f`. +// If `f` returns true, then it continues iterating; or false to stop. +func (a *StrArray) IteratorDesc(f func(k int, v string) bool) { + a.mu.RLock() + defer a.mu.RUnlock() + for i := len(a.array) - 1; i >= 0; i-- { + if !f(i, a.array[i]) { + break + } + } +} + +// String returns current array as a string, which implements like json.Marshal does. +func (a *StrArray) String() string { + if a == nil { + return "" + } + a.mu.RLock() + defer a.mu.RUnlock() + buffer := bytes.NewBuffer(nil) + buffer.WriteByte('[') + for k, v := range a.array { + buffer.WriteString(`"` + gstr.QuoteMeta(v, `"\`) + `"`) + if k != len(a.array)-1 { + buffer.WriteByte(',') + } + } + buffer.WriteByte(']') + return buffer.String() +} + +// MarshalJSON implements the interface MarshalJSON for json.Marshal. +// Note that do not use pointer as its receiver here. +func (a StrArray) MarshalJSON() ([]byte, error) { + a.mu.RLock() + defer a.mu.RUnlock() + return json.Marshal(a.array) +} + +// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. 
+func (a *StrArray) UnmarshalJSON(b []byte) error { + if a.array == nil { + a.array = make([]string, 0) + } + a.mu.Lock() + defer a.mu.Unlock() + if err := json.UnmarshalUseNumber(b, &a.array); err != nil { + return err + } + return nil +} + +// UnmarshalValue is an interface implement which sets any type of value for array. +func (a *StrArray) UnmarshalValue(value interface{}) error { + a.mu.Lock() + defer a.mu.Unlock() + switch value.(type) { + case string, []byte: + return json.UnmarshalUseNumber(gconv.Bytes(value), &a.array) + default: + a.array = gconv.SliceStr(value) + } + return nil +} + +// FilterEmpty removes all empty string value of the array. +func (a *StrArray) FilterEmpty() *StrArray { + a.mu.Lock() + defer a.mu.Unlock() + for i := 0; i < len(a.array); { + if a.array[i] == "" { + a.array = append(a.array[:i], a.array[i+1:]...) + } else { + i++ + } + } + return a +} + +// Walk applies a user supplied function `f` to every item of array. +func (a *StrArray) Walk(f func(value string) string) *StrArray { + a.mu.Lock() + defer a.mu.Unlock() + for i, v := range a.array { + a.array[i] = f(v) + } + return a +} + +// IsEmpty checks whether the array is empty. +func (a *StrArray) IsEmpty() bool { + return a.Len() == 0 +} + +// DeepCopy implements interface for deep copy of current type. +func (a *StrArray) DeepCopy() interface{} { + if a == nil { + return nil + } + a.mu.RLock() + defer a.mu.RUnlock() + newSlice := make([]string, len(a.array)) + copy(newSlice, a.array) + return NewStrArrayFrom(newSlice, a.mu.IsSafe()) +} diff --git a/vendor/github.com/gogf/gf/v2/container/garray/garray_sorted_any.go b/vendor/github.com/gogf/gf/v2/container/garray/garray_sorted_any.go new file mode 100644 index 00000000..494c07e3 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/container/garray/garray_sorted_any.go @@ -0,0 +1,815 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. 
+// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package garray + +import ( + "bytes" + "fmt" + "math" + "sort" + + "github.com/gogf/gf/v2/internal/deepcopy" + "github.com/gogf/gf/v2/internal/empty" + "github.com/gogf/gf/v2/internal/json" + "github.com/gogf/gf/v2/internal/rwmutex" + "github.com/gogf/gf/v2/text/gstr" + "github.com/gogf/gf/v2/util/gconv" + "github.com/gogf/gf/v2/util/grand" + "github.com/gogf/gf/v2/util/gutil" +) + +// SortedArray is a golang sorted array with rich features. +// It is using increasing order in default, which can be changed by +// setting it a custom comparator. +// It contains a concurrent-safe/unsafe switch, which should be set +// when its initialization and cannot be changed then. +type SortedArray struct { + mu rwmutex.RWMutex + array []interface{} + unique bool // Whether enable unique feature(false) + comparator func(a, b interface{}) int // Comparison function(it returns -1: a < b; 0: a == b; 1: a > b) +} + +// NewSortedArray creates and returns an empty sorted array. +// The parameter `safe` is used to specify whether using array in concurrent-safety, which is false in default. +// The parameter `comparator` used to compare values to sort in array, +// if it returns value < 0, means `a` < `b`; the `a` will be inserted before `b`; +// if it returns value = 0, means `a` = `b`; the `a` will be replaced by `b`; +// if it returns value > 0, means `a` > `b`; the `a` will be inserted after `b`; +func NewSortedArray(comparator func(a, b interface{}) int, safe ...bool) *SortedArray { + return NewSortedArraySize(0, comparator, safe...) +} + +// NewSortedArraySize create and returns an sorted array with given size and cap. +// The parameter `safe` is used to specify whether using array in concurrent-safety, +// which is false in default. 
+func NewSortedArraySize(cap int, comparator func(a, b interface{}) int, safe ...bool) *SortedArray { + return &SortedArray{ + mu: rwmutex.Create(safe...), + array: make([]interface{}, 0, cap), + comparator: comparator, + } +} + +// NewSortedArrayRange creates and returns an array by a range from `start` to `end` +// with step value `step`. +func NewSortedArrayRange(start, end, step int, comparator func(a, b interface{}) int, safe ...bool) *SortedArray { + if step == 0 { + panic(fmt.Sprintf(`invalid step value: %d`, step)) + } + slice := make([]interface{}, 0) + index := 0 + for i := start; i <= end; i += step { + slice = append(slice, i) + index++ + } + return NewSortedArrayFrom(slice, comparator, safe...) +} + +// NewSortedArrayFrom creates and returns an sorted array with given slice `array`. +// The parameter `safe` is used to specify whether using array in concurrent-safety, +// which is false in default. +func NewSortedArrayFrom(array []interface{}, comparator func(a, b interface{}) int, safe ...bool) *SortedArray { + a := NewSortedArraySize(0, comparator, safe...) + a.array = array + sort.Slice(a.array, func(i, j int) bool { + return a.getComparator()(a.array[i], a.array[j]) < 0 + }) + return a +} + +// NewSortedArrayFromCopy creates and returns an sorted array from a copy of given slice `array`. +// The parameter `safe` is used to specify whether using array in concurrent-safety, +// which is false in default. +func NewSortedArrayFromCopy(array []interface{}, comparator func(a, b interface{}) int, safe ...bool) *SortedArray { + newArray := make([]interface{}, len(array)) + copy(newArray, array) + return NewSortedArrayFrom(newArray, comparator, safe...) +} + +// At returns the value by the specified index. +// If the given `index` is out of range of the array, it returns `nil`. +func (a *SortedArray) At(index int) (value interface{}) { + value, _ = a.Get(index) + return +} + +// SetArray sets the underlying slice array with the given `array`. 
+func (a *SortedArray) SetArray(array []interface{}) *SortedArray { + a.mu.Lock() + defer a.mu.Unlock() + a.array = array + sort.Slice(a.array, func(i, j int) bool { + return a.getComparator()(a.array[i], a.array[j]) < 0 + }) + return a +} + +// SetComparator sets/changes the comparator for sorting. +// It resorts the array as the comparator is changed. +func (a *SortedArray) SetComparator(comparator func(a, b interface{}) int) { + a.mu.Lock() + defer a.mu.Unlock() + a.comparator = comparator + sort.Slice(a.array, func(i, j int) bool { + return a.getComparator()(a.array[i], a.array[j]) < 0 + }) +} + +// Sort sorts the array in increasing order. +// The parameter `reverse` controls whether sort +// in increasing order(default) or decreasing order +func (a *SortedArray) Sort() *SortedArray { + a.mu.Lock() + defer a.mu.Unlock() + sort.Slice(a.array, func(i, j int) bool { + return a.getComparator()(a.array[i], a.array[j]) < 0 + }) + return a +} + +// Add adds one or multiple values to sorted array, the array always keeps sorted. +// It's alias of function Append, see Append. +func (a *SortedArray) Add(values ...interface{}) *SortedArray { + return a.Append(values...) +} + +// Append adds one or multiple values to sorted array, the array always keeps sorted. +func (a *SortedArray) Append(values ...interface{}) *SortedArray { + if len(values) == 0 { + return a + } + a.mu.Lock() + defer a.mu.Unlock() + for _, value := range values { + index, cmp := a.binSearch(value, false) + if a.unique && cmp == 0 { + continue + } + if index < 0 { + a.array = append(a.array, value) + continue + } + if cmp > 0 { + index++ + } + a.array = append(a.array[:index], append([]interface{}{value}, a.array[index:]...)...) + } + return a +} + +// Get returns the value by the specified index. +// If the given `index` is out of range of the array, the `found` is false. 
+func (a *SortedArray) Get(index int) (value interface{}, found bool) { + a.mu.RLock() + defer a.mu.RUnlock() + if index < 0 || index >= len(a.array) { + return nil, false + } + return a.array[index], true +} + +// Remove removes an item by index. +// If the given `index` is out of range of the array, the `found` is false. +func (a *SortedArray) Remove(index int) (value interface{}, found bool) { + a.mu.Lock() + defer a.mu.Unlock() + return a.doRemoveWithoutLock(index) +} + +// doRemoveWithoutLock removes an item by index without lock. +func (a *SortedArray) doRemoveWithoutLock(index int) (value interface{}, found bool) { + if index < 0 || index >= len(a.array) { + return nil, false + } + // Determine array boundaries when deleting to improve deletion efficiency. + if index == 0 { + value := a.array[0] + a.array = a.array[1:] + return value, true + } else if index == len(a.array)-1 { + value := a.array[index] + a.array = a.array[:index] + return value, true + } + // If it is a non-boundary delete, + // it will involve the creation of an array, + // then the deletion is less efficient. + value = a.array[index] + a.array = append(a.array[:index], a.array[index+1:]...) + return value, true +} + +// RemoveValue removes an item by value. +// It returns true if value is found in the array, or else false if not found. +func (a *SortedArray) RemoveValue(value interface{}) bool { + a.mu.Lock() + defer a.mu.Unlock() + if i, r := a.binSearch(value, false); r == 0 { + _, res := a.doRemoveWithoutLock(i) + return res + } + return false +} + +// PopLeft pops and returns an item from the beginning of array. +// Note that if the array is empty, the `found` is false. +func (a *SortedArray) PopLeft() (value interface{}, found bool) { + a.mu.Lock() + defer a.mu.Unlock() + if len(a.array) == 0 { + return nil, false + } + value = a.array[0] + a.array = a.array[1:] + return value, true +} + +// PopRight pops and returns an item from the end of array. 
+// Note that if the array is empty, the `found` is false. +func (a *SortedArray) PopRight() (value interface{}, found bool) { + a.mu.Lock() + defer a.mu.Unlock() + index := len(a.array) - 1 + if index < 0 { + return nil, false + } + value = a.array[index] + a.array = a.array[:index] + return value, true +} + +// PopRand randomly pops and return an item out of array. +// Note that if the array is empty, the `found` is false. +func (a *SortedArray) PopRand() (value interface{}, found bool) { + a.mu.Lock() + defer a.mu.Unlock() + return a.doRemoveWithoutLock(grand.Intn(len(a.array))) +} + +// PopRands randomly pops and returns `size` items out of array. +func (a *SortedArray) PopRands(size int) []interface{} { + a.mu.Lock() + defer a.mu.Unlock() + if size <= 0 || len(a.array) == 0 { + return nil + } + if size >= len(a.array) { + size = len(a.array) + } + array := make([]interface{}, size) + for i := 0; i < size; i++ { + array[i], _ = a.doRemoveWithoutLock(grand.Intn(len(a.array))) + } + return array +} + +// PopLefts pops and returns `size` items from the beginning of array. +func (a *SortedArray) PopLefts(size int) []interface{} { + a.mu.Lock() + defer a.mu.Unlock() + if size <= 0 || len(a.array) == 0 { + return nil + } + if size >= len(a.array) { + array := a.array + a.array = a.array[:0] + return array + } + value := a.array[0:size] + a.array = a.array[size:] + return value +} + +// PopRights pops and returns `size` items from the end of array. +func (a *SortedArray) PopRights(size int) []interface{} { + a.mu.Lock() + defer a.mu.Unlock() + if size <= 0 || len(a.array) == 0 { + return nil + } + index := len(a.array) - size + if index <= 0 { + array := a.array + a.array = a.array[:0] + return array + } + value := a.array[index:] + a.array = a.array[:index] + return value +} + +// Range picks and returns items by range, like array[start:end]. +// Notice, if in concurrent-safe usage, it returns a copy of slice; +// else a pointer to the underlying data. 
+// +// If `end` is negative, then the offset will start from the end of array. +// If `end` is omitted, then the sequence will have everything from start up +// until the end of the array. +func (a *SortedArray) Range(start int, end ...int) []interface{} { + a.mu.RLock() + defer a.mu.RUnlock() + offsetEnd := len(a.array) + if len(end) > 0 && end[0] < offsetEnd { + offsetEnd = end[0] + } + if start > offsetEnd { + return nil + } + if start < 0 { + start = 0 + } + array := ([]interface{})(nil) + if a.mu.IsSafe() { + array = make([]interface{}, offsetEnd-start) + copy(array, a.array[start:offsetEnd]) + } else { + array = a.array[start:offsetEnd] + } + return array +} + +// SubSlice returns a slice of elements from the array as specified +// by the `offset` and `size` parameters. +// If in concurrent safe usage, it returns a copy of the slice; else a pointer. +// +// If offset is non-negative, the sequence will start at that offset in the array. +// If offset is negative, the sequence will start that far from the end of the array. +// +// If length is given and is positive, then the sequence will have up to that many elements in it. +// If the array is shorter than the length, then only the available array elements will be present. +// If length is given and is negative then the sequence will stop that many elements from the end of the array. +// If it is omitted, then the sequence will have everything from offset up until the end of the array. +// +// Any possibility crossing the left border of array, it will fail. 
+func (a *SortedArray) SubSlice(offset int, length ...int) []interface{} { + a.mu.RLock() + defer a.mu.RUnlock() + size := len(a.array) + if len(length) > 0 { + size = length[0] + } + if offset > len(a.array) { + return nil + } + if offset < 0 { + offset = len(a.array) + offset + if offset < 0 { + return nil + } + } + if size < 0 { + offset += size + size = -size + if offset < 0 { + return nil + } + } + end := offset + size + if end > len(a.array) { + end = len(a.array) + size = len(a.array) - offset + } + if a.mu.IsSafe() { + s := make([]interface{}, size) + copy(s, a.array[offset:]) + return s + } else { + return a.array[offset:end] + } +} + +// Sum returns the sum of values in an array. +func (a *SortedArray) Sum() (sum int) { + a.mu.RLock() + defer a.mu.RUnlock() + for _, v := range a.array { + sum += gconv.Int(v) + } + return +} + +// Len returns the length of array. +func (a *SortedArray) Len() int { + a.mu.RLock() + length := len(a.array) + a.mu.RUnlock() + return length +} + +// Slice returns the underlying data of array. +// Note that, if it's in concurrent-safe usage, it returns a copy of underlying data, +// or else a pointer to the underlying data. +func (a *SortedArray) Slice() []interface{} { + var array []interface{} + if a.mu.IsSafe() { + a.mu.RLock() + defer a.mu.RUnlock() + array = make([]interface{}, len(a.array)) + copy(array, a.array) + } else { + array = a.array + } + return array +} + +// Interfaces returns current array as []interface{}. +func (a *SortedArray) Interfaces() []interface{} { + return a.Slice() +} + +// Contains checks whether a value exists in the array. +func (a *SortedArray) Contains(value interface{}) bool { + return a.Search(value) != -1 +} + +// Search searches array by `value`, returns the index of `value`, +// or returns -1 if not exists. +func (a *SortedArray) Search(value interface{}) (index int) { + if i, r := a.binSearch(value, true); r == 0 { + return i + } + return -1 +} + +// Binary search. 
+// It returns the last compared index and the result. +// If `result` equals to 0, it means the value at `index` is equals to `value`. +// If `result` lesser than 0, it means the value at `index` is lesser than `value`. +// If `result` greater than 0, it means the value at `index` is greater than `value`. +func (a *SortedArray) binSearch(value interface{}, lock bool) (index int, result int) { + if lock { + a.mu.RLock() + defer a.mu.RUnlock() + } + if len(a.array) == 0 { + return -1, -2 + } + min := 0 + max := len(a.array) - 1 + mid := 0 + cmp := -2 + for min <= max { + mid = min + (max-min)/2 + cmp = a.getComparator()(value, a.array[mid]) + switch { + case cmp < 0: + max = mid - 1 + case cmp > 0: + min = mid + 1 + default: + return mid, cmp + } + } + return mid, cmp +} + +// SetUnique sets unique mark to the array, +// which means it does not contain any repeated items. +// It also do unique check, remove all repeated items. +func (a *SortedArray) SetUnique(unique bool) *SortedArray { + oldUnique := a.unique + a.unique = unique + if unique && oldUnique != unique { + a.Unique() + } + return a +} + +// Unique uniques the array, clear repeated items. +func (a *SortedArray) Unique() *SortedArray { + a.mu.Lock() + defer a.mu.Unlock() + if len(a.array) == 0 { + return a + } + i := 0 + for { + if i == len(a.array)-1 { + break + } + if a.getComparator()(a.array[i], a.array[i+1]) == 0 { + a.array = append(a.array[:i+1], a.array[i+1+1:]...) + } else { + i++ + } + } + return a +} + +// Clone returns a new array, which is a copy of current array. +func (a *SortedArray) Clone() (newArray *SortedArray) { + a.mu.RLock() + array := make([]interface{}, len(a.array)) + copy(array, a.array) + a.mu.RUnlock() + return NewSortedArrayFrom(array, a.comparator, a.mu.IsSafe()) +} + +// Clear deletes all items of current array. 
+func (a *SortedArray) Clear() *SortedArray { + a.mu.Lock() + if len(a.array) > 0 { + a.array = make([]interface{}, 0) + } + a.mu.Unlock() + return a +} + +// LockFunc locks writing by callback function `f`. +func (a *SortedArray) LockFunc(f func(array []interface{})) *SortedArray { + a.mu.Lock() + defer a.mu.Unlock() + + // Keep the array always sorted. + defer sort.Slice(a.array, func(i, j int) bool { + return a.getComparator()(a.array[i], a.array[j]) < 0 + }) + + f(a.array) + return a +} + +// RLockFunc locks reading by callback function `f`. +func (a *SortedArray) RLockFunc(f func(array []interface{})) *SortedArray { + a.mu.RLock() + defer a.mu.RUnlock() + f(a.array) + return a +} + +// Merge merges `array` into current array. +// The parameter `array` can be any garray or slice type. +// The difference between Merge and Append is Append supports only specified slice type, +// but Merge supports more parameter types. +func (a *SortedArray) Merge(array interface{}) *SortedArray { + return a.Add(gconv.Interfaces(array)...) +} + +// Chunk splits an array into multiple arrays, +// the size of each array is determined by `size`. +// The last chunk may contain less than size elements. +func (a *SortedArray) Chunk(size int) [][]interface{} { + if size < 1 { + return nil + } + a.mu.RLock() + defer a.mu.RUnlock() + length := len(a.array) + chunks := int(math.Ceil(float64(length) / float64(size))) + var n [][]interface{} + for i, end := 0, 0; chunks > 0; chunks-- { + end = (i + 1) * size + if end > length { + end = length + } + n = append(n, a.array[i*size:end]) + i++ + } + return n +} + +// Rand randomly returns one item from array(no deleting). +func (a *SortedArray) Rand() (value interface{}, found bool) { + a.mu.RLock() + defer a.mu.RUnlock() + if len(a.array) == 0 { + return nil, false + } + return a.array[grand.Intn(len(a.array))], true +} + +// Rands randomly returns `size` items from array(no deleting). 
+func (a *SortedArray) Rands(size int) []interface{} { + a.mu.RLock() + defer a.mu.RUnlock() + if size <= 0 || len(a.array) == 0 { + return nil + } + array := make([]interface{}, size) + for i := 0; i < size; i++ { + array[i] = a.array[grand.Intn(len(a.array))] + } + return array +} + +// Join joins array elements with a string `glue`. +func (a *SortedArray) Join(glue string) string { + a.mu.RLock() + defer a.mu.RUnlock() + if len(a.array) == 0 { + return "" + } + buffer := bytes.NewBuffer(nil) + for k, v := range a.array { + buffer.WriteString(gconv.String(v)) + if k != len(a.array)-1 { + buffer.WriteString(glue) + } + } + return buffer.String() +} + +// CountValues counts the number of occurrences of all values in the array. +func (a *SortedArray) CountValues() map[interface{}]int { + m := make(map[interface{}]int) + a.mu.RLock() + defer a.mu.RUnlock() + for _, v := range a.array { + m[v]++ + } + return m +} + +// Iterator is alias of IteratorAsc. +func (a *SortedArray) Iterator(f func(k int, v interface{}) bool) { + a.IteratorAsc(f) +} + +// IteratorAsc iterates the array readonly in ascending order with given callback function `f`. +// If `f` returns true, then it continues iterating; or false to stop. +func (a *SortedArray) IteratorAsc(f func(k int, v interface{}) bool) { + a.mu.RLock() + defer a.mu.RUnlock() + for k, v := range a.array { + if !f(k, v) { + break + } + } +} + +// IteratorDesc iterates the array readonly in descending order with given callback function `f`. +// If `f` returns true, then it continues iterating; or false to stop. +func (a *SortedArray) IteratorDesc(f func(k int, v interface{}) bool) { + a.mu.RLock() + defer a.mu.RUnlock() + for i := len(a.array) - 1; i >= 0; i-- { + if !f(i, a.array[i]) { + break + } + } +} + +// String returns current array as a string, which implements like json.Marshal does. 
+func (a *SortedArray) String() string { + if a == nil { + return "" + } + a.mu.RLock() + defer a.mu.RUnlock() + buffer := bytes.NewBuffer(nil) + buffer.WriteByte('[') + s := "" + for k, v := range a.array { + s = gconv.String(v) + if gstr.IsNumeric(s) { + buffer.WriteString(s) + } else { + buffer.WriteString(`"` + gstr.QuoteMeta(s, `"\`) + `"`) + } + if k != len(a.array)-1 { + buffer.WriteByte(',') + } + } + buffer.WriteByte(']') + return buffer.String() +} + +// MarshalJSON implements the interface MarshalJSON for json.Marshal. +// Note that do not use pointer as its receiver here. +func (a SortedArray) MarshalJSON() ([]byte, error) { + a.mu.RLock() + defer a.mu.RUnlock() + return json.Marshal(a.array) +} + +// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. +// Note that the comparator is set as string comparator in default. +func (a *SortedArray) UnmarshalJSON(b []byte) error { + if a.comparator == nil { + a.array = make([]interface{}, 0) + a.comparator = gutil.ComparatorString + } + a.mu.Lock() + defer a.mu.Unlock() + if err := json.UnmarshalUseNumber(b, &a.array); err != nil { + return err + } + if a.comparator != nil && a.array != nil { + sort.Slice(a.array, func(i, j int) bool { + return a.comparator(a.array[i], a.array[j]) < 0 + }) + } + return nil +} + +// UnmarshalValue is an interface implement which sets any type of value for array. +// Note that the comparator is set as string comparator in default. 
+func (a *SortedArray) UnmarshalValue(value interface{}) (err error) { + if a.comparator == nil { + a.comparator = gutil.ComparatorString + } + a.mu.Lock() + defer a.mu.Unlock() + switch value.(type) { + case string, []byte: + err = json.UnmarshalUseNumber(gconv.Bytes(value), &a.array) + default: + a.array = gconv.SliceAny(value) + } + if a.comparator != nil && a.array != nil { + sort.Slice(a.array, func(i, j int) bool { + return a.comparator(a.array[i], a.array[j]) < 0 + }) + } + return err +} + +// FilterNil removes all nil value of the array. +func (a *SortedArray) FilterNil() *SortedArray { + a.mu.Lock() + defer a.mu.Unlock() + for i := 0; i < len(a.array); { + if empty.IsNil(a.array[i]) { + a.array = append(a.array[:i], a.array[i+1:]...) + } else { + break + } + } + for i := len(a.array) - 1; i >= 0; { + if empty.IsNil(a.array[i]) { + a.array = append(a.array[:i], a.array[i+1:]...) + } else { + break + } + } + return a +} + +// FilterEmpty removes all empty value of the array. +// Values like: 0, nil, false, "", len(slice/map/chan) == 0 are considered empty. +func (a *SortedArray) FilterEmpty() *SortedArray { + a.mu.Lock() + defer a.mu.Unlock() + for i := 0; i < len(a.array); { + if empty.IsEmpty(a.array[i]) { + a.array = append(a.array[:i], a.array[i+1:]...) + } else { + break + } + } + for i := len(a.array) - 1; i >= 0; { + if empty.IsEmpty(a.array[i]) { + a.array = append(a.array[:i], a.array[i+1:]...) + } else { + break + } + } + return a +} + +// Walk applies a user supplied function `f` to every item of array. +func (a *SortedArray) Walk(f func(value interface{}) interface{}) *SortedArray { + a.mu.Lock() + defer a.mu.Unlock() + // Keep the array always sorted. + defer sort.Slice(a.array, func(i, j int) bool { + return a.getComparator()(a.array[i], a.array[j]) < 0 + }) + for i, v := range a.array { + a.array[i] = f(v) + } + return a +} + +// IsEmpty checks whether the array is empty. 
+func (a *SortedArray) IsEmpty() bool { + return a.Len() == 0 +} + +// getComparator returns the comparator if it's previously set, +// or else it panics. +func (a *SortedArray) getComparator() func(a, b interface{}) int { + if a.comparator == nil { + panic("comparator is missing for sorted array") + } + return a.comparator +} + +// DeepCopy implements interface for deep copy of current type. +func (a *SortedArray) DeepCopy() interface{} { + if a == nil { + return nil + } + a.mu.RLock() + defer a.mu.RUnlock() + newSlice := make([]interface{}, len(a.array)) + for i, v := range a.array { + newSlice[i] = deepcopy.Copy(v) + } + return NewSortedArrayFrom(newSlice, a.comparator, a.mu.IsSafe()) +} diff --git a/vendor/github.com/gogf/gf/v2/container/garray/garray_sorted_int.go b/vendor/github.com/gogf/gf/v2/container/garray/garray_sorted_int.go new file mode 100644 index 00000000..aeee26ba --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/container/garray/garray_sorted_int.go @@ -0,0 +1,760 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package garray + +import ( + "bytes" + "fmt" + "math" + "sort" + + "github.com/gogf/gf/v2/internal/json" + "github.com/gogf/gf/v2/internal/rwmutex" + "github.com/gogf/gf/v2/util/gconv" + "github.com/gogf/gf/v2/util/grand" +) + +// SortedIntArray is a golang sorted int array with rich features. +// It is using increasing order in default, which can be changed by +// setting it a custom comparator. +// It contains a concurrent-safe/unsafe switch, which should be set +// when its initialization and cannot be changed then. 
+type SortedIntArray struct { + mu rwmutex.RWMutex + array []int + unique bool // Whether enable unique feature(false) + comparator func(a, b int) int // Comparison function(it returns -1: a < b; 0: a == b; 1: a > b) +} + +// NewSortedIntArray creates and returns an empty sorted array. +// The parameter `safe` is used to specify whether using array in concurrent-safety, +// which is false in default. +func NewSortedIntArray(safe ...bool) *SortedIntArray { + return NewSortedIntArraySize(0, safe...) +} + +// NewSortedIntArrayComparator creates and returns an empty sorted array with specified comparator. +// The parameter `safe` is used to specify whether using array in concurrent-safety which is false in default. +func NewSortedIntArrayComparator(comparator func(a, b int) int, safe ...bool) *SortedIntArray { + array := NewSortedIntArray(safe...) + array.comparator = comparator + return array +} + +// NewSortedIntArraySize create and returns an sorted array with given size and cap. +// The parameter `safe` is used to specify whether using array in concurrent-safety, +// which is false in default. +func NewSortedIntArraySize(cap int, safe ...bool) *SortedIntArray { + return &SortedIntArray{ + mu: rwmutex.Create(safe...), + array: make([]int, 0, cap), + comparator: defaultComparatorInt, + } +} + +// NewSortedIntArrayRange creates and returns an array by a range from `start` to `end` +// with step value `step`. +func NewSortedIntArrayRange(start, end, step int, safe ...bool) *SortedIntArray { + if step == 0 { + panic(fmt.Sprintf(`invalid step value: %d`, step)) + } + slice := make([]int, 0) + index := 0 + for i := start; i <= end; i += step { + slice = append(slice, i) + index++ + } + return NewSortedIntArrayFrom(slice, safe...) +} + +// NewSortedIntArrayFrom creates and returns an sorted array with given slice `array`. +// The parameter `safe` is used to specify whether using array in concurrent-safety, +// which is false in default. 
+func NewSortedIntArrayFrom(array []int, safe ...bool) *SortedIntArray { + a := NewSortedIntArraySize(0, safe...) + a.array = array + sort.Ints(a.array) + return a +} + +// NewSortedIntArrayFromCopy creates and returns an sorted array from a copy of given slice `array`. +// The parameter `safe` is used to specify whether using array in concurrent-safety, +// which is false in default. +func NewSortedIntArrayFromCopy(array []int, safe ...bool) *SortedIntArray { + newArray := make([]int, len(array)) + copy(newArray, array) + return NewSortedIntArrayFrom(newArray, safe...) +} + +// At returns the value by the specified index. +// If the given `index` is out of range of the array, it returns `0`. +func (a *SortedIntArray) At(index int) (value int) { + value, _ = a.Get(index) + return +} + +// SetArray sets the underlying slice array with the given `array`. +func (a *SortedIntArray) SetArray(array []int) *SortedIntArray { + a.mu.Lock() + defer a.mu.Unlock() + a.array = array + quickSortInt(a.array, a.getComparator()) + return a +} + +// Sort sorts the array in increasing order. +// The parameter `reverse` controls whether sort +// in increasing order(default) or decreasing order. +func (a *SortedIntArray) Sort() *SortedIntArray { + a.mu.Lock() + defer a.mu.Unlock() + quickSortInt(a.array, a.getComparator()) + return a +} + +// Add adds one or multiple values to sorted array, the array always keeps sorted. +// It's alias of function Append, see Append. +func (a *SortedIntArray) Add(values ...int) *SortedIntArray { + return a.Append(values...) +} + +// Append adds one or multiple values to sorted array, the array always keeps sorted. 
+func (a *SortedIntArray) Append(values ...int) *SortedIntArray { + if len(values) == 0 { + return a + } + a.mu.Lock() + defer a.mu.Unlock() + for _, value := range values { + index, cmp := a.binSearch(value, false) + if a.unique && cmp == 0 { + continue + } + if index < 0 { + a.array = append(a.array, value) + continue + } + if cmp > 0 { + index++ + } + rear := append([]int{}, a.array[index:]...) + a.array = append(a.array[0:index], value) + a.array = append(a.array, rear...) + } + return a +} + +// Get returns the value by the specified index. +// If the given `index` is out of range of the array, the `found` is false. +func (a *SortedIntArray) Get(index int) (value int, found bool) { + a.mu.RLock() + defer a.mu.RUnlock() + if index < 0 || index >= len(a.array) { + return 0, false + } + return a.array[index], true +} + +// Remove removes an item by index. +// If the given `index` is out of range of the array, the `found` is false. +func (a *SortedIntArray) Remove(index int) (value int, found bool) { + a.mu.Lock() + defer a.mu.Unlock() + return a.doRemoveWithoutLock(index) +} + +// doRemoveWithoutLock removes an item by index without lock. +func (a *SortedIntArray) doRemoveWithoutLock(index int) (value int, found bool) { + if index < 0 || index >= len(a.array) { + return 0, false + } + // Determine array boundaries when deleting to improve deletion efficiency. + if index == 0 { + value := a.array[0] + a.array = a.array[1:] + return value, true + } else if index == len(a.array)-1 { + value := a.array[index] + a.array = a.array[:index] + return value, true + } + // If it is a non-boundary delete, + // it will involve the creation of an array, + // then the deletion is less efficient. + value = a.array[index] + a.array = append(a.array[:index], a.array[index+1:]...) + return value, true +} + +// RemoveValue removes an item by value. +// It returns true if value is found in the array, or else false if not found. 
+func (a *SortedIntArray) RemoveValue(value int) bool { + a.mu.Lock() + defer a.mu.Unlock() + if i, r := a.binSearch(value, false); r == 0 { + _, res := a.doRemoveWithoutLock(i) + return res + } + return false +} + +// PopLeft pops and returns an item from the beginning of array. +// Note that if the array is empty, the `found` is false. +func (a *SortedIntArray) PopLeft() (value int, found bool) { + a.mu.Lock() + defer a.mu.Unlock() + if len(a.array) == 0 { + return 0, false + } + value = a.array[0] + a.array = a.array[1:] + return value, true +} + +// PopRight pops and returns an item from the end of array. +// Note that if the array is empty, the `found` is false. +func (a *SortedIntArray) PopRight() (value int, found bool) { + a.mu.Lock() + defer a.mu.Unlock() + index := len(a.array) - 1 + if index < 0 { + return 0, false + } + value = a.array[index] + a.array = a.array[:index] + return value, true +} + +// PopRand randomly pops and return an item out of array. +// Note that if the array is empty, the `found` is false. +func (a *SortedIntArray) PopRand() (value int, found bool) { + a.mu.Lock() + defer a.mu.Unlock() + return a.doRemoveWithoutLock(grand.Intn(len(a.array))) +} + +// PopRands randomly pops and returns `size` items out of array. +// If the given `size` is greater than size of the array, it returns all elements of the array. +// Note that if given `size` <= 0 or the array is empty, it returns nil. +func (a *SortedIntArray) PopRands(size int) []int { + a.mu.Lock() + defer a.mu.Unlock() + if size <= 0 || len(a.array) == 0 { + return nil + } + if size >= len(a.array) { + size = len(a.array) + } + array := make([]int, size) + for i := 0; i < size; i++ { + array[i], _ = a.doRemoveWithoutLock(grand.Intn(len(a.array))) + } + return array +} + +// PopLefts pops and returns `size` items from the beginning of array. +// If the given `size` is greater than size of the array, it returns all elements of the array. 
+// Note that if given `size` <= 0 or the array is empty, it returns nil. +func (a *SortedIntArray) PopLefts(size int) []int { + a.mu.Lock() + defer a.mu.Unlock() + if size <= 0 || len(a.array) == 0 { + return nil + } + if size >= len(a.array) { + array := a.array + a.array = a.array[:0] + return array + } + value := a.array[0:size] + a.array = a.array[size:] + return value +} + +// PopRights pops and returns `size` items from the end of array. +// If the given `size` is greater than size of the array, it returns all elements of the array. +// Note that if given `size` <= 0 or the array is empty, it returns nil. +func (a *SortedIntArray) PopRights(size int) []int { + a.mu.Lock() + defer a.mu.Unlock() + if size <= 0 || len(a.array) == 0 { + return nil + } + index := len(a.array) - size + if index <= 0 { + array := a.array + a.array = a.array[:0] + return array + } + value := a.array[index:] + a.array = a.array[:index] + return value +} + +// Range picks and returns items by range, like array[start:end]. +// Notice, if in concurrent-safe usage, it returns a copy of slice; +// else a pointer to the underlying data. +// +// If `end` is negative, then the offset will start from the end of array. +// If `end` is omitted, then the sequence will have everything from start up +// until the end of the array. +func (a *SortedIntArray) Range(start int, end ...int) []int { + a.mu.RLock() + defer a.mu.RUnlock() + offsetEnd := len(a.array) + if len(end) > 0 && end[0] < offsetEnd { + offsetEnd = end[0] + } + if start > offsetEnd { + return nil + } + if start < 0 { + start = 0 + } + array := ([]int)(nil) + if a.mu.IsSafe() { + array = make([]int, offsetEnd-start) + copy(array, a.array[start:offsetEnd]) + } else { + array = a.array[start:offsetEnd] + } + return array +} + +// SubSlice returns a slice of elements from the array as specified +// by the `offset` and `size` parameters. +// If in concurrent safe usage, it returns a copy of the slice; else a pointer. 
+// +// If offset is non-negative, the sequence will start at that offset in the array. +// If offset is negative, the sequence will start that far from the end of the array. +// +// If length is given and is positive, then the sequence will have up to that many elements in it. +// If the array is shorter than the length, then only the available array elements will be present. +// If length is given and is negative then the sequence will stop that many elements from the end of the array. +// If it is omitted, then the sequence will have everything from offset up until the end of the array. +// +// Any possibility crossing the left border of array, it will fail. +func (a *SortedIntArray) SubSlice(offset int, length ...int) []int { + a.mu.RLock() + defer a.mu.RUnlock() + size := len(a.array) + if len(length) > 0 { + size = length[0] + } + if offset > len(a.array) { + return nil + } + if offset < 0 { + offset = len(a.array) + offset + if offset < 0 { + return nil + } + } + if size < 0 { + offset += size + size = -size + if offset < 0 { + return nil + } + } + end := offset + size + if end > len(a.array) { + end = len(a.array) + size = len(a.array) - offset + } + if a.mu.IsSafe() { + s := make([]int, size) + copy(s, a.array[offset:]) + return s + } else { + return a.array[offset:end] + } +} + +// Len returns the length of array. +func (a *SortedIntArray) Len() int { + a.mu.RLock() + length := len(a.array) + a.mu.RUnlock() + return length +} + +// Sum returns the sum of values in an array. +func (a *SortedIntArray) Sum() (sum int) { + a.mu.RLock() + defer a.mu.RUnlock() + for _, v := range a.array { + sum += v + } + return +} + +// Slice returns the underlying data of array. +// Note that, if it's in concurrent-safe usage, it returns a copy of underlying data, +// or else a pointer to the underlying data. 
+func (a *SortedIntArray) Slice() []int { + array := ([]int)(nil) + if a.mu.IsSafe() { + a.mu.RLock() + defer a.mu.RUnlock() + array = make([]int, len(a.array)) + copy(array, a.array) + } else { + array = a.array + } + return array +} + +// Interfaces returns current array as []interface{}. +func (a *SortedIntArray) Interfaces() []interface{} { + a.mu.RLock() + defer a.mu.RUnlock() + array := make([]interface{}, len(a.array)) + for k, v := range a.array { + array[k] = v + } + return array +} + +// Contains checks whether a value exists in the array. +func (a *SortedIntArray) Contains(value int) bool { + return a.Search(value) != -1 +} + +// Search searches array by `value`, returns the index of `value`, +// or returns -1 if not exists. +func (a *SortedIntArray) Search(value int) (index int) { + if i, r := a.binSearch(value, true); r == 0 { + return i + } + return -1 +} + +// Binary search. +// It returns the last compared index and the result. +// If `result` equals to 0, it means the value at `index` is equals to `value`. +// If `result` lesser than 0, it means the value at `index` is lesser than `value`. +// If `result` greater than 0, it means the value at `index` is greater than `value`. +func (a *SortedIntArray) binSearch(value int, lock bool) (index int, result int) { + if lock { + a.mu.RLock() + defer a.mu.RUnlock() + } + if len(a.array) == 0 { + return -1, -2 + } + min := 0 + max := len(a.array) - 1 + mid := 0 + cmp := -2 + for min <= max { + mid = min + int((max-min)/2) + cmp = a.getComparator()(value, a.array[mid]) + switch { + case cmp < 0: + max = mid - 1 + case cmp > 0: + min = mid + 1 + default: + return mid, cmp + } + } + return mid, cmp +} + +// SetUnique sets unique mark to the array, +// which means it does not contain any repeated items. +// It also do unique check, remove all repeated items. 
+func (a *SortedIntArray) SetUnique(unique bool) *SortedIntArray { + oldUnique := a.unique + a.unique = unique + if unique && oldUnique != unique { + a.Unique() + } + return a +} + +// Unique uniques the array, clear repeated items. +func (a *SortedIntArray) Unique() *SortedIntArray { + a.mu.Lock() + defer a.mu.Unlock() + if len(a.array) == 0 { + return a + } + i := 0 + for { + if i == len(a.array)-1 { + break + } + if a.getComparator()(a.array[i], a.array[i+1]) == 0 { + a.array = append(a.array[:i+1], a.array[i+1+1:]...) + } else { + i++ + } + } + return a +} + +// Clone returns a new array, which is a copy of current array. +func (a *SortedIntArray) Clone() (newArray *SortedIntArray) { + a.mu.RLock() + array := make([]int, len(a.array)) + copy(array, a.array) + a.mu.RUnlock() + return NewSortedIntArrayFrom(array, a.mu.IsSafe()) +} + +// Clear deletes all items of current array. +func (a *SortedIntArray) Clear() *SortedIntArray { + a.mu.Lock() + if len(a.array) > 0 { + a.array = make([]int, 0) + } + a.mu.Unlock() + return a +} + +// LockFunc locks writing by callback function `f`. +func (a *SortedIntArray) LockFunc(f func(array []int)) *SortedIntArray { + a.mu.Lock() + defer a.mu.Unlock() + f(a.array) + return a +} + +// RLockFunc locks reading by callback function `f`. +func (a *SortedIntArray) RLockFunc(f func(array []int)) *SortedIntArray { + a.mu.RLock() + defer a.mu.RUnlock() + f(a.array) + return a +} + +// Merge merges `array` into current array. +// The parameter `array` can be any garray or slice type. +// The difference between Merge and Append is Append supports only specified slice type, +// but Merge supports more parameter types. +func (a *SortedIntArray) Merge(array interface{}) *SortedIntArray { + return a.Add(gconv.Ints(array)...) +} + +// Chunk splits an array into multiple arrays, +// the size of each array is determined by `size`. +// The last chunk may contain less than size elements. 
+func (a *SortedIntArray) Chunk(size int) [][]int { + if size < 1 { + return nil + } + a.mu.RLock() + defer a.mu.RUnlock() + length := len(a.array) + chunks := int(math.Ceil(float64(length) / float64(size))) + var n [][]int + for i, end := 0, 0; chunks > 0; chunks-- { + end = (i + 1) * size + if end > length { + end = length + } + n = append(n, a.array[i*size:end]) + i++ + } + return n +} + +// Rand randomly returns one item from array(no deleting). +func (a *SortedIntArray) Rand() (value int, found bool) { + a.mu.RLock() + defer a.mu.RUnlock() + if len(a.array) == 0 { + return 0, false + } + return a.array[grand.Intn(len(a.array))], true +} + +// Rands randomly returns `size` items from array(no deleting). +func (a *SortedIntArray) Rands(size int) []int { + a.mu.RLock() + defer a.mu.RUnlock() + if size <= 0 || len(a.array) == 0 { + return nil + } + array := make([]int, size) + for i := 0; i < size; i++ { + array[i] = a.array[grand.Intn(len(a.array))] + } + return array +} + +// Join joins array elements with a string `glue`. +func (a *SortedIntArray) Join(glue string) string { + a.mu.RLock() + defer a.mu.RUnlock() + if len(a.array) == 0 { + return "" + } + buffer := bytes.NewBuffer(nil) + for k, v := range a.array { + buffer.WriteString(gconv.String(v)) + if k != len(a.array)-1 { + buffer.WriteString(glue) + } + } + return buffer.String() +} + +// CountValues counts the number of occurrences of all values in the array. +func (a *SortedIntArray) CountValues() map[int]int { + m := make(map[int]int) + a.mu.RLock() + defer a.mu.RUnlock() + for _, v := range a.array { + m[v]++ + } + return m +} + +// Iterator is alias of IteratorAsc. +func (a *SortedIntArray) Iterator(f func(k int, v int) bool) { + a.IteratorAsc(f) +} + +// IteratorAsc iterates the array readonly in ascending order with given callback function `f`. +// If `f` returns true, then it continues iterating; or false to stop. 
+func (a *SortedIntArray) IteratorAsc(f func(k int, v int) bool) { + a.mu.RLock() + defer a.mu.RUnlock() + for k, v := range a.array { + if !f(k, v) { + break + } + } +} + +// IteratorDesc iterates the array readonly in descending order with given callback function `f`. +// If `f` returns true, then it continues iterating; or false to stop. +func (a *SortedIntArray) IteratorDesc(f func(k int, v int) bool) { + a.mu.RLock() + defer a.mu.RUnlock() + for i := len(a.array) - 1; i >= 0; i-- { + if !f(i, a.array[i]) { + break + } + } +} + +// String returns current array as a string, which implements like json.Marshal does. +func (a *SortedIntArray) String() string { + if a == nil { + return "" + } + return "[" + a.Join(",") + "]" +} + +// MarshalJSON implements the interface MarshalJSON for json.Marshal. +// Note that do not use pointer as its receiver here. +func (a SortedIntArray) MarshalJSON() ([]byte, error) { + a.mu.RLock() + defer a.mu.RUnlock() + return json.Marshal(a.array) +} + +// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. +func (a *SortedIntArray) UnmarshalJSON(b []byte) error { + if a.comparator == nil { + a.array = make([]int, 0) + a.comparator = defaultComparatorInt + } + a.mu.Lock() + defer a.mu.Unlock() + if err := json.UnmarshalUseNumber(b, &a.array); err != nil { + return err + } + if a.array != nil { + sort.Ints(a.array) + } + return nil +} + +// UnmarshalValue is an interface implement which sets any type of value for array. +func (a *SortedIntArray) UnmarshalValue(value interface{}) (err error) { + if a.comparator == nil { + a.comparator = defaultComparatorInt + } + a.mu.Lock() + defer a.mu.Unlock() + switch value.(type) { + case string, []byte: + err = json.UnmarshalUseNumber(gconv.Bytes(value), &a.array) + default: + a.array = gconv.SliceInt(value) + } + if a.array != nil { + sort.Ints(a.array) + } + return err +} + +// FilterEmpty removes all zero value of the array. 
+func (a *SortedIntArray) FilterEmpty() *SortedIntArray { + a.mu.Lock() + defer a.mu.Unlock() + for i := 0; i < len(a.array); { + if a.array[i] == 0 { + a.array = append(a.array[:i], a.array[i+1:]...) + } else { + break + } + } + for i := len(a.array) - 1; i >= 0; { + if a.array[i] == 0 { + a.array = append(a.array[:i], a.array[i+1:]...) + } else { + break + } + } + return a +} + +// Walk applies a user supplied function `f` to every item of array. +func (a *SortedIntArray) Walk(f func(value int) int) *SortedIntArray { + a.mu.Lock() + defer a.mu.Unlock() + + // Keep the array always sorted. + defer quickSortInt(a.array, a.getComparator()) + + for i, v := range a.array { + a.array[i] = f(v) + } + return a +} + +// IsEmpty checks whether the array is empty. +func (a *SortedIntArray) IsEmpty() bool { + return a.Len() == 0 +} + +// getComparator returns the comparator if it's previously set, +// or else it returns a default comparator. +func (a *SortedIntArray) getComparator() func(a, b int) int { + if a.comparator == nil { + return defaultComparatorInt + } + return a.comparator +} + +// DeepCopy implements interface for deep copy of current type. +func (a *SortedIntArray) DeepCopy() interface{} { + if a == nil { + return nil + } + a.mu.RLock() + defer a.mu.RUnlock() + newSlice := make([]int, len(a.array)) + copy(newSlice, a.array) + return NewSortedIntArrayFrom(newSlice, a.mu.IsSafe()) +} diff --git a/vendor/github.com/gogf/gf/v2/container/garray/garray_sorted_str.go b/vendor/github.com/gogf/gf/v2/container/garray/garray_sorted_str.go new file mode 100644 index 00000000..3dbd8d8f --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/container/garray/garray_sorted_str.go @@ -0,0 +1,773 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. 
+ +package garray + +import ( + "bytes" + "math" + "sort" + "strings" + + "github.com/gogf/gf/v2/internal/json" + "github.com/gogf/gf/v2/internal/rwmutex" + "github.com/gogf/gf/v2/text/gstr" + "github.com/gogf/gf/v2/util/gconv" + "github.com/gogf/gf/v2/util/grand" +) + +// SortedStrArray is a golang sorted string array with rich features. +// It is using increasing order in default, which can be changed by +// setting it a custom comparator. +// It contains a concurrent-safe/unsafe switch, which should be set +// when its initialization and cannot be changed then. +type SortedStrArray struct { + mu rwmutex.RWMutex + array []string + unique bool // Whether enable unique feature(false) + comparator func(a, b string) int // Comparison function(it returns -1: a < b; 0: a == b; 1: a > b) +} + +// NewSortedStrArray creates and returns an empty sorted array. +// The parameter `safe` is used to specify whether using array in concurrent-safety, +// which is false in default. +func NewSortedStrArray(safe ...bool) *SortedStrArray { + return NewSortedStrArraySize(0, safe...) +} + +// NewSortedStrArrayComparator creates and returns an empty sorted array with specified comparator. +// The parameter `safe` is used to specify whether using array in concurrent-safety which is false in default. +func NewSortedStrArrayComparator(comparator func(a, b string) int, safe ...bool) *SortedStrArray { + array := NewSortedStrArray(safe...) + array.comparator = comparator + return array +} + +// NewSortedStrArraySize create and returns an sorted array with given size and cap. +// The parameter `safe` is used to specify whether using array in concurrent-safety, +// which is false in default. +func NewSortedStrArraySize(cap int, safe ...bool) *SortedStrArray { + return &SortedStrArray{ + mu: rwmutex.Create(safe...), + array: make([]string, 0, cap), + comparator: defaultComparatorStr, + } +} + +// NewSortedStrArrayFrom creates and returns an sorted array with given slice `array`. 
+// The parameter `safe` is used to specify whether using array in concurrent-safety, +// which is false in default. +func NewSortedStrArrayFrom(array []string, safe ...bool) *SortedStrArray { + a := NewSortedStrArraySize(0, safe...) + a.array = array + quickSortStr(a.array, a.getComparator()) + return a +} + +// NewSortedStrArrayFromCopy creates and returns an sorted array from a copy of given slice `array`. +// The parameter `safe` is used to specify whether using array in concurrent-safety, +// which is false in default. +func NewSortedStrArrayFromCopy(array []string, safe ...bool) *SortedStrArray { + newArray := make([]string, len(array)) + copy(newArray, array) + return NewSortedStrArrayFrom(newArray, safe...) +} + +// SetArray sets the underlying slice array with the given `array`. +func (a *SortedStrArray) SetArray(array []string) *SortedStrArray { + a.mu.Lock() + defer a.mu.Unlock() + a.array = array + quickSortStr(a.array, a.getComparator()) + return a +} + +// At returns the value by the specified index. +// If the given `index` is out of range of the array, it returns an empty string. +func (a *SortedStrArray) At(index int) (value string) { + value, _ = a.Get(index) + return +} + +// Sort sorts the array in increasing order. +// The parameter `reverse` controls whether sort +// in increasing order(default) or decreasing order. +func (a *SortedStrArray) Sort() *SortedStrArray { + a.mu.Lock() + defer a.mu.Unlock() + quickSortStr(a.array, a.getComparator()) + return a +} + +// Add adds one or multiple values to sorted array, the array always keeps sorted. +// It's alias of function Append, see Append. +func (a *SortedStrArray) Add(values ...string) *SortedStrArray { + return a.Append(values...) +} + +// Append adds one or multiple values to sorted array, the array always keeps sorted. 
+func (a *SortedStrArray) Append(values ...string) *SortedStrArray { + if len(values) == 0 { + return a + } + a.mu.Lock() + defer a.mu.Unlock() + for _, value := range values { + index, cmp := a.binSearch(value, false) + if a.unique && cmp == 0 { + continue + } + if index < 0 { + a.array = append(a.array, value) + continue + } + if cmp > 0 { + index++ + } + rear := append([]string{}, a.array[index:]...) + a.array = append(a.array[0:index], value) + a.array = append(a.array, rear...) + } + return a +} + +// Get returns the value by the specified index. +// If the given `index` is out of range of the array, the `found` is false. +func (a *SortedStrArray) Get(index int) (value string, found bool) { + a.mu.RLock() + defer a.mu.RUnlock() + if index < 0 || index >= len(a.array) { + return "", false + } + return a.array[index], true +} + +// Remove removes an item by index. +// If the given `index` is out of range of the array, the `found` is false. +func (a *SortedStrArray) Remove(index int) (value string, found bool) { + a.mu.Lock() + defer a.mu.Unlock() + return a.doRemoveWithoutLock(index) +} + +// doRemoveWithoutLock removes an item by index without lock. +func (a *SortedStrArray) doRemoveWithoutLock(index int) (value string, found bool) { + if index < 0 || index >= len(a.array) { + return "", false + } + // Determine array boundaries when deleting to improve deletion efficiency. + if index == 0 { + value := a.array[0] + a.array = a.array[1:] + return value, true + } else if index == len(a.array)-1 { + value := a.array[index] + a.array = a.array[:index] + return value, true + } + // If it is a non-boundary delete, + // it will involve the creation of an array, + // then the deletion is less efficient. + value = a.array[index] + a.array = append(a.array[:index], a.array[index+1:]...) + return value, true +} + +// RemoveValue removes an item by value. +// It returns true if value is found in the array, or else false if not found. 
+func (a *SortedStrArray) RemoveValue(value string) bool { + a.mu.Lock() + defer a.mu.Unlock() + if i, r := a.binSearch(value, false); r == 0 { + _, res := a.doRemoveWithoutLock(i) + return res + } + return false +} + +// PopLeft pops and returns an item from the beginning of array. +// Note that if the array is empty, the `found` is false. +func (a *SortedStrArray) PopLeft() (value string, found bool) { + a.mu.Lock() + defer a.mu.Unlock() + if len(a.array) == 0 { + return "", false + } + value = a.array[0] + a.array = a.array[1:] + return value, true +} + +// PopRight pops and returns an item from the end of array. +// Note that if the array is empty, the `found` is false. +func (a *SortedStrArray) PopRight() (value string, found bool) { + a.mu.Lock() + defer a.mu.Unlock() + index := len(a.array) - 1 + if index < 0 { + return "", false + } + value = a.array[index] + a.array = a.array[:index] + return value, true +} + +// PopRand randomly pops and return an item out of array. +// Note that if the array is empty, the `found` is false. +func (a *SortedStrArray) PopRand() (value string, found bool) { + a.mu.Lock() + defer a.mu.Unlock() + return a.doRemoveWithoutLock(grand.Intn(len(a.array))) +} + +// PopRands randomly pops and returns `size` items out of array. +// If the given `size` is greater than size of the array, it returns all elements of the array. +// Note that if given `size` <= 0 or the array is empty, it returns nil. +func (a *SortedStrArray) PopRands(size int) []string { + a.mu.Lock() + defer a.mu.Unlock() + if size <= 0 || len(a.array) == 0 { + return nil + } + if size >= len(a.array) { + size = len(a.array) + } + array := make([]string, size) + for i := 0; i < size; i++ { + array[i], _ = a.doRemoveWithoutLock(grand.Intn(len(a.array))) + } + return array +} + +// PopLefts pops and returns `size` items from the beginning of array. +// If the given `size` is greater than size of the array, it returns all elements of the array. 
+// Note that if given `size` <= 0 or the array is empty, it returns nil. +func (a *SortedStrArray) PopLefts(size int) []string { + a.mu.Lock() + defer a.mu.Unlock() + if size <= 0 || len(a.array) == 0 { + return nil + } + if size >= len(a.array) { + array := a.array + a.array = a.array[:0] + return array + } + value := a.array[0:size] + a.array = a.array[size:] + return value +} + +// PopRights pops and returns `size` items from the end of array. +// If the given `size` is greater than size of the array, it returns all elements of the array. +// Note that if given `size` <= 0 or the array is empty, it returns nil. +func (a *SortedStrArray) PopRights(size int) []string { + a.mu.Lock() + defer a.mu.Unlock() + if size <= 0 || len(a.array) == 0 { + return nil + } + index := len(a.array) - size + if index <= 0 { + array := a.array + a.array = a.array[:0] + return array + } + value := a.array[index:] + a.array = a.array[:index] + return value +} + +// Range picks and returns items by range, like array[start:end]. +// Notice, if in concurrent-safe usage, it returns a copy of slice; +// else a pointer to the underlying data. +// +// If `end` is negative, then the offset will start from the end of array. +// If `end` is omitted, then the sequence will have everything from start up +// until the end of the array. +func (a *SortedStrArray) Range(start int, end ...int) []string { + a.mu.RLock() + defer a.mu.RUnlock() + offsetEnd := len(a.array) + if len(end) > 0 && end[0] < offsetEnd { + offsetEnd = end[0] + } + if start > offsetEnd { + return nil + } + if start < 0 { + start = 0 + } + array := ([]string)(nil) + if a.mu.IsSafe() { + array = make([]string, offsetEnd-start) + copy(array, a.array[start:offsetEnd]) + } else { + array = a.array[start:offsetEnd] + } + return array +} + +// SubSlice returns a slice of elements from the array as specified +// by the `offset` and `size` parameters. +// If in concurrent safe usage, it returns a copy of the slice; else a pointer. 
+// +// If offset is non-negative, the sequence will start at that offset in the array. +// If offset is negative, the sequence will start that far from the end of the array. +// +// If length is given and is positive, then the sequence will have up to that many elements in it. +// If the array is shorter than the length, then only the available array elements will be present. +// If length is given and is negative then the sequence will stop that many elements from the end of the array. +// If it is omitted, then the sequence will have everything from offset up until the end of the array. +// +// Any possibility crossing the left border of array, it will fail. +func (a *SortedStrArray) SubSlice(offset int, length ...int) []string { + a.mu.RLock() + defer a.mu.RUnlock() + size := len(a.array) + if len(length) > 0 { + size = length[0] + } + if offset > len(a.array) { + return nil + } + if offset < 0 { + offset = len(a.array) + offset + if offset < 0 { + return nil + } + } + if size < 0 { + offset += size + size = -size + if offset < 0 { + return nil + } + } + end := offset + size + if end > len(a.array) { + end = len(a.array) + size = len(a.array) - offset + } + if a.mu.IsSafe() { + s := make([]string, size) + copy(s, a.array[offset:]) + return s + } else { + return a.array[offset:end] + } +} + +// Sum returns the sum of values in an array. +func (a *SortedStrArray) Sum() (sum int) { + a.mu.RLock() + defer a.mu.RUnlock() + for _, v := range a.array { + sum += gconv.Int(v) + } + return +} + +// Len returns the length of array. +func (a *SortedStrArray) Len() int { + a.mu.RLock() + length := len(a.array) + a.mu.RUnlock() + return length +} + +// Slice returns the underlying data of array. +// Note that, if it's in concurrent-safe usage, it returns a copy of underlying data, +// or else a pointer to the underlying data. 
+func (a *SortedStrArray) Slice() []string { + array := ([]string)(nil) + if a.mu.IsSafe() { + a.mu.RLock() + defer a.mu.RUnlock() + array = make([]string, len(a.array)) + copy(array, a.array) + } else { + array = a.array + } + return array +} + +// Interfaces returns current array as []interface{}. +func (a *SortedStrArray) Interfaces() []interface{} { + a.mu.RLock() + defer a.mu.RUnlock() + array := make([]interface{}, len(a.array)) + for k, v := range a.array { + array[k] = v + } + return array +} + +// Contains checks whether a value exists in the array. +func (a *SortedStrArray) Contains(value string) bool { + return a.Search(value) != -1 +} + +// ContainsI checks whether a value exists in the array with case-insensitively. +// Note that it internally iterates the whole array to do the comparison with case-insensitively. +func (a *SortedStrArray) ContainsI(value string) bool { + a.mu.RLock() + defer a.mu.RUnlock() + if len(a.array) == 0 { + return false + } + for _, v := range a.array { + if strings.EqualFold(v, value) { + return true + } + } + return false +} + +// Search searches array by `value`, returns the index of `value`, +// or returns -1 if not exists. +func (a *SortedStrArray) Search(value string) (index int) { + if i, r := a.binSearch(value, true); r == 0 { + return i + } + return -1 +} + +// Binary search. +// It returns the last compared index and the result. +// If `result` equals to 0, it means the value at `index` is equals to `value`. +// If `result` lesser than 0, it means the value at `index` is lesser than `value`. +// If `result` greater than 0, it means the value at `index` is greater than `value`. 
+func (a *SortedStrArray) binSearch(value string, lock bool) (index int, result int) { + if lock { + a.mu.RLock() + defer a.mu.RUnlock() + } + if len(a.array) == 0 { + return -1, -2 + } + min := 0 + max := len(a.array) - 1 + mid := 0 + cmp := -2 + for min <= max { + mid = min + int((max-min)/2) + cmp = a.getComparator()(value, a.array[mid]) + switch { + case cmp < 0: + max = mid - 1 + case cmp > 0: + min = mid + 1 + default: + return mid, cmp + } + } + return mid, cmp +} + +// SetUnique sets unique mark to the array, +// which means it does not contain any repeated items. +// It also do unique check, remove all repeated items. +func (a *SortedStrArray) SetUnique(unique bool) *SortedStrArray { + oldUnique := a.unique + a.unique = unique + if unique && oldUnique != unique { + a.Unique() + } + return a +} + +// Unique uniques the array, clear repeated items. +func (a *SortedStrArray) Unique() *SortedStrArray { + a.mu.Lock() + defer a.mu.Unlock() + if len(a.array) == 0 { + return a + } + i := 0 + for { + if i == len(a.array)-1 { + break + } + if a.getComparator()(a.array[i], a.array[i+1]) == 0 { + a.array = append(a.array[:i+1], a.array[i+1+1:]...) + } else { + i++ + } + } + return a +} + +// Clone returns a new array, which is a copy of current array. +func (a *SortedStrArray) Clone() (newArray *SortedStrArray) { + a.mu.RLock() + array := make([]string, len(a.array)) + copy(array, a.array) + a.mu.RUnlock() + return NewSortedStrArrayFrom(array, a.mu.IsSafe()) +} + +// Clear deletes all items of current array. +func (a *SortedStrArray) Clear() *SortedStrArray { + a.mu.Lock() + if len(a.array) > 0 { + a.array = make([]string, 0) + } + a.mu.Unlock() + return a +} + +// LockFunc locks writing by callback function `f`. +func (a *SortedStrArray) LockFunc(f func(array []string)) *SortedStrArray { + a.mu.Lock() + defer a.mu.Unlock() + f(a.array) + return a +} + +// RLockFunc locks reading by callback function `f`. 
+func (a *SortedStrArray) RLockFunc(f func(array []string)) *SortedStrArray { + a.mu.RLock() + defer a.mu.RUnlock() + f(a.array) + return a +} + +// Merge merges `array` into current array. +// The parameter `array` can be any garray or slice type. +// The difference between Merge and Append is Append supports only specified slice type, +// but Merge supports more parameter types. +func (a *SortedStrArray) Merge(array interface{}) *SortedStrArray { + return a.Add(gconv.Strings(array)...) +} + +// Chunk splits an array into multiple arrays, +// the size of each array is determined by `size`. +// The last chunk may contain less than size elements. +func (a *SortedStrArray) Chunk(size int) [][]string { + if size < 1 { + return nil + } + a.mu.RLock() + defer a.mu.RUnlock() + length := len(a.array) + chunks := int(math.Ceil(float64(length) / float64(size))) + var n [][]string + for i, end := 0, 0; chunks > 0; chunks-- { + end = (i + 1) * size + if end > length { + end = length + } + n = append(n, a.array[i*size:end]) + i++ + } + return n +} + +// Rand randomly returns one item from array(no deleting). +func (a *SortedStrArray) Rand() (value string, found bool) { + a.mu.RLock() + defer a.mu.RUnlock() + if len(a.array) == 0 { + return "", false + } + return a.array[grand.Intn(len(a.array))], true +} + +// Rands randomly returns `size` items from array(no deleting). +func (a *SortedStrArray) Rands(size int) []string { + a.mu.RLock() + defer a.mu.RUnlock() + if size <= 0 || len(a.array) == 0 { + return nil + } + array := make([]string, size) + for i := 0; i < size; i++ { + array[i] = a.array[grand.Intn(len(a.array))] + } + return array +} + +// Join joins array elements with a string `glue`. 
+func (a *SortedStrArray) Join(glue string) string { + a.mu.RLock() + defer a.mu.RUnlock() + if len(a.array) == 0 { + return "" + } + buffer := bytes.NewBuffer(nil) + for k, v := range a.array { + buffer.WriteString(v) + if k != len(a.array)-1 { + buffer.WriteString(glue) + } + } + return buffer.String() +} + +// CountValues counts the number of occurrences of all values in the array. +func (a *SortedStrArray) CountValues() map[string]int { + m := make(map[string]int) + a.mu.RLock() + defer a.mu.RUnlock() + for _, v := range a.array { + m[v]++ + } + return m +} + +// Iterator is alias of IteratorAsc. +func (a *SortedStrArray) Iterator(f func(k int, v string) bool) { + a.IteratorAsc(f) +} + +// IteratorAsc iterates the array readonly in ascending order with given callback function `f`. +// If `f` returns true, then it continues iterating; or false to stop. +func (a *SortedStrArray) IteratorAsc(f func(k int, v string) bool) { + a.mu.RLock() + defer a.mu.RUnlock() + for k, v := range a.array { + if !f(k, v) { + break + } + } +} + +// IteratorDesc iterates the array readonly in descending order with given callback function `f`. +// If `f` returns true, then it continues iterating; or false to stop. +func (a *SortedStrArray) IteratorDesc(f func(k int, v string) bool) { + a.mu.RLock() + defer a.mu.RUnlock() + for i := len(a.array) - 1; i >= 0; i-- { + if !f(i, a.array[i]) { + break + } + } +} + +// String returns current array as a string, which implements like json.Marshal does. +func (a *SortedStrArray) String() string { + if a == nil { + return "" + } + a.mu.RLock() + defer a.mu.RUnlock() + buffer := bytes.NewBuffer(nil) + buffer.WriteByte('[') + for k, v := range a.array { + buffer.WriteString(`"` + gstr.QuoteMeta(v, `"\`) + `"`) + if k != len(a.array)-1 { + buffer.WriteByte(',') + } + } + buffer.WriteByte(']') + return buffer.String() +} + +// MarshalJSON implements the interface MarshalJSON for json.Marshal. +// Note that do not use pointer as its receiver here. 
+func (a SortedStrArray) MarshalJSON() ([]byte, error) { + a.mu.RLock() + defer a.mu.RUnlock() + return json.Marshal(a.array) +} + +// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. +func (a *SortedStrArray) UnmarshalJSON(b []byte) error { + if a.comparator == nil { + a.array = make([]string, 0) + a.comparator = defaultComparatorStr + } + a.mu.Lock() + defer a.mu.Unlock() + if err := json.UnmarshalUseNumber(b, &a.array); err != nil { + return err + } + if a.array != nil { + sort.Strings(a.array) + } + return nil +} + +// UnmarshalValue is an interface implement which sets any type of value for array. +func (a *SortedStrArray) UnmarshalValue(value interface{}) (err error) { + if a.comparator == nil { + a.comparator = defaultComparatorStr + } + a.mu.Lock() + defer a.mu.Unlock() + switch value.(type) { + case string, []byte: + err = json.UnmarshalUseNumber(gconv.Bytes(value), &a.array) + default: + a.array = gconv.SliceStr(value) + } + if a.array != nil { + sort.Strings(a.array) + } + return err +} + +// FilterEmpty removes all empty string value of the array. +func (a *SortedStrArray) FilterEmpty() *SortedStrArray { + a.mu.Lock() + defer a.mu.Unlock() + for i := 0; i < len(a.array); { + if a.array[i] == "" { + a.array = append(a.array[:i], a.array[i+1:]...) + } else { + break + } + } + for i := len(a.array) - 1; i >= 0; { + if a.array[i] == "" { + a.array = append(a.array[:i], a.array[i+1:]...) + } else { + break + } + } + return a +} + +// Walk applies a user supplied function `f` to every item of array. +func (a *SortedStrArray) Walk(f func(value string) string) *SortedStrArray { + a.mu.Lock() + defer a.mu.Unlock() + + // Keep the array always sorted. + defer quickSortStr(a.array, a.getComparator()) + + for i, v := range a.array { + a.array[i] = f(v) + } + return a +} + +// IsEmpty checks whether the array is empty. 
+func (a *SortedStrArray) IsEmpty() bool { + return a.Len() == 0 +} + +// getComparator returns the comparator if it's previously set, +// or else it returns a default comparator. +func (a *SortedStrArray) getComparator() func(a, b string) int { + if a.comparator == nil { + return defaultComparatorStr + } + return a.comparator +} + +// DeepCopy implements interface for deep copy of current type. +func (a *SortedStrArray) DeepCopy() interface{} { + if a == nil { + return nil + } + a.mu.RLock() + defer a.mu.RUnlock() + newSlice := make([]string, len(a.array)) + copy(newSlice, a.array) + return NewSortedStrArrayFrom(newSlice, a.mu.IsSafe()) +} diff --git a/vendor/github.com/gogf/gf/v2/container/glist/glist.go b/vendor/github.com/gogf/gf/v2/container/glist/glist.go new file mode 100644 index 00000000..1c6212ea --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/container/glist/glist.go @@ -0,0 +1,572 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with l file, +// You can obtain one at https://github.com/gogf/gf. +// + +// Package glist provides most commonly used doubly linked list container which also supports concurrent-safe/unsafe switch feature. +package glist + +import ( + "bytes" + "container/list" + + "github.com/gogf/gf/v2/internal/deepcopy" + "github.com/gogf/gf/v2/internal/json" + "github.com/gogf/gf/v2/internal/rwmutex" + "github.com/gogf/gf/v2/util/gconv" +) + +type ( + // List is a doubly linked list containing a concurrent-safe/unsafe switch. + // The switch should be set when its initialization and cannot be changed then. + List struct { + mu rwmutex.RWMutex + list *list.List + } + // Element the item type of the list. + Element = list.Element +) + +// New creates and returns a new empty doubly linked list. 
+func New(safe ...bool) *List { + return &List{ + mu: rwmutex.Create(safe...), + list: list.New(), + } +} + +// NewFrom creates and returns a list from a copy of given slice `array`. +// The parameter `safe` is used to specify whether using list in concurrent-safety, +// which is false in default. +func NewFrom(array []interface{}, safe ...bool) *List { + l := list.New() + for _, v := range array { + l.PushBack(v) + } + return &List{ + mu: rwmutex.Create(safe...), + list: l, + } +} + +// PushFront inserts a new element `e` with value `v` at the front of list `l` and returns `e`. +func (l *List) PushFront(v interface{}) (e *Element) { + l.mu.Lock() + if l.list == nil { + l.list = list.New() + } + e = l.list.PushFront(v) + l.mu.Unlock() + return +} + +// PushBack inserts a new element `e` with value `v` at the back of list `l` and returns `e`. +func (l *List) PushBack(v interface{}) (e *Element) { + l.mu.Lock() + if l.list == nil { + l.list = list.New() + } + e = l.list.PushBack(v) + l.mu.Unlock() + return +} + +// PushFronts inserts multiple new elements with values `values` at the front of list `l`. +func (l *List) PushFronts(values []interface{}) { + l.mu.Lock() + if l.list == nil { + l.list = list.New() + } + for _, v := range values { + l.list.PushFront(v) + } + l.mu.Unlock() +} + +// PushBacks inserts multiple new elements with values `values` at the back of list `l`. +func (l *List) PushBacks(values []interface{}) { + l.mu.Lock() + if l.list == nil { + l.list = list.New() + } + for _, v := range values { + l.list.PushBack(v) + } + l.mu.Unlock() +} + +// PopBack removes the element from back of `l` and returns the value of the element. +func (l *List) PopBack() (value interface{}) { + l.mu.Lock() + defer l.mu.Unlock() + if l.list == nil { + l.list = list.New() + return + } + if e := l.list.Back(); e != nil { + value = l.list.Remove(e) + } + return +} + +// PopFront removes the element from front of `l` and returns the value of the element. 
+func (l *List) PopFront() (value interface{}) { + l.mu.Lock() + defer l.mu.Unlock() + if l.list == nil { + l.list = list.New() + return + } + if e := l.list.Front(); e != nil { + value = l.list.Remove(e) + } + return +} + +// PopBacks removes `max` elements from back of `l` +// and returns values of the removed elements as slice. +func (l *List) PopBacks(max int) (values []interface{}) { + l.mu.Lock() + defer l.mu.Unlock() + if l.list == nil { + l.list = list.New() + return + } + length := l.list.Len() + if length > 0 { + if max > 0 && max < length { + length = max + } + values = make([]interface{}, length) + for i := 0; i < length; i++ { + values[i] = l.list.Remove(l.list.Back()) + } + } + return +} + +// PopFronts removes `max` elements from front of `l` +// and returns values of the removed elements as slice. +func (l *List) PopFronts(max int) (values []interface{}) { + l.mu.Lock() + defer l.mu.Unlock() + if l.list == nil { + l.list = list.New() + return + } + length := l.list.Len() + if length > 0 { + if max > 0 && max < length { + length = max + } + values = make([]interface{}, length) + for i := 0; i < length; i++ { + values[i] = l.list.Remove(l.list.Front()) + } + } + return +} + +// PopBackAll removes all elements from back of `l` +// and returns values of the removed elements as slice. +func (l *List) PopBackAll() []interface{} { + return l.PopBacks(-1) +} + +// PopFrontAll removes all elements from front of `l` +// and returns values of the removed elements as slice. +func (l *List) PopFrontAll() []interface{} { + return l.PopFronts(-1) +} + +// FrontAll copies and returns values of all elements from front of `l` as slice. 
+func (l *List) FrontAll() (values []interface{}) { + l.mu.RLock() + defer l.mu.RUnlock() + if l.list == nil { + return + } + length := l.list.Len() + if length > 0 { + values = make([]interface{}, length) + for i, e := 0, l.list.Front(); i < length; i, e = i+1, e.Next() { + values[i] = e.Value + } + } + return +} + +// BackAll copies and returns values of all elements from back of `l` as slice. +func (l *List) BackAll() (values []interface{}) { + l.mu.RLock() + defer l.mu.RUnlock() + if l.list == nil { + return + } + length := l.list.Len() + if length > 0 { + values = make([]interface{}, length) + for i, e := 0, l.list.Back(); i < length; i, e = i+1, e.Prev() { + values[i] = e.Value + } + } + return +} + +// FrontValue returns value of the first element of `l` or nil if the list is empty. +func (l *List) FrontValue() (value interface{}) { + l.mu.RLock() + defer l.mu.RUnlock() + if l.list == nil { + return + } + if e := l.list.Front(); e != nil { + value = e.Value + } + return +} + +// BackValue returns value of the last element of `l` or nil if the list is empty. +func (l *List) BackValue() (value interface{}) { + l.mu.RLock() + defer l.mu.RUnlock() + if l.list == nil { + return + } + if e := l.list.Back(); e != nil { + value = e.Value + } + return +} + +// Front returns the first element of list `l` or nil if the list is empty. +func (l *List) Front() (e *Element) { + l.mu.RLock() + defer l.mu.RUnlock() + if l.list == nil { + return + } + e = l.list.Front() + return +} + +// Back returns the last element of list `l` or nil if the list is empty. +func (l *List) Back() (e *Element) { + l.mu.RLock() + defer l.mu.RUnlock() + if l.list == nil { + return + } + e = l.list.Back() + return +} + +// Len returns the number of elements of list `l`. +// The complexity is O(1). +func (l *List) Len() (length int) { + l.mu.RLock() + defer l.mu.RUnlock() + if l.list == nil { + return + } + length = l.list.Len() + return +} + +// Size is alias of Len. 
+func (l *List) Size() int { + return l.Len() +} + +// MoveBefore moves element `e` to its new position before `p`. +// If `e` or `p` is not an element of `l`, or `e` == `p`, the list is not modified. +// The element and `p` must not be nil. +func (l *List) MoveBefore(e, p *Element) { + l.mu.Lock() + defer l.mu.Unlock() + if l.list == nil { + l.list = list.New() + } + l.list.MoveBefore(e, p) +} + +// MoveAfter moves element `e` to its new position after `p`. +// If `e` or `p` is not an element of `l`, or `e` == `p`, the list is not modified. +// The element and `p` must not be nil. +func (l *List) MoveAfter(e, p *Element) { + l.mu.Lock() + defer l.mu.Unlock() + if l.list == nil { + l.list = list.New() + } + l.list.MoveAfter(e, p) +} + +// MoveToFront moves element `e` to the front of list `l`. +// If `e` is not an element of `l`, the list is not modified. +// The element must not be nil. +func (l *List) MoveToFront(e *Element) { + l.mu.Lock() + defer l.mu.Unlock() + if l.list == nil { + l.list = list.New() + } + l.list.MoveToFront(e) +} + +// MoveToBack moves element `e` to the back of list `l`. +// If `e` is not an element of `l`, the list is not modified. +// The element must not be nil. +func (l *List) MoveToBack(e *Element) { + l.mu.Lock() + defer l.mu.Unlock() + if l.list == nil { + l.list = list.New() + } + l.list.MoveToBack(e) +} + +// PushBackList inserts a copy of an other list at the back of list `l`. +// The lists `l` and `other` may be the same, but they must not be nil. +func (l *List) PushBackList(other *List) { + if l != other { + other.mu.RLock() + defer other.mu.RUnlock() + } + l.mu.Lock() + defer l.mu.Unlock() + if l.list == nil { + l.list = list.New() + } + l.list.PushBackList(other.list) +} + +// PushFrontList inserts a copy of an other list at the front of list `l`. +// The lists `l` and `other` may be the same, but they must not be nil. 
+func (l *List) PushFrontList(other *List) { + if l != other { + other.mu.RLock() + defer other.mu.RUnlock() + } + l.mu.Lock() + defer l.mu.Unlock() + if l.list == nil { + l.list = list.New() + } + l.list.PushFrontList(other.list) +} + +// InsertAfter inserts a new element `e` with value `v` immediately after `p` and returns `e`. +// If `p` is not an element of `l`, the list is not modified. +// The `p` must not be nil. +func (l *List) InsertAfter(p *Element, v interface{}) (e *Element) { + l.mu.Lock() + defer l.mu.Unlock() + if l.list == nil { + l.list = list.New() + } + e = l.list.InsertAfter(v, p) + return +} + +// InsertBefore inserts a new element `e` with value `v` immediately before `p` and returns `e`. +// If `p` is not an element of `l`, the list is not modified. +// The `p` must not be nil. +func (l *List) InsertBefore(p *Element, v interface{}) (e *Element) { + l.mu.Lock() + defer l.mu.Unlock() + if l.list == nil { + l.list = list.New() + } + e = l.list.InsertBefore(v, p) + return +} + +// Remove removes `e` from `l` if `e` is an element of list `l`. +// It returns the element value e.Value. +// The element must not be nil. +func (l *List) Remove(e *Element) (value interface{}) { + l.mu.Lock() + defer l.mu.Unlock() + if l.list == nil { + l.list = list.New() + } + value = l.list.Remove(e) + return +} + +// Removes removes multiple elements `es` from `l` if `es` are elements of list `l`. +func (l *List) Removes(es []*Element) { + l.mu.Lock() + defer l.mu.Unlock() + if l.list == nil { + l.list = list.New() + } + for _, e := range es { + l.list.Remove(e) + } +} + +// RemoveAll removes all elements from list `l`. +func (l *List) RemoveAll() { + l.mu.Lock() + l.list = list.New() + l.mu.Unlock() +} + +// Clear is alias of RemoveAll. +func (l *List) Clear() { + l.RemoveAll() +} + +// RLockFunc locks reading with given callback function `f` within RWMutex.RLock. 
+func (l *List) RLockFunc(f func(list *list.List)) { + l.mu.RLock() + defer l.mu.RUnlock() + if l.list != nil { + f(l.list) + } +} + +// LockFunc locks writing with given callback function `f` within RWMutex.Lock. +func (l *List) LockFunc(f func(list *list.List)) { + l.mu.Lock() + defer l.mu.Unlock() + if l.list == nil { + l.list = list.New() + } + f(l.list) +} + +// Iterator is alias of IteratorAsc. +func (l *List) Iterator(f func(e *Element) bool) { + l.IteratorAsc(f) +} + +// IteratorAsc iterates the list readonly in ascending order with given callback function `f`. +// If `f` returns true, then it continues iterating; or false to stop. +func (l *List) IteratorAsc(f func(e *Element) bool) { + l.mu.RLock() + defer l.mu.RUnlock() + if l.list == nil { + return + } + length := l.list.Len() + if length > 0 { + for i, e := 0, l.list.Front(); i < length; i, e = i+1, e.Next() { + if !f(e) { + break + } + } + } +} + +// IteratorDesc iterates the list readonly in descending order with given callback function `f`. +// If `f` returns true, then it continues iterating; or false to stop. +func (l *List) IteratorDesc(f func(e *Element) bool) { + l.mu.RLock() + defer l.mu.RUnlock() + if l.list == nil { + return + } + length := l.list.Len() + if length > 0 { + for i, e := 0, l.list.Back(); i < length; i, e = i+1, e.Prev() { + if !f(e) { + break + } + } + } +} + +// Join joins list elements with a string `glue`. +func (l *List) Join(glue string) string { + l.mu.RLock() + defer l.mu.RUnlock() + if l.list == nil { + return "" + } + buffer := bytes.NewBuffer(nil) + length := l.list.Len() + if length > 0 { + for i, e := 0, l.list.Front(); i < length; i, e = i+1, e.Next() { + buffer.WriteString(gconv.String(e.Value)) + if i != length-1 { + buffer.WriteString(glue) + } + } + } + return buffer.String() +} + +// String returns current list as a string. 
+func (l *List) String() string { + if l == nil { + return "" + } + return "[" + l.Join(",") + "]" +} + +// MarshalJSON implements the interface MarshalJSON for json.Marshal. +func (l List) MarshalJSON() ([]byte, error) { + return json.Marshal(l.FrontAll()) +} + +// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. +func (l *List) UnmarshalJSON(b []byte) error { + l.mu.Lock() + defer l.mu.Unlock() + if l.list == nil { + l.list = list.New() + } + var array []interface{} + if err := json.UnmarshalUseNumber(b, &array); err != nil { + return err + } + l.PushBacks(array) + return nil +} + +// UnmarshalValue is an interface implement which sets any type of value for list. +func (l *List) UnmarshalValue(value interface{}) (err error) { + l.mu.Lock() + defer l.mu.Unlock() + if l.list == nil { + l.list = list.New() + } + var array []interface{} + switch value.(type) { + case string, []byte: + err = json.UnmarshalUseNumber(gconv.Bytes(value), &array) + default: + array = gconv.SliceAny(value) + } + l.PushBacks(array) + return err +} + +// DeepCopy implements interface for deep copy of current type. +func (l *List) DeepCopy() interface{} { + if l == nil { + return nil + } + + l.mu.RLock() + defer l.mu.RUnlock() + + if l.list == nil { + return nil + } + var ( + length = l.list.Len() + values = make([]interface{}, length) + ) + if length > 0 { + for i, e := 0, l.list.Front(); i < length; i, e = i+1, e.Next() { + values[i] = deepcopy.Copy(e.Value) + } + } + return NewFrom(values, l.mu.IsSafe()) +} diff --git a/vendor/github.com/gogf/gf/v2/container/gmap/gmap.go b/vendor/github.com/gogf/gf/v2/container/gmap/gmap.go new file mode 100644 index 00000000..4cff99d3 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/container/gmap/gmap.go @@ -0,0 +1,45 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. 
+// If a copy of the MIT was not distributed with gm file, +// You can obtain one at https://github.com/gogf/gf. + +// Package gmap provides most commonly used map container which also support concurrent-safe/unsafe switch feature. +package gmap + +type ( + Map = AnyAnyMap // Map is alias of AnyAnyMap. + HashMap = AnyAnyMap // HashMap is alias of AnyAnyMap. +) + +// New creates and returns an empty hash map. +// The parameter `safe` is used to specify whether using map in concurrent-safety, +// which is false in default. +func New(safe ...bool) *Map { + return NewAnyAnyMap(safe...) +} + +// NewFrom creates and returns a hash map from given map `data`. +// Note that, the param `data` map will be set as the underlying data map(no deep copy), +// there might be some concurrent-safe issues when changing the map outside. +// The parameter `safe` is used to specify whether using tree in concurrent-safety, +// which is false in default. +func NewFrom(data map[interface{}]interface{}, safe ...bool) *Map { + return NewAnyAnyMapFrom(data, safe...) +} + +// NewHashMap creates and returns an empty hash map. +// The parameter `safe` is used to specify whether using map in concurrent-safety, +// which is false in default. +func NewHashMap(safe ...bool) *Map { + return NewAnyAnyMap(safe...) +} + +// NewHashMapFrom creates and returns a hash map from given map `data`. +// Note that, the param `data` map will be set as the underlying data map(no deep copy), +// there might be some concurrent-safe issues when changing the map outside. +// The parameter `safe` is used to specify whether using tree in concurrent-safety, +// which is false in default. +func NewHashMapFrom(data map[interface{}]interface{}, safe ...bool) *Map { + return NewAnyAnyMapFrom(data, safe...) 
+} diff --git a/vendor/github.com/gogf/gf/v2/container/gmap/gmap_hash_any_any_map.go b/vendor/github.com/gogf/gf/v2/container/gmap/gmap_hash_any_any_map.go new file mode 100644 index 00000000..fdfe9aa6 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/container/gmap/gmap_hash_any_any_map.go @@ -0,0 +1,537 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with gm file, +// You can obtain one at https://github.com/gogf/gf. + +package gmap + +import ( + "github.com/gogf/gf/v2/container/gvar" + "github.com/gogf/gf/v2/internal/deepcopy" + "github.com/gogf/gf/v2/internal/empty" + "github.com/gogf/gf/v2/internal/json" + "github.com/gogf/gf/v2/internal/rwmutex" + "github.com/gogf/gf/v2/util/gconv" +) + +// AnyAnyMap wraps map type `map[interface{}]interface{}` and provides more map features. +type AnyAnyMap struct { + mu rwmutex.RWMutex + data map[interface{}]interface{} +} + +// NewAnyAnyMap creates and returns an empty hash map. +// The parameter `safe` is used to specify whether using map in concurrent-safety, +// which is false in default. +func NewAnyAnyMap(safe ...bool) *AnyAnyMap { + return &AnyAnyMap{ + mu: rwmutex.Create(safe...), + data: make(map[interface{}]interface{}), + } +} + +// NewAnyAnyMapFrom creates and returns a hash map from given map `data`. +// Note that, the param `data` map will be set as the underlying data map(no deep copy), +// there might be some concurrent-safe issues when changing the map outside. +func NewAnyAnyMapFrom(data map[interface{}]interface{}, safe ...bool) *AnyAnyMap { + return &AnyAnyMap{ + mu: rwmutex.Create(safe...), + data: data, + } +} + +// Iterator iterates the hash map readonly with custom callback function `f`. +// If `f` returns true, then it continues iterating; or false to stop. 
func (m *AnyAnyMap) Iterator(f func(k interface{}, v interface{}) bool) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	// Returning false from f stops the iteration early.
	for k, v := range m.data {
		if !f(k, v) {
			break
		}
	}
}

// Clone returns a new hash map with copy of current map data.
func (m *AnyAnyMap) Clone(safe ...bool) *AnyAnyMap {
	return NewFrom(m.MapCopy(), safe...)
}

// Map returns the underlying data map.
// Note that, if it's in concurrent-safe usage, it returns a copy of underlying data,
// or else a pointer to the underlying data.
func (m *AnyAnyMap) Map() map[interface{}]interface{} {
	m.mu.RLock()
	defer m.mu.RUnlock()
	// In unsafe mode the live map is handed out: mutations by the caller
	// are visible to (and race with) this AnyAnyMap.
	if !m.mu.IsSafe() {
		return m.data
	}
	data := make(map[interface{}]interface{}, len(m.data))
	for k, v := range m.data {
		data[k] = v
	}
	return data
}

// MapCopy returns a shallow copy of the underlying data of the hash map.
// Values are copied by assignment only; referenced data is shared.
func (m *AnyAnyMap) MapCopy() map[interface{}]interface{} {
	m.mu.RLock()
	defer m.mu.RUnlock()
	data := make(map[interface{}]interface{}, len(m.data))
	for k, v := range m.data {
		data[k] = v
	}
	return data
}

// MapStrAny returns a copy of the underlying data of the map as map[string]interface{}.
// Keys are converted via gconv.String; distinct keys with the same string
// form collapse into a single entry.
func (m *AnyAnyMap) MapStrAny() map[string]interface{} {
	m.mu.RLock()
	defer m.mu.RUnlock()
	data := make(map[string]interface{}, len(m.data))
	for k, v := range m.data {
		data[gconv.String(k)] = v
	}
	return data
}

// FilterEmpty deletes all key-value pair of which the value is empty.
// Values like: 0, nil, false, "", len(slice/map/chan) == 0 are considered empty.
func (m *AnyAnyMap) FilterEmpty() {
	m.mu.Lock()
	defer m.mu.Unlock()
	// Deleting entries while ranging over a Go map is well-defined.
	for k, v := range m.data {
		if empty.IsEmpty(v) {
			delete(m.data, k)
		}
	}
}

// FilterNil deletes all key-value pair of which the value is nil.
func (m *AnyAnyMap) FilterNil() {
	m.mu.Lock()
	defer m.mu.Unlock()
	for k, v := range m.data {
		if empty.IsNil(v) {
			delete(m.data, k)
		}
	}
}

// Set sets key-value to the hash map.
+func (m *AnyAnyMap) Set(key interface{}, value interface{}) { + m.mu.Lock() + if m.data == nil { + m.data = make(map[interface{}]interface{}) + } + m.data[key] = value + m.mu.Unlock() +} + +// Sets batch sets key-values to the hash map. +func (m *AnyAnyMap) Sets(data map[interface{}]interface{}) { + m.mu.Lock() + if m.data == nil { + m.data = data + } else { + for k, v := range data { + m.data[k] = v + } + } + m.mu.Unlock() +} + +// Search searches the map with given `key`. +// Second return parameter `found` is true if key was found, otherwise false. +func (m *AnyAnyMap) Search(key interface{}) (value interface{}, found bool) { + m.mu.RLock() + if m.data != nil { + value, found = m.data[key] + } + m.mu.RUnlock() + return +} + +// Get returns the value by given `key`. +func (m *AnyAnyMap) Get(key interface{}) (value interface{}) { + m.mu.RLock() + if m.data != nil { + value = m.data[key] + } + m.mu.RUnlock() + return +} + +// Pop retrieves and deletes an item from the map. +func (m *AnyAnyMap) Pop() (key, value interface{}) { + m.mu.Lock() + defer m.mu.Unlock() + for key, value = range m.data { + delete(m.data, key) + return + } + return +} + +// Pops retrieves and deletes `size` items from the map. +// It returns all items if size == -1. +func (m *AnyAnyMap) Pops(size int) map[interface{}]interface{} { + m.mu.Lock() + defer m.mu.Unlock() + if size > len(m.data) || size == -1 { + size = len(m.data) + } + if size == 0 { + return nil + } + var ( + index = 0 + newMap = make(map[interface{}]interface{}, size) + ) + for k, v := range m.data { + delete(m.data, k) + newMap[k] = v + index++ + if index == size { + break + } + } + return newMap +} + +// doSetWithLockCheck checks whether value of the key exists with mutex.Lock, +// if not exists, set value to the map with given `key`, +// or else just return the existing value. 
+// +// When setting value, if `value` is type of `func() interface {}`, +// it will be executed with mutex.Lock of the hash map, +// and its return value will be set to the map with `key`. +// +// It returns value with given `key`. +func (m *AnyAnyMap) doSetWithLockCheck(key interface{}, value interface{}) interface{} { + m.mu.Lock() + defer m.mu.Unlock() + if m.data == nil { + m.data = make(map[interface{}]interface{}) + } + if v, ok := m.data[key]; ok { + return v + } + if f, ok := value.(func() interface{}); ok { + value = f() + } + if value != nil { + m.data[key] = value + } + return value +} + +// GetOrSet returns the value by key, +// or sets value with given `value` if it does not exist and then returns this value. +func (m *AnyAnyMap) GetOrSet(key interface{}, value interface{}) interface{} { + if v, ok := m.Search(key); !ok { + return m.doSetWithLockCheck(key, value) + } else { + return v + } +} + +// GetOrSetFunc returns the value by key, +// or sets value with returned value of callback function `f` if it does not exist +// and then returns this value. +func (m *AnyAnyMap) GetOrSetFunc(key interface{}, f func() interface{}) interface{} { + if v, ok := m.Search(key); !ok { + return m.doSetWithLockCheck(key, f()) + } else { + return v + } +} + +// GetOrSetFuncLock returns the value by key, +// or sets value with returned value of callback function `f` if it does not exist +// and then returns this value. +// +// GetOrSetFuncLock differs with GetOrSetFunc function is that it executes function `f` +// with mutex.Lock of the hash map. +func (m *AnyAnyMap) GetOrSetFuncLock(key interface{}, f func() interface{}) interface{} { + if v, ok := m.Search(key); !ok { + return m.doSetWithLockCheck(key, f) + } else { + return v + } +} + +// GetVar returns a Var with the value by given `key`. +// The returned Var is un-concurrent safe. 
+func (m *AnyAnyMap) GetVar(key interface{}) *gvar.Var { + return gvar.New(m.Get(key)) +} + +// GetVarOrSet returns a Var with result from GetOrSet. +// The returned Var is un-concurrent safe. +func (m *AnyAnyMap) GetVarOrSet(key interface{}, value interface{}) *gvar.Var { + return gvar.New(m.GetOrSet(key, value)) +} + +// GetVarOrSetFunc returns a Var with result from GetOrSetFunc. +// The returned Var is un-concurrent safe. +func (m *AnyAnyMap) GetVarOrSetFunc(key interface{}, f func() interface{}) *gvar.Var { + return gvar.New(m.GetOrSetFunc(key, f)) +} + +// GetVarOrSetFuncLock returns a Var with result from GetOrSetFuncLock. +// The returned Var is un-concurrent safe. +func (m *AnyAnyMap) GetVarOrSetFuncLock(key interface{}, f func() interface{}) *gvar.Var { + return gvar.New(m.GetOrSetFuncLock(key, f)) +} + +// SetIfNotExist sets `value` to the map if the `key` does not exist, and then returns true. +// It returns false if `key` exists, and `value` would be ignored. +func (m *AnyAnyMap) SetIfNotExist(key interface{}, value interface{}) bool { + if !m.Contains(key) { + m.doSetWithLockCheck(key, value) + return true + } + return false +} + +// SetIfNotExistFunc sets value with return value of callback function `f`, and then returns true. +// It returns false if `key` exists, and `value` would be ignored. +func (m *AnyAnyMap) SetIfNotExistFunc(key interface{}, f func() interface{}) bool { + if !m.Contains(key) { + m.doSetWithLockCheck(key, f()) + return true + } + return false +} + +// SetIfNotExistFuncLock sets value with return value of callback function `f`, and then returns true. +// It returns false if `key` exists, and `value` would be ignored. +// +// SetIfNotExistFuncLock differs with SetIfNotExistFunc function is that +// it executes function `f` with mutex.Lock of the hash map. 
+func (m *AnyAnyMap) SetIfNotExistFuncLock(key interface{}, f func() interface{}) bool { + if !m.Contains(key) { + m.doSetWithLockCheck(key, f) + return true + } + return false +} + +// Remove deletes value from map by given `key`, and return this deleted value. +func (m *AnyAnyMap) Remove(key interface{}) (value interface{}) { + m.mu.Lock() + if m.data != nil { + var ok bool + if value, ok = m.data[key]; ok { + delete(m.data, key) + } + } + m.mu.Unlock() + return +} + +// Removes batch deletes values of the map by keys. +func (m *AnyAnyMap) Removes(keys []interface{}) { + m.mu.Lock() + if m.data != nil { + for _, key := range keys { + delete(m.data, key) + } + } + m.mu.Unlock() +} + +// Keys returns all keys of the map as a slice. +func (m *AnyAnyMap) Keys() []interface{} { + m.mu.RLock() + defer m.mu.RUnlock() + var ( + keys = make([]interface{}, len(m.data)) + index = 0 + ) + for key := range m.data { + keys[index] = key + index++ + } + return keys +} + +// Values returns all values of the map as a slice. +func (m *AnyAnyMap) Values() []interface{} { + m.mu.RLock() + defer m.mu.RUnlock() + var ( + values = make([]interface{}, len(m.data)) + index = 0 + ) + for _, value := range m.data { + values[index] = value + index++ + } + return values +} + +// Contains checks whether a key exists. +// It returns true if the `key` exists, or else false. +func (m *AnyAnyMap) Contains(key interface{}) bool { + var ok bool + m.mu.RLock() + if m.data != nil { + _, ok = m.data[key] + } + m.mu.RUnlock() + return ok +} + +// Size returns the size of the map. +func (m *AnyAnyMap) Size() int { + m.mu.RLock() + length := len(m.data) + m.mu.RUnlock() + return length +} + +// IsEmpty checks whether the map is empty. +// It returns true if map is empty, or else false. +func (m *AnyAnyMap) IsEmpty() bool { + return m.Size() == 0 +} + +// Clear deletes all data of the map, it will remake a new underlying data map. 
+func (m *AnyAnyMap) Clear() { + m.mu.Lock() + m.data = make(map[interface{}]interface{}) + m.mu.Unlock() +} + +// Replace the data of the map with given `data`. +func (m *AnyAnyMap) Replace(data map[interface{}]interface{}) { + m.mu.Lock() + m.data = data + m.mu.Unlock() +} + +// LockFunc locks writing with given callback function `f` within RWMutex.Lock. +func (m *AnyAnyMap) LockFunc(f func(m map[interface{}]interface{})) { + m.mu.Lock() + defer m.mu.Unlock() + f(m.data) +} + +// RLockFunc locks reading with given callback function `f` within RWMutex.RLock. +func (m *AnyAnyMap) RLockFunc(f func(m map[interface{}]interface{})) { + m.mu.RLock() + defer m.mu.RUnlock() + f(m.data) +} + +// Flip exchanges key-value of the map to value-key. +func (m *AnyAnyMap) Flip() { + m.mu.Lock() + defer m.mu.Unlock() + n := make(map[interface{}]interface{}, len(m.data)) + for k, v := range m.data { + n[v] = k + } + m.data = n +} + +// Merge merges two hash maps. +// The `other` map will be merged into the map `m`. +func (m *AnyAnyMap) Merge(other *AnyAnyMap) { + m.mu.Lock() + defer m.mu.Unlock() + if m.data == nil { + m.data = other.MapCopy() + return + } + if other != m { + other.mu.RLock() + defer other.mu.RUnlock() + } + for k, v := range other.data { + m.data[k] = v + } +} + +// String returns the map as a string. +func (m *AnyAnyMap) String() string { + if m == nil { + return "" + } + b, _ := m.MarshalJSON() + return string(b) +} + +// MarshalJSON implements the interface MarshalJSON for json.Marshal. +func (m AnyAnyMap) MarshalJSON() ([]byte, error) { + return json.Marshal(gconv.Map(m.Map())) +} + +// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. 
+func (m *AnyAnyMap) UnmarshalJSON(b []byte) error { + m.mu.Lock() + defer m.mu.Unlock() + if m.data == nil { + m.data = make(map[interface{}]interface{}) + } + var data map[string]interface{} + if err := json.UnmarshalUseNumber(b, &data); err != nil { + return err + } + for k, v := range data { + m.data[k] = v + } + return nil +} + +// UnmarshalValue is an interface implement which sets any type of value for map. +func (m *AnyAnyMap) UnmarshalValue(value interface{}) (err error) { + m.mu.Lock() + defer m.mu.Unlock() + if m.data == nil { + m.data = make(map[interface{}]interface{}) + } + for k, v := range gconv.Map(value) { + m.data[k] = v + } + return +} + +// DeepCopy implements interface for deep copy of current type. +func (m *AnyAnyMap) DeepCopy() interface{} { + if m == nil { + return nil + } + + m.mu.RLock() + defer m.mu.RUnlock() + data := make(map[interface{}]interface{}, len(m.data)) + for k, v := range m.data { + data[k] = deepcopy.Copy(v) + } + return NewFrom(data, m.mu.IsSafe()) +} + +// IsSubOf checks whether the current map is a sub-map of `other`. +func (m *AnyAnyMap) IsSubOf(other *AnyAnyMap) bool { + if m == other { + return true + } + m.mu.RLock() + defer m.mu.RUnlock() + other.mu.RLock() + defer other.mu.RUnlock() + for key, value := range m.data { + otherValue, ok := other.data[key] + if !ok { + return false + } + if otherValue != value { + return false + } + } + return true +} diff --git a/vendor/github.com/gogf/gf/v2/container/gmap/gmap_hash_int_any_map.go b/vendor/github.com/gogf/gf/v2/container/gmap/gmap_hash_int_any_map.go new file mode 100644 index 00000000..1faed272 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/container/gmap/gmap_hash_int_any_map.go @@ -0,0 +1,538 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with gm file, +// You can obtain one at https://github.com/gogf/gf. 
+// + +package gmap + +import ( + "github.com/gogf/gf/v2/container/gvar" + "github.com/gogf/gf/v2/internal/deepcopy" + "github.com/gogf/gf/v2/internal/empty" + "github.com/gogf/gf/v2/internal/json" + "github.com/gogf/gf/v2/internal/rwmutex" + "github.com/gogf/gf/v2/util/gconv" +) + +// IntAnyMap implements map[int]interface{} with RWMutex that has switch. +type IntAnyMap struct { + mu rwmutex.RWMutex + data map[int]interface{} +} + +// NewIntAnyMap returns an empty IntAnyMap object. +// The parameter `safe` is used to specify whether using map in concurrent-safety, +// which is false in default. +func NewIntAnyMap(safe ...bool) *IntAnyMap { + return &IntAnyMap{ + mu: rwmutex.Create(safe...), + data: make(map[int]interface{}), + } +} + +// NewIntAnyMapFrom creates and returns a hash map from given map `data`. +// Note that, the param `data` map will be set as the underlying data map(no deep copy), +// there might be some concurrent-safe issues when changing the map outside. +func NewIntAnyMapFrom(data map[int]interface{}, safe ...bool) *IntAnyMap { + return &IntAnyMap{ + mu: rwmutex.Create(safe...), + data: data, + } +} + +// Iterator iterates the hash map readonly with custom callback function `f`. +// If `f` returns true, then it continues iterating; or false to stop. +func (m *IntAnyMap) Iterator(f func(k int, v interface{}) bool) { + m.mu.RLock() + defer m.mu.RUnlock() + for k, v := range m.data { + if !f(k, v) { + break + } + } +} + +// Clone returns a new hash map with copy of current map data. +func (m *IntAnyMap) Clone() *IntAnyMap { + return NewIntAnyMapFrom(m.MapCopy(), m.mu.IsSafe()) +} + +// Map returns the underlying data map. +// Note that, if it's in concurrent-safe usage, it returns a copy of underlying data, +// or else a pointer to the underlying data. 
+func (m *IntAnyMap) Map() map[int]interface{} { + m.mu.RLock() + defer m.mu.RUnlock() + if !m.mu.IsSafe() { + return m.data + } + data := make(map[int]interface{}, len(m.data)) + for k, v := range m.data { + data[k] = v + } + return data +} + +// MapStrAny returns a copy of the underlying data of the map as map[string]interface{}. +func (m *IntAnyMap) MapStrAny() map[string]interface{} { + m.mu.RLock() + data := make(map[string]interface{}, len(m.data)) + for k, v := range m.data { + data[gconv.String(k)] = v + } + m.mu.RUnlock() + return data +} + +// MapCopy returns a copy of the underlying data of the hash map. +func (m *IntAnyMap) MapCopy() map[int]interface{} { + m.mu.RLock() + defer m.mu.RUnlock() + data := make(map[int]interface{}, len(m.data)) + for k, v := range m.data { + data[k] = v + } + return data +} + +// FilterEmpty deletes all key-value pair of which the value is empty. +// Values like: 0, nil, false, "", len(slice/map/chan) == 0 are considered empty. +func (m *IntAnyMap) FilterEmpty() { + m.mu.Lock() + for k, v := range m.data { + if empty.IsEmpty(v) { + delete(m.data, k) + } + } + m.mu.Unlock() +} + +// FilterNil deletes all key-value pair of which the value is nil. +func (m *IntAnyMap) FilterNil() { + m.mu.Lock() + defer m.mu.Unlock() + for k, v := range m.data { + if empty.IsNil(v) { + delete(m.data, k) + } + } +} + +// Set sets key-value to the hash map. +func (m *IntAnyMap) Set(key int, val interface{}) { + m.mu.Lock() + if m.data == nil { + m.data = make(map[int]interface{}) + } + m.data[key] = val + m.mu.Unlock() +} + +// Sets batch sets key-values to the hash map. +func (m *IntAnyMap) Sets(data map[int]interface{}) { + m.mu.Lock() + if m.data == nil { + m.data = data + } else { + for k, v := range data { + m.data[k] = v + } + } + m.mu.Unlock() +} + +// Search searches the map with given `key`. +// Second return parameter `found` is true if key was found, otherwise false. 
+func (m *IntAnyMap) Search(key int) (value interface{}, found bool) { + m.mu.RLock() + if m.data != nil { + value, found = m.data[key] + } + m.mu.RUnlock() + return +} + +// Get returns the value by given `key`. +func (m *IntAnyMap) Get(key int) (value interface{}) { + m.mu.RLock() + if m.data != nil { + value = m.data[key] + } + m.mu.RUnlock() + return +} + +// Pop retrieves and deletes an item from the map. +func (m *IntAnyMap) Pop() (key int, value interface{}) { + m.mu.Lock() + defer m.mu.Unlock() + for key, value = range m.data { + delete(m.data, key) + return + } + return +} + +// Pops retrieves and deletes `size` items from the map. +// It returns all items if size == -1. +func (m *IntAnyMap) Pops(size int) map[int]interface{} { + m.mu.Lock() + defer m.mu.Unlock() + if size > len(m.data) || size == -1 { + size = len(m.data) + } + if size == 0 { + return nil + } + var ( + index = 0 + newMap = make(map[int]interface{}, size) + ) + for k, v := range m.data { + delete(m.data, k) + newMap[k] = v + index++ + if index == size { + break + } + } + return newMap +} + +// doSetWithLockCheck checks whether value of the key exists with mutex.Lock, +// if not exists, set value to the map with given `key`, +// or else just return the existing value. +// +// When setting value, if `value` is type of `func() interface {}`, +// it will be executed with mutex.Lock of the hash map, +// and its return value will be set to the map with `key`. +// +// It returns value with given `key`. +func (m *IntAnyMap) doSetWithLockCheck(key int, value interface{}) interface{} { + m.mu.Lock() + defer m.mu.Unlock() + if m.data == nil { + m.data = make(map[int]interface{}) + } + if v, ok := m.data[key]; ok { + return v + } + if f, ok := value.(func() interface{}); ok { + value = f() + } + if value != nil { + m.data[key] = value + } + return value +} + +// GetOrSet returns the value by key, +// or sets value with given `value` if it does not exist and then returns this value. 
+func (m *IntAnyMap) GetOrSet(key int, value interface{}) interface{} { + if v, ok := m.Search(key); !ok { + return m.doSetWithLockCheck(key, value) + } else { + return v + } +} + +// GetOrSetFunc returns the value by key, +// or sets value with returned value of callback function `f` if it does not exist and returns this value. +func (m *IntAnyMap) GetOrSetFunc(key int, f func() interface{}) interface{} { + if v, ok := m.Search(key); !ok { + return m.doSetWithLockCheck(key, f()) + } else { + return v + } +} + +// GetOrSetFuncLock returns the value by key, +// or sets value with returned value of callback function `f` if it does not exist and returns this value. +// +// GetOrSetFuncLock differs with GetOrSetFunc function is that it executes function `f` +// with mutex.Lock of the hash map. +func (m *IntAnyMap) GetOrSetFuncLock(key int, f func() interface{}) interface{} { + if v, ok := m.Search(key); !ok { + return m.doSetWithLockCheck(key, f) + } else { + return v + } +} + +// GetVar returns a Var with the value by given `key`. +// The returned Var is un-concurrent safe. +func (m *IntAnyMap) GetVar(key int) *gvar.Var { + return gvar.New(m.Get(key)) +} + +// GetVarOrSet returns a Var with result from GetVarOrSet. +// The returned Var is un-concurrent safe. +func (m *IntAnyMap) GetVarOrSet(key int, value interface{}) *gvar.Var { + return gvar.New(m.GetOrSet(key, value)) +} + +// GetVarOrSetFunc returns a Var with result from GetOrSetFunc. +// The returned Var is un-concurrent safe. +func (m *IntAnyMap) GetVarOrSetFunc(key int, f func() interface{}) *gvar.Var { + return gvar.New(m.GetOrSetFunc(key, f)) +} + +// GetVarOrSetFuncLock returns a Var with result from GetOrSetFuncLock. +// The returned Var is un-concurrent safe. +func (m *IntAnyMap) GetVarOrSetFuncLock(key int, f func() interface{}) *gvar.Var { + return gvar.New(m.GetOrSetFuncLock(key, f)) +} + +// SetIfNotExist sets `value` to the map if the `key` does not exist, and then returns true. 
+// It returns false if `key` exists, and `value` would be ignored. +func (m *IntAnyMap) SetIfNotExist(key int, value interface{}) bool { + if !m.Contains(key) { + m.doSetWithLockCheck(key, value) + return true + } + return false +} + +// SetIfNotExistFunc sets value with return value of callback function `f`, and then returns true. +// It returns false if `key` exists, and `value` would be ignored. +func (m *IntAnyMap) SetIfNotExistFunc(key int, f func() interface{}) bool { + if !m.Contains(key) { + m.doSetWithLockCheck(key, f()) + return true + } + return false +} + +// SetIfNotExistFuncLock sets value with return value of callback function `f`, and then returns true. +// It returns false if `key` exists, and `value` would be ignored. +// +// SetIfNotExistFuncLock differs with SetIfNotExistFunc function is that +// it executes function `f` with mutex.Lock of the hash map. +func (m *IntAnyMap) SetIfNotExistFuncLock(key int, f func() interface{}) bool { + if !m.Contains(key) { + m.doSetWithLockCheck(key, f) + return true + } + return false +} + +// Removes batch deletes values of the map by keys. +func (m *IntAnyMap) Removes(keys []int) { + m.mu.Lock() + if m.data != nil { + for _, key := range keys { + delete(m.data, key) + } + } + m.mu.Unlock() +} + +// Remove deletes value from map by given `key`, and return this deleted value. +func (m *IntAnyMap) Remove(key int) (value interface{}) { + m.mu.Lock() + if m.data != nil { + var ok bool + if value, ok = m.data[key]; ok { + delete(m.data, key) + } + } + m.mu.Unlock() + return +} + +// Keys returns all keys of the map as a slice. +func (m *IntAnyMap) Keys() []int { + m.mu.RLock() + var ( + keys = make([]int, len(m.data)) + index = 0 + ) + for key := range m.data { + keys[index] = key + index++ + } + m.mu.RUnlock() + return keys +} + +// Values returns all values of the map as a slice. 
+func (m *IntAnyMap) Values() []interface{} { + m.mu.RLock() + var ( + values = make([]interface{}, len(m.data)) + index = 0 + ) + for _, value := range m.data { + values[index] = value + index++ + } + m.mu.RUnlock() + return values +} + +// Contains checks whether a key exists. +// It returns true if the `key` exists, or else false. +func (m *IntAnyMap) Contains(key int) bool { + var ok bool + m.mu.RLock() + if m.data != nil { + _, ok = m.data[key] + } + m.mu.RUnlock() + return ok +} + +// Size returns the size of the map. +func (m *IntAnyMap) Size() int { + m.mu.RLock() + length := len(m.data) + m.mu.RUnlock() + return length +} + +// IsEmpty checks whether the map is empty. +// It returns true if map is empty, or else false. +func (m *IntAnyMap) IsEmpty() bool { + return m.Size() == 0 +} + +// Clear deletes all data of the map, it will remake a new underlying data map. +func (m *IntAnyMap) Clear() { + m.mu.Lock() + m.data = make(map[int]interface{}) + m.mu.Unlock() +} + +// Replace the data of the map with given `data`. +func (m *IntAnyMap) Replace(data map[int]interface{}) { + m.mu.Lock() + m.data = data + m.mu.Unlock() +} + +// LockFunc locks writing with given callback function `f` within RWMutex.Lock. +func (m *IntAnyMap) LockFunc(f func(m map[int]interface{})) { + m.mu.Lock() + defer m.mu.Unlock() + f(m.data) +} + +// RLockFunc locks reading with given callback function `f` within RWMutex.RLock. +func (m *IntAnyMap) RLockFunc(f func(m map[int]interface{})) { + m.mu.RLock() + defer m.mu.RUnlock() + f(m.data) +} + +// Flip exchanges key-value of the map to value-key. +func (m *IntAnyMap) Flip() { + m.mu.Lock() + defer m.mu.Unlock() + n := make(map[int]interface{}, len(m.data)) + for k, v := range m.data { + n[gconv.Int(v)] = k + } + m.data = n +} + +// Merge merges two hash maps. +// The `other` map will be merged into the map `m`. 
+func (m *IntAnyMap) Merge(other *IntAnyMap) { + m.mu.Lock() + defer m.mu.Unlock() + if m.data == nil { + m.data = other.MapCopy() + return + } + if other != m { + other.mu.RLock() + defer other.mu.RUnlock() + } + for k, v := range other.data { + m.data[k] = v + } +} + +// String returns the map as a string. +func (m *IntAnyMap) String() string { + if m == nil { + return "" + } + b, _ := m.MarshalJSON() + return string(b) +} + +// MarshalJSON implements the interface MarshalJSON for json.Marshal. +func (m IntAnyMap) MarshalJSON() ([]byte, error) { + m.mu.RLock() + defer m.mu.RUnlock() + return json.Marshal(m.data) +} + +// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. +func (m *IntAnyMap) UnmarshalJSON(b []byte) error { + m.mu.Lock() + defer m.mu.Unlock() + if m.data == nil { + m.data = make(map[int]interface{}) + } + if err := json.UnmarshalUseNumber(b, &m.data); err != nil { + return err + } + return nil +} + +// UnmarshalValue is an interface implement which sets any type of value for map. +func (m *IntAnyMap) UnmarshalValue(value interface{}) (err error) { + m.mu.Lock() + defer m.mu.Unlock() + if m.data == nil { + m.data = make(map[int]interface{}) + } + switch value.(type) { + case string, []byte: + return json.UnmarshalUseNumber(gconv.Bytes(value), &m.data) + default: + for k, v := range gconv.Map(value) { + m.data[gconv.Int(k)] = v + } + } + return +} + +// DeepCopy implements interface for deep copy of current type. +func (m *IntAnyMap) DeepCopy() interface{} { + if m == nil { + return nil + } + m.mu.RLock() + defer m.mu.RUnlock() + data := make(map[int]interface{}, len(m.data)) + for k, v := range m.data { + data[k] = deepcopy.Copy(v) + } + return NewIntAnyMapFrom(data, m.mu.IsSafe()) +} + +// IsSubOf checks whether the current map is a sub-map of `other`. 
+func (m *IntAnyMap) IsSubOf(other *IntAnyMap) bool { + if m == other { + return true + } + m.mu.RLock() + defer m.mu.RUnlock() + other.mu.RLock() + defer other.mu.RUnlock() + for key, value := range m.data { + otherValue, ok := other.data[key] + if !ok { + return false + } + if otherValue != value { + return false + } + } + return true +} diff --git a/vendor/github.com/gogf/gf/v2/container/gmap/gmap_hash_int_int_map.go b/vendor/github.com/gogf/gf/v2/container/gmap/gmap_hash_int_int_map.go new file mode 100644 index 00000000..f63c1f06 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/container/gmap/gmap_hash_int_int_map.go @@ -0,0 +1,508 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with gm file, +// You can obtain one at https://github.com/gogf/gf. + +package gmap + +import ( + "github.com/gogf/gf/v2/internal/empty" + "github.com/gogf/gf/v2/internal/json" + "github.com/gogf/gf/v2/internal/rwmutex" + "github.com/gogf/gf/v2/util/gconv" +) + +// IntIntMap implements map[int]int with RWMutex that has switch. +type IntIntMap struct { + mu rwmutex.RWMutex + data map[int]int +} + +// NewIntIntMap returns an empty IntIntMap object. +// The parameter `safe` is used to specify whether using map in concurrent-safety, +// which is false in default. +func NewIntIntMap(safe ...bool) *IntIntMap { + return &IntIntMap{ + mu: rwmutex.Create(safe...), + data: make(map[int]int), + } +} + +// NewIntIntMapFrom creates and returns a hash map from given map `data`. +// Note that, the param `data` map will be set as the underlying data map(no deep copy), +// there might be some concurrent-safe issues when changing the map outside. 
+func NewIntIntMapFrom(data map[int]int, safe ...bool) *IntIntMap { + return &IntIntMap{ + mu: rwmutex.Create(safe...), + data: data, + } +} + +// Iterator iterates the hash map readonly with custom callback function `f`. +// If `f` returns true, then it continues iterating; or false to stop. +func (m *IntIntMap) Iterator(f func(k int, v int) bool) { + m.mu.RLock() + defer m.mu.RUnlock() + for k, v := range m.data { + if !f(k, v) { + break + } + } +} + +// Clone returns a new hash map with copy of current map data. +func (m *IntIntMap) Clone() *IntIntMap { + return NewIntIntMapFrom(m.MapCopy(), m.mu.IsSafe()) +} + +// Map returns the underlying data map. +// Note that, if it's in concurrent-safe usage, it returns a copy of underlying data, +// or else a pointer to the underlying data. +func (m *IntIntMap) Map() map[int]int { + m.mu.RLock() + defer m.mu.RUnlock() + if !m.mu.IsSafe() { + return m.data + } + data := make(map[int]int, len(m.data)) + for k, v := range m.data { + data[k] = v + } + return data +} + +// MapStrAny returns a copy of the underlying data of the map as map[string]interface{}. +func (m *IntIntMap) MapStrAny() map[string]interface{} { + m.mu.RLock() + data := make(map[string]interface{}, len(m.data)) + for k, v := range m.data { + data[gconv.String(k)] = v + } + m.mu.RUnlock() + return data +} + +// MapCopy returns a copy of the underlying data of the hash map. +func (m *IntIntMap) MapCopy() map[int]int { + m.mu.RLock() + defer m.mu.RUnlock() + data := make(map[int]int, len(m.data)) + for k, v := range m.data { + data[k] = v + } + return data +} + +// FilterEmpty deletes all key-value pair of which the value is empty. +// Values like: 0, nil, false, "", len(slice/map/chan) == 0 are considered empty. +func (m *IntIntMap) FilterEmpty() { + m.mu.Lock() + for k, v := range m.data { + if empty.IsEmpty(v) { + delete(m.data, k) + } + } + m.mu.Unlock() +} + +// Set sets key-value to the hash map. 
+func (m *IntIntMap) Set(key int, val int) { + m.mu.Lock() + if m.data == nil { + m.data = make(map[int]int) + } + m.data[key] = val + m.mu.Unlock() +} + +// Sets batch sets key-values to the hash map. +func (m *IntIntMap) Sets(data map[int]int) { + m.mu.Lock() + if m.data == nil { + m.data = data + } else { + for k, v := range data { + m.data[k] = v + } + } + m.mu.Unlock() +} + +// Search searches the map with given `key`. +// Second return parameter `found` is true if key was found, otherwise false. +func (m *IntIntMap) Search(key int) (value int, found bool) { + m.mu.RLock() + if m.data != nil { + value, found = m.data[key] + } + m.mu.RUnlock() + return +} + +// Get returns the value by given `key`. +func (m *IntIntMap) Get(key int) (value int) { + m.mu.RLock() + if m.data != nil { + value = m.data[key] + } + m.mu.RUnlock() + return +} + +// Pop retrieves and deletes an item from the map. +func (m *IntIntMap) Pop() (key, value int) { + m.mu.Lock() + defer m.mu.Unlock() + for key, value = range m.data { + delete(m.data, key) + return + } + return +} + +// Pops retrieves and deletes `size` items from the map. +// It returns all items if size == -1. +func (m *IntIntMap) Pops(size int) map[int]int { + m.mu.Lock() + defer m.mu.Unlock() + if size > len(m.data) || size == -1 { + size = len(m.data) + } + if size == 0 { + return nil + } + var ( + index = 0 + newMap = make(map[int]int, size) + ) + for k, v := range m.data { + delete(m.data, k) + newMap[k] = v + index++ + if index == size { + break + } + } + return newMap +} + +// doSetWithLockCheck checks whether value of the key exists with mutex.Lock, +// if not exists, set value to the map with given `key`, +// or else just return the existing value. +// +// It returns value with given `key`. 
+func (m *IntIntMap) doSetWithLockCheck(key int, value int) int { + m.mu.Lock() + defer m.mu.Unlock() + if m.data == nil { + m.data = make(map[int]int) + } + if v, ok := m.data[key]; ok { + return v + } + m.data[key] = value + return value +} + +// GetOrSet returns the value by key, +// or sets value with given `value` if it does not exist and then returns this value. +func (m *IntIntMap) GetOrSet(key int, value int) int { + if v, ok := m.Search(key); !ok { + return m.doSetWithLockCheck(key, value) + } else { + return v + } +} + +// GetOrSetFunc returns the value by key, +// or sets value with returned value of callback function `f` if it does not exist and returns this value. +func (m *IntIntMap) GetOrSetFunc(key int, f func() int) int { + if v, ok := m.Search(key); !ok { + return m.doSetWithLockCheck(key, f()) + } else { + return v + } +} + +// GetOrSetFuncLock returns the value by key, +// or sets value with returned value of callback function `f` if it does not exist and returns this value. +// +// GetOrSetFuncLock differs with GetOrSetFunc function is that it executes function `f` +// with mutex.Lock of the hash map. +func (m *IntIntMap) GetOrSetFuncLock(key int, f func() int) int { + if v, ok := m.Search(key); !ok { + m.mu.Lock() + defer m.mu.Unlock() + if m.data == nil { + m.data = make(map[int]int) + } + if v, ok = m.data[key]; ok { + return v + } + v = f() + m.data[key] = v + return v + } else { + return v + } +} + +// SetIfNotExist sets `value` to the map if the `key` does not exist, and then returns true. +// It returns false if `key` exists, and `value` would be ignored. +func (m *IntIntMap) SetIfNotExist(key int, value int) bool { + if !m.Contains(key) { + m.doSetWithLockCheck(key, value) + return true + } + return false +} + +// SetIfNotExistFunc sets value with return value of callback function `f`, and then returns true. +// It returns false if `key` exists, and `value` would be ignored. 
+func (m *IntIntMap) SetIfNotExistFunc(key int, f func() int) bool { + if !m.Contains(key) { + m.doSetWithLockCheck(key, f()) + return true + } + return false +} + +// SetIfNotExistFuncLock sets value with return value of callback function `f`, and then returns true. +// It returns false if `key` exists, and `value` would be ignored. +// +// SetIfNotExistFuncLock differs with SetIfNotExistFunc function is that +// it executes function `f` with mutex.Lock of the hash map. +func (m *IntIntMap) SetIfNotExistFuncLock(key int, f func() int) bool { + if !m.Contains(key) { + m.mu.Lock() + defer m.mu.Unlock() + if m.data == nil { + m.data = make(map[int]int) + } + if _, ok := m.data[key]; !ok { + m.data[key] = f() + } + return true + } + return false +} + +// Removes batch deletes values of the map by keys. +func (m *IntIntMap) Removes(keys []int) { + m.mu.Lock() + if m.data != nil { + for _, key := range keys { + delete(m.data, key) + } + } + m.mu.Unlock() +} + +// Remove deletes value from map by given `key`, and return this deleted value. +func (m *IntIntMap) Remove(key int) (value int) { + m.mu.Lock() + if m.data != nil { + var ok bool + if value, ok = m.data[key]; ok { + delete(m.data, key) + } + } + m.mu.Unlock() + return +} + +// Keys returns all keys of the map as a slice. +func (m *IntIntMap) Keys() []int { + m.mu.RLock() + var ( + keys = make([]int, len(m.data)) + index = 0 + ) + for key := range m.data { + keys[index] = key + index++ + } + m.mu.RUnlock() + return keys +} + +// Values returns all values of the map as a slice. +func (m *IntIntMap) Values() []int { + m.mu.RLock() + var ( + values = make([]int, len(m.data)) + index = 0 + ) + for _, value := range m.data { + values[index] = value + index++ + } + m.mu.RUnlock() + return values +} + +// Contains checks whether a key exists. +// It returns true if the `key` exists, or else false. 
+func (m *IntIntMap) Contains(key int) bool { + var ok bool + m.mu.RLock() + if m.data != nil { + _, ok = m.data[key] + } + m.mu.RUnlock() + return ok +} + +// Size returns the size of the map. +func (m *IntIntMap) Size() int { + m.mu.RLock() + length := len(m.data) + m.mu.RUnlock() + return length +} + +// IsEmpty checks whether the map is empty. +// It returns true if map is empty, or else false. +func (m *IntIntMap) IsEmpty() bool { + return m.Size() == 0 +} + +// Clear deletes all data of the map, it will remake a new underlying data map. +func (m *IntIntMap) Clear() { + m.mu.Lock() + m.data = make(map[int]int) + m.mu.Unlock() +} + +// Replace the data of the map with given `data`. +func (m *IntIntMap) Replace(data map[int]int) { + m.mu.Lock() + m.data = data + m.mu.Unlock() +} + +// LockFunc locks writing with given callback function `f` within RWMutex.Lock. +func (m *IntIntMap) LockFunc(f func(m map[int]int)) { + m.mu.Lock() + defer m.mu.Unlock() + f(m.data) +} + +// RLockFunc locks reading with given callback function `f` within RWMutex.RLock. +func (m *IntIntMap) RLockFunc(f func(m map[int]int)) { + m.mu.RLock() + defer m.mu.RUnlock() + f(m.data) +} + +// Flip exchanges key-value of the map to value-key. +func (m *IntIntMap) Flip() { + m.mu.Lock() + defer m.mu.Unlock() + n := make(map[int]int, len(m.data)) + for k, v := range m.data { + n[v] = k + } + m.data = n +} + +// Merge merges two hash maps. +// The `other` map will be merged into the map `m`. +func (m *IntIntMap) Merge(other *IntIntMap) { + m.mu.Lock() + defer m.mu.Unlock() + if m.data == nil { + m.data = other.MapCopy() + return + } + if other != m { + other.mu.RLock() + defer other.mu.RUnlock() + } + for k, v := range other.data { + m.data[k] = v + } +} + +// String returns the map as a string. +func (m *IntIntMap) String() string { + if m == nil { + return "" + } + b, _ := m.MarshalJSON() + return string(b) +} + +// MarshalJSON implements the interface MarshalJSON for json.Marshal. 
+func (m IntIntMap) MarshalJSON() ([]byte, error) { + m.mu.RLock() + defer m.mu.RUnlock() + return json.Marshal(m.data) +} + +// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. +func (m *IntIntMap) UnmarshalJSON(b []byte) error { + m.mu.Lock() + defer m.mu.Unlock() + if m.data == nil { + m.data = make(map[int]int) + } + if err := json.UnmarshalUseNumber(b, &m.data); err != nil { + return err + } + return nil +} + +// UnmarshalValue is an interface implement which sets any type of value for map. +func (m *IntIntMap) UnmarshalValue(value interface{}) (err error) { + m.mu.Lock() + defer m.mu.Unlock() + if m.data == nil { + m.data = make(map[int]int) + } + switch value.(type) { + case string, []byte: + return json.UnmarshalUseNumber(gconv.Bytes(value), &m.data) + default: + for k, v := range gconv.Map(value) { + m.data[gconv.Int(k)] = gconv.Int(v) + } + } + return +} + +// DeepCopy implements interface for deep copy of current type. +func (m *IntIntMap) DeepCopy() interface{} { + if m == nil { + return nil + } + m.mu.RLock() + defer m.mu.RUnlock() + data := make(map[int]int, len(m.data)) + for k, v := range m.data { + data[k] = v + } + return NewIntIntMapFrom(data, m.mu.IsSafe()) +} + +// IsSubOf checks whether the current map is a sub-map of `other`. +func (m *IntIntMap) IsSubOf(other *IntIntMap) bool { + if m == other { + return true + } + m.mu.RLock() + defer m.mu.RUnlock() + other.mu.RLock() + defer other.mu.RUnlock() + for key, value := range m.data { + otherValue, ok := other.data[key] + if !ok { + return false + } + if otherValue != value { + return false + } + } + return true +} diff --git a/vendor/github.com/gogf/gf/v2/container/gmap/gmap_hash_int_str_map.go b/vendor/github.com/gogf/gf/v2/container/gmap/gmap_hash_int_str_map.go new file mode 100644 index 00000000..943f6b79 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/container/gmap/gmap_hash_int_str_map.go @@ -0,0 +1,508 @@ +// Copyright GoFrame Author(https://goframe.org). 
All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with gm file, +// You can obtain one at https://github.com/gogf/gf. + +package gmap + +import ( + "github.com/gogf/gf/v2/internal/empty" + "github.com/gogf/gf/v2/internal/json" + "github.com/gogf/gf/v2/internal/rwmutex" + "github.com/gogf/gf/v2/util/gconv" +) + +// IntStrMap implements map[int]string with RWMutex that has switch. +type IntStrMap struct { + mu rwmutex.RWMutex + data map[int]string +} + +// NewIntStrMap returns an empty IntStrMap object. +// The parameter `safe` is used to specify whether using map in concurrent-safety, +// which is false in default. +func NewIntStrMap(safe ...bool) *IntStrMap { + return &IntStrMap{ + mu: rwmutex.Create(safe...), + data: make(map[int]string), + } +} + +// NewIntStrMapFrom creates and returns a hash map from given map `data`. +// Note that, the param `data` map will be set as the underlying data map(no deep copy), +// there might be some concurrent-safe issues when changing the map outside. +func NewIntStrMapFrom(data map[int]string, safe ...bool) *IntStrMap { + return &IntStrMap{ + mu: rwmutex.Create(safe...), + data: data, + } +} + +// Iterator iterates the hash map readonly with custom callback function `f`. +// If `f` returns true, then it continues iterating; or false to stop. +func (m *IntStrMap) Iterator(f func(k int, v string) bool) { + m.mu.RLock() + defer m.mu.RUnlock() + for k, v := range m.data { + if !f(k, v) { + break + } + } +} + +// Clone returns a new hash map with copy of current map data. +func (m *IntStrMap) Clone() *IntStrMap { + return NewIntStrMapFrom(m.MapCopy(), m.mu.IsSafe()) +} + +// Map returns the underlying data map. +// Note that, if it's in concurrent-safe usage, it returns a copy of underlying data, +// or else a pointer to the underlying data. 
+func (m *IntStrMap) Map() map[int]string { + m.mu.RLock() + defer m.mu.RUnlock() + if !m.mu.IsSafe() { + return m.data + } + data := make(map[int]string, len(m.data)) + for k, v := range m.data { + data[k] = v + } + return data +} + +// MapStrAny returns a copy of the underlying data of the map as map[string]interface{}. +func (m *IntStrMap) MapStrAny() map[string]interface{} { + m.mu.RLock() + data := make(map[string]interface{}, len(m.data)) + for k, v := range m.data { + data[gconv.String(k)] = v + } + m.mu.RUnlock() + return data +} + +// MapCopy returns a copy of the underlying data of the hash map. +func (m *IntStrMap) MapCopy() map[int]string { + m.mu.RLock() + defer m.mu.RUnlock() + data := make(map[int]string, len(m.data)) + for k, v := range m.data { + data[k] = v + } + return data +} + +// FilterEmpty deletes all key-value pair of which the value is empty. +// Values like: 0, nil, false, "", len(slice/map/chan) == 0 are considered empty. +func (m *IntStrMap) FilterEmpty() { + m.mu.Lock() + for k, v := range m.data { + if empty.IsEmpty(v) { + delete(m.data, k) + } + } + m.mu.Unlock() +} + +// Set sets key-value to the hash map. +func (m *IntStrMap) Set(key int, val string) { + m.mu.Lock() + if m.data == nil { + m.data = make(map[int]string) + } + m.data[key] = val + m.mu.Unlock() +} + +// Sets batch sets key-values to the hash map. +func (m *IntStrMap) Sets(data map[int]string) { + m.mu.Lock() + if m.data == nil { + m.data = data + } else { + for k, v := range data { + m.data[k] = v + } + } + m.mu.Unlock() +} + +// Search searches the map with given `key`. +// Second return parameter `found` is true if key was found, otherwise false. +func (m *IntStrMap) Search(key int) (value string, found bool) { + m.mu.RLock() + if m.data != nil { + value, found = m.data[key] + } + m.mu.RUnlock() + return +} + +// Get returns the value by given `key`. 
+func (m *IntStrMap) Get(key int) (value string) { + m.mu.RLock() + if m.data != nil { + value = m.data[key] + } + m.mu.RUnlock() + return +} + +// Pop retrieves and deletes an item from the map. +func (m *IntStrMap) Pop() (key int, value string) { + m.mu.Lock() + defer m.mu.Unlock() + for key, value = range m.data { + delete(m.data, key) + return + } + return +} + +// Pops retrieves and deletes `size` items from the map. +// It returns all items if size == -1. +func (m *IntStrMap) Pops(size int) map[int]string { + m.mu.Lock() + defer m.mu.Unlock() + if size > len(m.data) || size == -1 { + size = len(m.data) + } + if size == 0 { + return nil + } + var ( + index = 0 + newMap = make(map[int]string, size) + ) + for k, v := range m.data { + delete(m.data, k) + newMap[k] = v + index++ + if index == size { + break + } + } + return newMap +} + +// doSetWithLockCheck checks whether value of the key exists with mutex.Lock, +// if not exists, set value to the map with given `key`, +// or else just return the existing value. +// +// It returns value with given `key`. +func (m *IntStrMap) doSetWithLockCheck(key int, value string) string { + m.mu.Lock() + defer m.mu.Unlock() + if m.data == nil { + m.data = make(map[int]string) + } + if v, ok := m.data[key]; ok { + return v + } + m.data[key] = value + return value +} + +// GetOrSet returns the value by key, +// or sets value with given `value` if it does not exist and then returns this value. +func (m *IntStrMap) GetOrSet(key int, value string) string { + if v, ok := m.Search(key); !ok { + return m.doSetWithLockCheck(key, value) + } else { + return v + } +} + +// GetOrSetFunc returns the value by key, +// or sets value with returned value of callback function `f` if it does not exist and returns this value. 
+func (m *IntStrMap) GetOrSetFunc(key int, f func() string) string { + if v, ok := m.Search(key); !ok { + return m.doSetWithLockCheck(key, f()) + } else { + return v + } +} + +// GetOrSetFuncLock returns the value by key, +// or sets value with returned value of callback function `f` if it does not exist and returns this value. +// +// GetOrSetFuncLock differs with GetOrSetFunc function is that it executes function `f` +// with mutex.Lock of the hash map. +func (m *IntStrMap) GetOrSetFuncLock(key int, f func() string) string { + if v, ok := m.Search(key); !ok { + m.mu.Lock() + defer m.mu.Unlock() + if m.data == nil { + m.data = make(map[int]string) + } + if v, ok = m.data[key]; ok { + return v + } + v = f() + m.data[key] = v + return v + } else { + return v + } +} + +// SetIfNotExist sets `value` to the map if the `key` does not exist, and then returns true. +// It returns false if `key` exists, and `value` would be ignored. +func (m *IntStrMap) SetIfNotExist(key int, value string) bool { + if !m.Contains(key) { + m.doSetWithLockCheck(key, value) + return true + } + return false +} + +// SetIfNotExistFunc sets value with return value of callback function `f`, and then returns true. +// It returns false if `key` exists, and `value` would be ignored. +func (m *IntStrMap) SetIfNotExistFunc(key int, f func() string) bool { + if !m.Contains(key) { + m.doSetWithLockCheck(key, f()) + return true + } + return false +} + +// SetIfNotExistFuncLock sets value with return value of callback function `f`, and then returns true. +// It returns false if `key` exists, and `value` would be ignored. +// +// SetIfNotExistFuncLock differs with SetIfNotExistFunc function is that +// it executes function `f` with mutex.Lock of the hash map. 
+func (m *IntStrMap) SetIfNotExistFuncLock(key int, f func() string) bool { + if !m.Contains(key) { + m.mu.Lock() + defer m.mu.Unlock() + if m.data == nil { + m.data = make(map[int]string) + } + if _, ok := m.data[key]; !ok { + m.data[key] = f() + } + return true + } + return false +} + +// Removes batch deletes values of the map by keys. +func (m *IntStrMap) Removes(keys []int) { + m.mu.Lock() + if m.data != nil { + for _, key := range keys { + delete(m.data, key) + } + } + m.mu.Unlock() +} + +// Remove deletes value from map by given `key`, and return this deleted value. +func (m *IntStrMap) Remove(key int) (value string) { + m.mu.Lock() + if m.data != nil { + var ok bool + if value, ok = m.data[key]; ok { + delete(m.data, key) + } + } + m.mu.Unlock() + return +} + +// Keys returns all keys of the map as a slice. +func (m *IntStrMap) Keys() []int { + m.mu.RLock() + var ( + keys = make([]int, len(m.data)) + index = 0 + ) + for key := range m.data { + keys[index] = key + index++ + } + m.mu.RUnlock() + return keys +} + +// Values returns all values of the map as a slice. +func (m *IntStrMap) Values() []string { + m.mu.RLock() + var ( + values = make([]string, len(m.data)) + index = 0 + ) + for _, value := range m.data { + values[index] = value + index++ + } + m.mu.RUnlock() + return values +} + +// Contains checks whether a key exists. +// It returns true if the `key` exists, or else false. +func (m *IntStrMap) Contains(key int) bool { + var ok bool + m.mu.RLock() + if m.data != nil { + _, ok = m.data[key] + } + m.mu.RUnlock() + return ok +} + +// Size returns the size of the map. +func (m *IntStrMap) Size() int { + m.mu.RLock() + length := len(m.data) + m.mu.RUnlock() + return length +} + +// IsEmpty checks whether the map is empty. +// It returns true if map is empty, or else false. +func (m *IntStrMap) IsEmpty() bool { + return m.Size() == 0 +} + +// Clear deletes all data of the map, it will remake a new underlying data map. 
+func (m *IntStrMap) Clear() { + m.mu.Lock() + m.data = make(map[int]string) + m.mu.Unlock() +} + +// Replace the data of the map with given `data`. +func (m *IntStrMap) Replace(data map[int]string) { + m.mu.Lock() + m.data = data + m.mu.Unlock() +} + +// LockFunc locks writing with given callback function `f` within RWMutex.Lock. +func (m *IntStrMap) LockFunc(f func(m map[int]string)) { + m.mu.Lock() + defer m.mu.Unlock() + f(m.data) +} + +// RLockFunc locks reading with given callback function `f` within RWMutex.RLock. +func (m *IntStrMap) RLockFunc(f func(m map[int]string)) { + m.mu.RLock() + defer m.mu.RUnlock() + f(m.data) +} + +// Flip exchanges key-value of the map to value-key. +func (m *IntStrMap) Flip() { + m.mu.Lock() + defer m.mu.Unlock() + n := make(map[int]string, len(m.data)) + for k, v := range m.data { + n[gconv.Int(v)] = gconv.String(k) + } + m.data = n +} + +// Merge merges two hash maps. +// The `other` map will be merged into the map `m`. +func (m *IntStrMap) Merge(other *IntStrMap) { + m.mu.Lock() + defer m.mu.Unlock() + if m.data == nil { + m.data = other.MapCopy() + return + } + if other != m { + other.mu.RLock() + defer other.mu.RUnlock() + } + for k, v := range other.data { + m.data[k] = v + } +} + +// String returns the map as a string. +func (m *IntStrMap) String() string { + if m == nil { + return "" + } + b, _ := m.MarshalJSON() + return string(b) +} + +// MarshalJSON implements the interface MarshalJSON for json.Marshal. +func (m IntStrMap) MarshalJSON() ([]byte, error) { + m.mu.RLock() + defer m.mu.RUnlock() + return json.Marshal(m.data) +} + +// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. 
+func (m *IntStrMap) UnmarshalJSON(b []byte) error { + m.mu.Lock() + defer m.mu.Unlock() + if m.data == nil { + m.data = make(map[int]string) + } + if err := json.UnmarshalUseNumber(b, &m.data); err != nil { + return err + } + return nil +} + +// UnmarshalValue is an interface implement which sets any type of value for map. +func (m *IntStrMap) UnmarshalValue(value interface{}) (err error) { + m.mu.Lock() + defer m.mu.Unlock() + if m.data == nil { + m.data = make(map[int]string) + } + switch value.(type) { + case string, []byte: + return json.UnmarshalUseNumber(gconv.Bytes(value), &m.data) + default: + for k, v := range gconv.Map(value) { + m.data[gconv.Int(k)] = gconv.String(v) + } + } + return +} + +// DeepCopy implements interface for deep copy of current type. +func (m *IntStrMap) DeepCopy() interface{} { + if m == nil { + return nil + } + m.mu.RLock() + defer m.mu.RUnlock() + data := make(map[int]string, len(m.data)) + for k, v := range m.data { + data[k] = v + } + return NewIntStrMapFrom(data, m.mu.IsSafe()) +} + +// IsSubOf checks whether the current map is a sub-map of `other`. +func (m *IntStrMap) IsSubOf(other *IntStrMap) bool { + if m == other { + return true + } + m.mu.RLock() + defer m.mu.RUnlock() + other.mu.RLock() + defer other.mu.RUnlock() + for key, value := range m.data { + otherValue, ok := other.data[key] + if !ok { + return false + } + if otherValue != value { + return false + } + } + return true +} diff --git a/vendor/github.com/gogf/gf/v2/container/gmap/gmap_hash_str_any_map.go b/vendor/github.com/gogf/gf/v2/container/gmap/gmap_hash_str_any_map.go new file mode 100644 index 00000000..b05be65a --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/container/gmap/gmap_hash_str_any_map.go @@ -0,0 +1,524 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. 
+// If a copy of the MIT was not distributed with gm file, +// You can obtain one at https://github.com/gogf/gf. +// + +package gmap + +import ( + "github.com/gogf/gf/v2/container/gvar" + "github.com/gogf/gf/v2/internal/deepcopy" + "github.com/gogf/gf/v2/internal/empty" + "github.com/gogf/gf/v2/internal/json" + "github.com/gogf/gf/v2/internal/rwmutex" + "github.com/gogf/gf/v2/util/gconv" +) + +// StrAnyMap implements map[string]interface{} with RWMutex that has switch. +type StrAnyMap struct { + mu rwmutex.RWMutex + data map[string]interface{} +} + +// NewStrAnyMap returns an empty StrAnyMap object. +// The parameter `safe` is used to specify whether using map in concurrent-safety, +// which is false in default. +func NewStrAnyMap(safe ...bool) *StrAnyMap { + return &StrAnyMap{ + mu: rwmutex.Create(safe...), + data: make(map[string]interface{}), + } +} + +// NewStrAnyMapFrom creates and returns a hash map from given map `data`. +// Note that, the param `data` map will be set as the underlying data map(no deep copy), +// there might be some concurrent-safe issues when changing the map outside. +func NewStrAnyMapFrom(data map[string]interface{}, safe ...bool) *StrAnyMap { + return &StrAnyMap{ + mu: rwmutex.Create(safe...), + data: data, + } +} + +// Iterator iterates the hash map readonly with custom callback function `f`. +// If `f` returns true, then it continues iterating; or false to stop. +func (m *StrAnyMap) Iterator(f func(k string, v interface{}) bool) { + m.mu.RLock() + defer m.mu.RUnlock() + for k, v := range m.data { + if !f(k, v) { + break + } + } +} + +// Clone returns a new hash map with copy of current map data. +func (m *StrAnyMap) Clone() *StrAnyMap { + return NewStrAnyMapFrom(m.MapCopy(), m.mu.IsSafe()) +} + +// Map returns the underlying data map. +// Note that, if it's in concurrent-safe usage, it returns a copy of underlying data, +// or else a pointer to the underlying data. 
+func (m *StrAnyMap) Map() map[string]interface{} { + m.mu.RLock() + defer m.mu.RUnlock() + if !m.mu.IsSafe() { + return m.data + } + data := make(map[string]interface{}, len(m.data)) + for k, v := range m.data { + data[k] = v + } + return data +} + +// MapStrAny returns a copy of the underlying data of the map as map[string]interface{}. +func (m *StrAnyMap) MapStrAny() map[string]interface{} { + return m.Map() +} + +// MapCopy returns a copy of the underlying data of the hash map. +func (m *StrAnyMap) MapCopy() map[string]interface{} { + m.mu.RLock() + defer m.mu.RUnlock() + data := make(map[string]interface{}, len(m.data)) + for k, v := range m.data { + data[k] = v + } + return data +} + +// FilterEmpty deletes all key-value pair of which the value is empty. +// Values like: 0, nil, false, "", len(slice/map/chan) == 0 are considered empty. +func (m *StrAnyMap) FilterEmpty() { + m.mu.Lock() + for k, v := range m.data { + if empty.IsEmpty(v) { + delete(m.data, k) + } + } + m.mu.Unlock() +} + +// FilterNil deletes all key-value pair of which the value is nil. +func (m *StrAnyMap) FilterNil() { + m.mu.Lock() + defer m.mu.Unlock() + for k, v := range m.data { + if empty.IsNil(v) { + delete(m.data, k) + } + } +} + +// Set sets key-value to the hash map. +func (m *StrAnyMap) Set(key string, val interface{}) { + m.mu.Lock() + if m.data == nil { + m.data = make(map[string]interface{}) + } + m.data[key] = val + m.mu.Unlock() +} + +// Sets batch sets key-values to the hash map. +func (m *StrAnyMap) Sets(data map[string]interface{}) { + m.mu.Lock() + if m.data == nil { + m.data = data + } else { + for k, v := range data { + m.data[k] = v + } + } + m.mu.Unlock() +} + +// Search searches the map with given `key`. +// Second return parameter `found` is true if key was found, otherwise false. 
+func (m *StrAnyMap) Search(key string) (value interface{}, found bool) { + m.mu.RLock() + if m.data != nil { + value, found = m.data[key] + } + m.mu.RUnlock() + return +} + +// Get returns the value by given `key`. +func (m *StrAnyMap) Get(key string) (value interface{}) { + m.mu.RLock() + if m.data != nil { + value = m.data[key] + } + m.mu.RUnlock() + return +} + +// Pop retrieves and deletes an item from the map. +func (m *StrAnyMap) Pop() (key string, value interface{}) { + m.mu.Lock() + defer m.mu.Unlock() + for key, value = range m.data { + delete(m.data, key) + return + } + return +} + +// Pops retrieves and deletes `size` items from the map. +// It returns all items if size == -1. +func (m *StrAnyMap) Pops(size int) map[string]interface{} { + m.mu.Lock() + defer m.mu.Unlock() + if size > len(m.data) || size == -1 { + size = len(m.data) + } + if size == 0 { + return nil + } + var ( + index = 0 + newMap = make(map[string]interface{}, size) + ) + for k, v := range m.data { + delete(m.data, k) + newMap[k] = v + index++ + if index == size { + break + } + } + return newMap +} + +// doSetWithLockCheck checks whether value of the key exists with mutex.Lock, +// if not exists, set value to the map with given `key`, +// or else just return the existing value. +// +// When setting value, if `value` is type of `func() interface {}`, +// it will be executed with mutex.Lock of the hash map, +// and its return value will be set to the map with `key`. +// +// It returns value with given `key`. 
+func (m *StrAnyMap) doSetWithLockCheck(key string, value interface{}) interface{} { + m.mu.Lock() + defer m.mu.Unlock() + if m.data == nil { + m.data = make(map[string]interface{}) + } + if v, ok := m.data[key]; ok { + return v + } + if f, ok := value.(func() interface{}); ok { + value = f() + } + if value != nil { + m.data[key] = value + } + return value +} + +// GetOrSet returns the value by key, +// or sets value with given `value` if it does not exist and then returns this value. +func (m *StrAnyMap) GetOrSet(key string, value interface{}) interface{} { + if v, ok := m.Search(key); !ok { + return m.doSetWithLockCheck(key, value) + } else { + return v + } +} + +// GetOrSetFunc returns the value by key, +// or sets value with returned value of callback function `f` if it does not exist +// and then returns this value. +func (m *StrAnyMap) GetOrSetFunc(key string, f func() interface{}) interface{} { + if v, ok := m.Search(key); !ok { + return m.doSetWithLockCheck(key, f()) + } else { + return v + } +} + +// GetOrSetFuncLock returns the value by key, +// or sets value with returned value of callback function `f` if it does not exist +// and then returns this value. +// +// GetOrSetFuncLock differs with GetOrSetFunc function is that it executes function `f` +// with mutex.Lock of the hash map. +func (m *StrAnyMap) GetOrSetFuncLock(key string, f func() interface{}) interface{} { + if v, ok := m.Search(key); !ok { + return m.doSetWithLockCheck(key, f) + } else { + return v + } +} + +// GetVar returns a Var with the value by given `key`. +// The returned Var is un-concurrent safe. +func (m *StrAnyMap) GetVar(key string) *gvar.Var { + return gvar.New(m.Get(key)) +} + +// GetVarOrSet returns a Var with result from GetVarOrSet. +// The returned Var is un-concurrent safe. +func (m *StrAnyMap) GetVarOrSet(key string, value interface{}) *gvar.Var { + return gvar.New(m.GetOrSet(key, value)) +} + +// GetVarOrSetFunc returns a Var with result from GetOrSetFunc. 
+// The returned Var is un-concurrent safe. +func (m *StrAnyMap) GetVarOrSetFunc(key string, f func() interface{}) *gvar.Var { + return gvar.New(m.GetOrSetFunc(key, f)) +} + +// GetVarOrSetFuncLock returns a Var with result from GetOrSetFuncLock. +// The returned Var is un-concurrent safe. +func (m *StrAnyMap) GetVarOrSetFuncLock(key string, f func() interface{}) *gvar.Var { + return gvar.New(m.GetOrSetFuncLock(key, f)) +} + +// SetIfNotExist sets `value` to the map if the `key` does not exist, and then returns true. +// It returns false if `key` exists, and `value` would be ignored. +func (m *StrAnyMap) SetIfNotExist(key string, value interface{}) bool { + if !m.Contains(key) { + m.doSetWithLockCheck(key, value) + return true + } + return false +} + +// SetIfNotExistFunc sets value with return value of callback function `f`, and then returns true. +// It returns false if `key` exists, and `value` would be ignored. +func (m *StrAnyMap) SetIfNotExistFunc(key string, f func() interface{}) bool { + if !m.Contains(key) { + m.doSetWithLockCheck(key, f()) + return true + } + return false +} + +// SetIfNotExistFuncLock sets value with return value of callback function `f`, and then returns true. +// It returns false if `key` exists, and `value` would be ignored. +// +// SetIfNotExistFuncLock differs with SetIfNotExistFunc function is that +// it executes function `f` with mutex.Lock of the hash map. +func (m *StrAnyMap) SetIfNotExistFuncLock(key string, f func() interface{}) bool { + if !m.Contains(key) { + m.doSetWithLockCheck(key, f) + return true + } + return false +} + +// Removes batch deletes values of the map by keys. +func (m *StrAnyMap) Removes(keys []string) { + m.mu.Lock() + if m.data != nil { + for _, key := range keys { + delete(m.data, key) + } + } + m.mu.Unlock() +} + +// Remove deletes value from map by given `key`, and return this deleted value. 
+func (m *StrAnyMap) Remove(key string) (value interface{}) { + m.mu.Lock() + if m.data != nil { + var ok bool + if value, ok = m.data[key]; ok { + delete(m.data, key) + } + } + m.mu.Unlock() + return +} + +// Keys returns all keys of the map as a slice. +func (m *StrAnyMap) Keys() []string { + m.mu.RLock() + var ( + keys = make([]string, len(m.data)) + index = 0 + ) + for key := range m.data { + keys[index] = key + index++ + } + m.mu.RUnlock() + return keys +} + +// Values returns all values of the map as a slice. +func (m *StrAnyMap) Values() []interface{} { + m.mu.RLock() + var ( + values = make([]interface{}, len(m.data)) + index = 0 + ) + for _, value := range m.data { + values[index] = value + index++ + } + m.mu.RUnlock() + return values +} + +// Contains checks whether a key exists. +// It returns true if the `key` exists, or else false. +func (m *StrAnyMap) Contains(key string) bool { + var ok bool + m.mu.RLock() + if m.data != nil { + _, ok = m.data[key] + } + m.mu.RUnlock() + return ok +} + +// Size returns the size of the map. +func (m *StrAnyMap) Size() int { + m.mu.RLock() + length := len(m.data) + m.mu.RUnlock() + return length +} + +// IsEmpty checks whether the map is empty. +// It returns true if map is empty, or else false. +func (m *StrAnyMap) IsEmpty() bool { + return m.Size() == 0 +} + +// Clear deletes all data of the map, it will remake a new underlying data map. +func (m *StrAnyMap) Clear() { + m.mu.Lock() + m.data = make(map[string]interface{}) + m.mu.Unlock() +} + +// Replace the data of the map with given `data`. +func (m *StrAnyMap) Replace(data map[string]interface{}) { + m.mu.Lock() + m.data = data + m.mu.Unlock() +} + +// LockFunc locks writing with given callback function `f` within RWMutex.Lock. +func (m *StrAnyMap) LockFunc(f func(m map[string]interface{})) { + m.mu.Lock() + defer m.mu.Unlock() + f(m.data) +} + +// RLockFunc locks reading with given callback function `f` within RWMutex.RLock. 
+func (m *StrAnyMap) RLockFunc(f func(m map[string]interface{})) { + m.mu.RLock() + defer m.mu.RUnlock() + f(m.data) +} + +// Flip exchanges key-value of the map to value-key. +func (m *StrAnyMap) Flip() { + m.mu.Lock() + defer m.mu.Unlock() + n := make(map[string]interface{}, len(m.data)) + for k, v := range m.data { + n[gconv.String(v)] = k + } + m.data = n +} + +// Merge merges two hash maps. +// The `other` map will be merged into the map `m`. +func (m *StrAnyMap) Merge(other *StrAnyMap) { + m.mu.Lock() + defer m.mu.Unlock() + if m.data == nil { + m.data = other.MapCopy() + return + } + if other != m { + other.mu.RLock() + defer other.mu.RUnlock() + } + for k, v := range other.data { + m.data[k] = v + } +} + +// String returns the map as a string. +func (m *StrAnyMap) String() string { + if m == nil { + return "" + } + b, _ := m.MarshalJSON() + return string(b) +} + +// MarshalJSON implements the interface MarshalJSON for json.Marshal. +func (m StrAnyMap) MarshalJSON() ([]byte, error) { + m.mu.RLock() + defer m.mu.RUnlock() + return json.Marshal(m.data) +} + +// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. +func (m *StrAnyMap) UnmarshalJSON(b []byte) error { + m.mu.Lock() + defer m.mu.Unlock() + if m.data == nil { + m.data = make(map[string]interface{}) + } + if err := json.UnmarshalUseNumber(b, &m.data); err != nil { + return err + } + return nil +} + +// UnmarshalValue is an interface implement which sets any type of value for map. +func (m *StrAnyMap) UnmarshalValue(value interface{}) (err error) { + m.mu.Lock() + defer m.mu.Unlock() + m.data = gconv.Map(value) + return +} + +// DeepCopy implements interface for deep copy of current type. 
+func (m *StrAnyMap) DeepCopy() interface{} { + if m == nil { + return nil + } + m.mu.RLock() + defer m.mu.RUnlock() + data := make(map[string]interface{}, len(m.data)) + for k, v := range m.data { + data[k] = deepcopy.Copy(v) + } + return NewStrAnyMapFrom(data, m.mu.IsSafe()) +} + +// IsSubOf checks whether the current map is a sub-map of `other`. +func (m *StrAnyMap) IsSubOf(other *StrAnyMap) bool { + if m == other { + return true + } + m.mu.RLock() + defer m.mu.RUnlock() + other.mu.RLock() + defer other.mu.RUnlock() + for key, value := range m.data { + otherValue, ok := other.data[key] + if !ok { + return false + } + if otherValue != value { + return false + } + } + return true +} diff --git a/vendor/github.com/gogf/gf/v2/container/gmap/gmap_hash_str_int_map.go b/vendor/github.com/gogf/gf/v2/container/gmap/gmap_hash_str_int_map.go new file mode 100644 index 00000000..e0b330f2 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/container/gmap/gmap_hash_str_int_map.go @@ -0,0 +1,512 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with gm file, +// You can obtain one at https://github.com/gogf/gf. +// + +package gmap + +import ( + "github.com/gogf/gf/v2/internal/empty" + "github.com/gogf/gf/v2/internal/json" + "github.com/gogf/gf/v2/internal/rwmutex" + "github.com/gogf/gf/v2/util/gconv" +) + +// StrIntMap implements map[string]int with RWMutex that has switch. +type StrIntMap struct { + mu rwmutex.RWMutex + data map[string]int +} + +// NewStrIntMap returns an empty StrIntMap object. +// The parameter `safe` is used to specify whether using map in concurrent-safety, +// which is false in default. +func NewStrIntMap(safe ...bool) *StrIntMap { + return &StrIntMap{ + mu: rwmutex.Create(safe...), + data: make(map[string]int), + } +} + +// NewStrIntMapFrom creates and returns a hash map from given map `data`. 
+// Note that, the param `data` map will be set as the underlying data map(no deep copy), +// there might be some concurrent-safe issues when changing the map outside. +func NewStrIntMapFrom(data map[string]int, safe ...bool) *StrIntMap { + return &StrIntMap{ + mu: rwmutex.Create(safe...), + data: data, + } +} + +// Iterator iterates the hash map readonly with custom callback function `f`. +// If `f` returns true, then it continues iterating; or false to stop. +func (m *StrIntMap) Iterator(f func(k string, v int) bool) { + m.mu.RLock() + defer m.mu.RUnlock() + for k, v := range m.data { + if !f(k, v) { + break + } + } +} + +// Clone returns a new hash map with copy of current map data. +func (m *StrIntMap) Clone() *StrIntMap { + return NewStrIntMapFrom(m.MapCopy(), m.mu.IsSafe()) +} + +// Map returns the underlying data map. +// Note that, if it's in concurrent-safe usage, it returns a copy of underlying data, +// or else a pointer to the underlying data. +func (m *StrIntMap) Map() map[string]int { + m.mu.RLock() + defer m.mu.RUnlock() + if !m.mu.IsSafe() { + return m.data + } + data := make(map[string]int, len(m.data)) + for k, v := range m.data { + data[k] = v + } + return data +} + +// MapStrAny returns a copy of the underlying data of the map as map[string]interface{}. +func (m *StrIntMap) MapStrAny() map[string]interface{} { + m.mu.RLock() + defer m.mu.RUnlock() + data := make(map[string]interface{}, len(m.data)) + for k, v := range m.data { + data[k] = v + } + return data +} + +// MapCopy returns a copy of the underlying data of the hash map. +func (m *StrIntMap) MapCopy() map[string]int { + m.mu.RLock() + defer m.mu.RUnlock() + data := make(map[string]int, len(m.data)) + for k, v := range m.data { + data[k] = v + } + return data +} + +// FilterEmpty deletes all key-value pair of which the value is empty. +// Values like: 0, nil, false, "", len(slice/map/chan) == 0 are considered empty. 
+func (m *StrIntMap) FilterEmpty() { + m.mu.Lock() + for k, v := range m.data { + if empty.IsEmpty(v) { + delete(m.data, k) + } + } + m.mu.Unlock() +} + +// Set sets key-value to the hash map. +func (m *StrIntMap) Set(key string, val int) { + m.mu.Lock() + if m.data == nil { + m.data = make(map[string]int) + } + m.data[key] = val + m.mu.Unlock() +} + +// Sets batch sets key-values to the hash map. +func (m *StrIntMap) Sets(data map[string]int) { + m.mu.Lock() + if m.data == nil { + m.data = data + } else { + for k, v := range data { + m.data[k] = v + } + } + m.mu.Unlock() +} + +// Search searches the map with given `key`. +// Second return parameter `found` is true if key was found, otherwise false. +func (m *StrIntMap) Search(key string) (value int, found bool) { + m.mu.RLock() + if m.data != nil { + value, found = m.data[key] + } + m.mu.RUnlock() + return +} + +// Get returns the value by given `key`. +func (m *StrIntMap) Get(key string) (value int) { + m.mu.RLock() + if m.data != nil { + value = m.data[key] + } + m.mu.RUnlock() + return +} + +// Pop retrieves and deletes an item from the map. +func (m *StrIntMap) Pop() (key string, value int) { + m.mu.Lock() + defer m.mu.Unlock() + for key, value = range m.data { + delete(m.data, key) + return + } + return +} + +// Pops retrieves and deletes `size` items from the map. +// It returns all items if size == -1. +func (m *StrIntMap) Pops(size int) map[string]int { + m.mu.Lock() + defer m.mu.Unlock() + if size > len(m.data) || size == -1 { + size = len(m.data) + } + if size == 0 { + return nil + } + var ( + index = 0 + newMap = make(map[string]int, size) + ) + for k, v := range m.data { + delete(m.data, k) + newMap[k] = v + index++ + if index == size { + break + } + } + return newMap +} + +// doSetWithLockCheck checks whether value of the key exists with mutex.Lock, +// if not exists, set value to the map with given `key`, +// or else just return the existing value. +// +// It returns value with given `key`. 
+func (m *StrIntMap) doSetWithLockCheck(key string, value int) int { + m.mu.Lock() + if m.data == nil { + m.data = make(map[string]int) + } + if v, ok := m.data[key]; ok { + m.mu.Unlock() + return v + } + m.data[key] = value + m.mu.Unlock() + return value +} + +// GetOrSet returns the value by key, +// or sets value with given `value` if it does not exist and then returns this value. +func (m *StrIntMap) GetOrSet(key string, value int) int { + if v, ok := m.Search(key); !ok { + return m.doSetWithLockCheck(key, value) + } else { + return v + } +} + +// GetOrSetFunc returns the value by key, +// or sets value with returned value of callback function `f` if it does not exist +// and then returns this value. +func (m *StrIntMap) GetOrSetFunc(key string, f func() int) int { + if v, ok := m.Search(key); !ok { + return m.doSetWithLockCheck(key, f()) + } else { + return v + } +} + +// GetOrSetFuncLock returns the value by key, +// or sets value with returned value of callback function `f` if it does not exist +// and then returns this value. +// +// GetOrSetFuncLock differs with GetOrSetFunc function is that it executes function `f` +// with mutex.Lock of the hash map. +func (m *StrIntMap) GetOrSetFuncLock(key string, f func() int) int { + if v, ok := m.Search(key); !ok { + m.mu.Lock() + defer m.mu.Unlock() + if m.data == nil { + m.data = make(map[string]int) + } + if v, ok = m.data[key]; ok { + return v + } + v = f() + m.data[key] = v + return v + } else { + return v + } +} + +// SetIfNotExist sets `value` to the map if the `key` does not exist, and then returns true. +// It returns false if `key` exists, and `value` would be ignored. +func (m *StrIntMap) SetIfNotExist(key string, value int) bool { + if !m.Contains(key) { + m.doSetWithLockCheck(key, value) + return true + } + return false +} + +// SetIfNotExistFunc sets value with return value of callback function `f`, and then returns true. +// It returns false if `key` exists, and `value` would be ignored. 
+func (m *StrIntMap) SetIfNotExistFunc(key string, f func() int) bool { + if !m.Contains(key) { + m.doSetWithLockCheck(key, f()) + return true + } + return false +} + +// SetIfNotExistFuncLock sets value with return value of callback function `f`, and then returns true. +// It returns false if `key` exists, and `value` would be ignored. +// +// SetIfNotExistFuncLock differs with SetIfNotExistFunc function is that +// it executes function `f` with mutex.Lock of the hash map. +func (m *StrIntMap) SetIfNotExistFuncLock(key string, f func() int) bool { + if !m.Contains(key) { + m.mu.Lock() + defer m.mu.Unlock() + if m.data == nil { + m.data = make(map[string]int) + } + if _, ok := m.data[key]; !ok { + m.data[key] = f() + } + return true + } + return false +} + +// Removes batch deletes values of the map by keys. +func (m *StrIntMap) Removes(keys []string) { + m.mu.Lock() + if m.data != nil { + for _, key := range keys { + delete(m.data, key) + } + } + m.mu.Unlock() +} + +// Remove deletes value from map by given `key`, and return this deleted value. +func (m *StrIntMap) Remove(key string) (value int) { + m.mu.Lock() + if m.data != nil { + var ok bool + if value, ok = m.data[key]; ok { + delete(m.data, key) + } + } + m.mu.Unlock() + return +} + +// Keys returns all keys of the map as a slice. +func (m *StrIntMap) Keys() []string { + m.mu.RLock() + var ( + keys = make([]string, len(m.data)) + index = 0 + ) + for key := range m.data { + keys[index] = key + index++ + } + m.mu.RUnlock() + return keys +} + +// Values returns all values of the map as a slice. +func (m *StrIntMap) Values() []int { + m.mu.RLock() + var ( + values = make([]int, len(m.data)) + index = 0 + ) + for _, value := range m.data { + values[index] = value + index++ + } + m.mu.RUnlock() + return values +} + +// Contains checks whether a key exists. +// It returns true if the `key` exists, or else false. 
+func (m *StrIntMap) Contains(key string) bool { + var ok bool + m.mu.RLock() + if m.data != nil { + _, ok = m.data[key] + } + m.mu.RUnlock() + return ok +} + +// Size returns the size of the map. +func (m *StrIntMap) Size() int { + m.mu.RLock() + length := len(m.data) + m.mu.RUnlock() + return length +} + +// IsEmpty checks whether the map is empty. +// It returns true if map is empty, or else false. +func (m *StrIntMap) IsEmpty() bool { + return m.Size() == 0 +} + +// Clear deletes all data of the map, it will remake a new underlying data map. +func (m *StrIntMap) Clear() { + m.mu.Lock() + m.data = make(map[string]int) + m.mu.Unlock() +} + +// Replace the data of the map with given `data`. +func (m *StrIntMap) Replace(data map[string]int) { + m.mu.Lock() + m.data = data + m.mu.Unlock() +} + +// LockFunc locks writing with given callback function `f` within RWMutex.Lock. +func (m *StrIntMap) LockFunc(f func(m map[string]int)) { + m.mu.Lock() + defer m.mu.Unlock() + f(m.data) +} + +// RLockFunc locks reading with given callback function `f` within RWMutex.RLock. +func (m *StrIntMap) RLockFunc(f func(m map[string]int)) { + m.mu.RLock() + defer m.mu.RUnlock() + f(m.data) +} + +// Flip exchanges key-value of the map to value-key. +func (m *StrIntMap) Flip() { + m.mu.Lock() + defer m.mu.Unlock() + n := make(map[string]int, len(m.data)) + for k, v := range m.data { + n[gconv.String(v)] = gconv.Int(k) + } + m.data = n +} + +// Merge merges two hash maps. +// The `other` map will be merged into the map `m`. +func (m *StrIntMap) Merge(other *StrIntMap) { + m.mu.Lock() + defer m.mu.Unlock() + if m.data == nil { + m.data = other.MapCopy() + return + } + if other != m { + other.mu.RLock() + defer other.mu.RUnlock() + } + for k, v := range other.data { + m.data[k] = v + } +} + +// String returns the map as a string. 
+func (m *StrIntMap) String() string { + if m == nil { + return "" + } + b, _ := m.MarshalJSON() + return string(b) +} + +// MarshalJSON implements the interface MarshalJSON for json.Marshal. +func (m StrIntMap) MarshalJSON() ([]byte, error) { + m.mu.RLock() + defer m.mu.RUnlock() + return json.Marshal(m.data) +} + +// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. +func (m *StrIntMap) UnmarshalJSON(b []byte) error { + m.mu.Lock() + defer m.mu.Unlock() + if m.data == nil { + m.data = make(map[string]int) + } + if err := json.UnmarshalUseNumber(b, &m.data); err != nil { + return err + } + return nil +} + +// UnmarshalValue is an interface implement which sets any type of value for map. +func (m *StrIntMap) UnmarshalValue(value interface{}) (err error) { + m.mu.Lock() + defer m.mu.Unlock() + if m.data == nil { + m.data = make(map[string]int) + } + switch value.(type) { + case string, []byte: + return json.UnmarshalUseNumber(gconv.Bytes(value), &m.data) + default: + for k, v := range gconv.Map(value) { + m.data[k] = gconv.Int(v) + } + } + return +} + +// DeepCopy implements interface for deep copy of current type. +func (m *StrIntMap) DeepCopy() interface{} { + if m == nil { + return nil + } + m.mu.RLock() + defer m.mu.RUnlock() + data := make(map[string]int, len(m.data)) + for k, v := range m.data { + data[k] = v + } + return NewStrIntMapFrom(data, m.mu.IsSafe()) +} + +// IsSubOf checks whether the current map is a sub-map of `other`. 
+func (m *StrIntMap) IsSubOf(other *StrIntMap) bool { + if m == other { + return true + } + m.mu.RLock() + defer m.mu.RUnlock() + other.mu.RLock() + defer other.mu.RUnlock() + for key, value := range m.data { + otherValue, ok := other.data[key] + if !ok { + return false + } + if otherValue != value { + return false + } + } + return true +} diff --git a/vendor/github.com/gogf/gf/v2/container/gmap/gmap_hash_str_str_map.go b/vendor/github.com/gogf/gf/v2/container/gmap/gmap_hash_str_str_map.go new file mode 100644 index 00000000..e73628f8 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/container/gmap/gmap_hash_str_str_map.go @@ -0,0 +1,501 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with gm file, +// You can obtain one at https://github.com/gogf/gf. +// + +package gmap + +import ( + "github.com/gogf/gf/v2/internal/empty" + "github.com/gogf/gf/v2/internal/json" + "github.com/gogf/gf/v2/internal/rwmutex" + "github.com/gogf/gf/v2/util/gconv" +) + +// StrStrMap implements map[string]string with RWMutex that has switch. +type StrStrMap struct { + mu rwmutex.RWMutex + data map[string]string +} + +// NewStrStrMap returns an empty StrStrMap object. +// The parameter `safe` is used to specify whether using map in concurrent-safety, +// which is false in default. +func NewStrStrMap(safe ...bool) *StrStrMap { + return &StrStrMap{ + data: make(map[string]string), + mu: rwmutex.Create(safe...), + } +} + +// NewStrStrMapFrom creates and returns a hash map from given map `data`. +// Note that, the param `data` map will be set as the underlying data map(no deep copy), +// there might be some concurrent-safe issues when changing the map outside. 
+func NewStrStrMapFrom(data map[string]string, safe ...bool) *StrStrMap { + return &StrStrMap{ + mu: rwmutex.Create(safe...), + data: data, + } +} + +// Iterator iterates the hash map readonly with custom callback function `f`. +// If `f` returns true, then it continues iterating; or false to stop. +func (m *StrStrMap) Iterator(f func(k string, v string) bool) { + m.mu.RLock() + defer m.mu.RUnlock() + for k, v := range m.data { + if !f(k, v) { + break + } + } +} + +// Clone returns a new hash map with copy of current map data. +func (m *StrStrMap) Clone() *StrStrMap { + return NewStrStrMapFrom(m.MapCopy(), m.mu.IsSafe()) +} + +// Map returns the underlying data map. +// Note that, if it's in concurrent-safe usage, it returns a copy of underlying data, +// or else a pointer to the underlying data. +func (m *StrStrMap) Map() map[string]string { + m.mu.RLock() + defer m.mu.RUnlock() + if !m.mu.IsSafe() { + return m.data + } + data := make(map[string]string, len(m.data)) + for k, v := range m.data { + data[k] = v + } + return data +} + +// MapStrAny returns a copy of the underlying data of the map as map[string]interface{}. +func (m *StrStrMap) MapStrAny() map[string]interface{} { + m.mu.RLock() + data := make(map[string]interface{}, len(m.data)) + for k, v := range m.data { + data[k] = v + } + m.mu.RUnlock() + return data +} + +// MapCopy returns a copy of the underlying data of the hash map. +func (m *StrStrMap) MapCopy() map[string]string { + m.mu.RLock() + defer m.mu.RUnlock() + data := make(map[string]string, len(m.data)) + for k, v := range m.data { + data[k] = v + } + return data +} + +// FilterEmpty deletes all key-value pair of which the value is empty. +// Values like: 0, nil, false, "", len(slice/map/chan) == 0 are considered empty. +func (m *StrStrMap) FilterEmpty() { + m.mu.Lock() + for k, v := range m.data { + if empty.IsEmpty(v) { + delete(m.data, k) + } + } + m.mu.Unlock() +} + +// Set sets key-value to the hash map. 
+func (m *StrStrMap) Set(key string, val string) { + m.mu.Lock() + if m.data == nil { + m.data = make(map[string]string) + } + m.data[key] = val + m.mu.Unlock() +} + +// Sets batch sets key-values to the hash map. +func (m *StrStrMap) Sets(data map[string]string) { + m.mu.Lock() + if m.data == nil { + m.data = data + } else { + for k, v := range data { + m.data[k] = v + } + } + m.mu.Unlock() +} + +// Search searches the map with given `key`. +// Second return parameter `found` is true if key was found, otherwise false. +func (m *StrStrMap) Search(key string) (value string, found bool) { + m.mu.RLock() + if m.data != nil { + value, found = m.data[key] + } + m.mu.RUnlock() + return +} + +// Get returns the value by given `key`. +func (m *StrStrMap) Get(key string) (value string) { + m.mu.RLock() + if m.data != nil { + value = m.data[key] + } + m.mu.RUnlock() + return +} + +// Pop retrieves and deletes an item from the map. +func (m *StrStrMap) Pop() (key, value string) { + m.mu.Lock() + defer m.mu.Unlock() + for key, value = range m.data { + delete(m.data, key) + return + } + return +} + +// Pops retrieves and deletes `size` items from the map. +// It returns all items if size == -1. +func (m *StrStrMap) Pops(size int) map[string]string { + m.mu.Lock() + defer m.mu.Unlock() + if size > len(m.data) || size == -1 { + size = len(m.data) + } + if size == 0 { + return nil + } + var ( + index = 0 + newMap = make(map[string]string, size) + ) + for k, v := range m.data { + delete(m.data, k) + newMap[k] = v + index++ + if index == size { + break + } + } + return newMap +} + +// doSetWithLockCheck checks whether value of the key exists with mutex.Lock, +// if not exists, set value to the map with given `key`, +// or else just return the existing value. +// +// It returns value with given `key`. 
+func (m *StrStrMap) doSetWithLockCheck(key string, value string) string { + m.mu.Lock() + defer m.mu.Unlock() + if m.data == nil { + m.data = make(map[string]string) + } + if v, ok := m.data[key]; ok { + return v + } + m.data[key] = value + return value +} + +// GetOrSet returns the value by key, +// or sets value with given `value` if it does not exist and then returns this value. +func (m *StrStrMap) GetOrSet(key string, value string) string { + if v, ok := m.Search(key); !ok { + return m.doSetWithLockCheck(key, value) + } else { + return v + } +} + +// GetOrSetFunc returns the value by key, +// or sets value with returned value of callback function `f` if it does not exist +// and then returns this value. +func (m *StrStrMap) GetOrSetFunc(key string, f func() string) string { + if v, ok := m.Search(key); !ok { + return m.doSetWithLockCheck(key, f()) + } else { + return v + } +} + +// GetOrSetFuncLock returns the value by key, +// or sets value with returned value of callback function `f` if it does not exist +// and then returns this value. +// +// GetOrSetFuncLock differs with GetOrSetFunc function is that it executes function `f` +// with mutex.Lock of the hash map. +func (m *StrStrMap) GetOrSetFuncLock(key string, f func() string) string { + if v, ok := m.Search(key); !ok { + m.mu.Lock() + defer m.mu.Unlock() + if m.data == nil { + m.data = make(map[string]string) + } + if v, ok = m.data[key]; ok { + return v + } + v = f() + m.data[key] = v + return v + } else { + return v + } +} + +// SetIfNotExist sets `value` to the map if the `key` does not exist, and then returns true. +// It returns false if `key` exists, and `value` would be ignored. +func (m *StrStrMap) SetIfNotExist(key string, value string) bool { + if !m.Contains(key) { + m.doSetWithLockCheck(key, value) + return true + } + return false +} + +// SetIfNotExistFunc sets value with return value of callback function `f`, and then returns true. 
+// It returns false if `key` exists, and `value` would be ignored. +func (m *StrStrMap) SetIfNotExistFunc(key string, f func() string) bool { + if !m.Contains(key) { + m.doSetWithLockCheck(key, f()) + return true + } + return false +} + +// SetIfNotExistFuncLock sets value with return value of callback function `f`, and then returns true. +// It returns false if `key` exists, and `value` would be ignored. +// +// SetIfNotExistFuncLock differs with SetIfNotExistFunc function is that +// it executes function `f` with mutex.Lock of the hash map. +func (m *StrStrMap) SetIfNotExistFuncLock(key string, f func() string) bool { + if !m.Contains(key) { + m.mu.Lock() + defer m.mu.Unlock() + if m.data == nil { + m.data = make(map[string]string) + } + if _, ok := m.data[key]; !ok { + m.data[key] = f() + } + return true + } + return false +} + +// Removes batch deletes values of the map by keys. +func (m *StrStrMap) Removes(keys []string) { + m.mu.Lock() + if m.data != nil { + for _, key := range keys { + delete(m.data, key) + } + } + m.mu.Unlock() +} + +// Remove deletes value from map by given `key`, and return this deleted value. +func (m *StrStrMap) Remove(key string) (value string) { + m.mu.Lock() + if m.data != nil { + var ok bool + if value, ok = m.data[key]; ok { + delete(m.data, key) + } + } + m.mu.Unlock() + return +} + +// Keys returns all keys of the map as a slice. +func (m *StrStrMap) Keys() []string { + m.mu.RLock() + var ( + keys = make([]string, len(m.data)) + index = 0 + ) + for key := range m.data { + keys[index] = key + index++ + } + m.mu.RUnlock() + return keys +} + +// Values returns all values of the map as a slice. +func (m *StrStrMap) Values() []string { + m.mu.RLock() + var ( + values = make([]string, len(m.data)) + index = 0 + ) + for _, value := range m.data { + values[index] = value + index++ + } + m.mu.RUnlock() + return values +} + +// Contains checks whether a key exists. +// It returns true if the `key` exists, or else false. 
+func (m *StrStrMap) Contains(key string) bool { + var ok bool + m.mu.RLock() + if m.data != nil { + _, ok = m.data[key] + } + m.mu.RUnlock() + return ok +} + +// Size returns the size of the map. +func (m *StrStrMap) Size() int { + m.mu.RLock() + length := len(m.data) + m.mu.RUnlock() + return length +} + +// IsEmpty checks whether the map is empty. +// It returns true if map is empty, or else false. +func (m *StrStrMap) IsEmpty() bool { + return m.Size() == 0 +} + +// Clear deletes all data of the map, it will remake a new underlying data map. +func (m *StrStrMap) Clear() { + m.mu.Lock() + m.data = make(map[string]string) + m.mu.Unlock() +} + +// Replace the data of the map with given `data`. +func (m *StrStrMap) Replace(data map[string]string) { + m.mu.Lock() + m.data = data + m.mu.Unlock() +} + +// LockFunc locks writing with given callback function `f` within RWMutex.Lock. +func (m *StrStrMap) LockFunc(f func(m map[string]string)) { + m.mu.Lock() + defer m.mu.Unlock() + f(m.data) +} + +// RLockFunc locks reading with given callback function `f` within RWMutex.RLock. +func (m *StrStrMap) RLockFunc(f func(m map[string]string)) { + m.mu.RLock() + defer m.mu.RUnlock() + f(m.data) +} + +// Flip exchanges key-value of the map to value-key. +func (m *StrStrMap) Flip() { + m.mu.Lock() + defer m.mu.Unlock() + n := make(map[string]string, len(m.data)) + for k, v := range m.data { + n[v] = k + } + m.data = n +} + +// Merge merges two hash maps. +// The `other` map will be merged into the map `m`. +func (m *StrStrMap) Merge(other *StrStrMap) { + m.mu.Lock() + defer m.mu.Unlock() + if m.data == nil { + m.data = other.MapCopy() + return + } + if other != m { + other.mu.RLock() + defer other.mu.RUnlock() + } + for k, v := range other.data { + m.data[k] = v + } +} + +// String returns the map as a string. 
+func (m *StrStrMap) String() string { + if m == nil { + return "" + } + b, _ := m.MarshalJSON() + return string(b) +} + +// MarshalJSON implements the interface MarshalJSON for json.Marshal. +func (m StrStrMap) MarshalJSON() ([]byte, error) { + m.mu.RLock() + defer m.mu.RUnlock() + return json.Marshal(m.data) +} + +// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. +func (m *StrStrMap) UnmarshalJSON(b []byte) error { + m.mu.Lock() + defer m.mu.Unlock() + if m.data == nil { + m.data = make(map[string]string) + } + if err := json.UnmarshalUseNumber(b, &m.data); err != nil { + return err + } + return nil +} + +// UnmarshalValue is an interface implement which sets any type of value for map. +func (m *StrStrMap) UnmarshalValue(value interface{}) (err error) { + m.mu.Lock() + defer m.mu.Unlock() + m.data = gconv.MapStrStr(value) + return +} + +// DeepCopy implements interface for deep copy of current type. +func (m *StrStrMap) DeepCopy() interface{} { + if m == nil { + return nil + } + m.mu.RLock() + defer m.mu.RUnlock() + data := make(map[string]string, len(m.data)) + for k, v := range m.data { + data[k] = v + } + return NewStrStrMapFrom(data, m.mu.IsSafe()) +} + +// IsSubOf checks whether the current map is a sub-map of `other`. +func (m *StrStrMap) IsSubOf(other *StrStrMap) bool { + if m == other { + return true + } + m.mu.RLock() + defer m.mu.RUnlock() + other.mu.RLock() + defer other.mu.RUnlock() + for key, value := range m.data { + otherValue, ok := other.data[key] + if !ok { + return false + } + if otherValue != value { + return false + } + } + return true +} diff --git a/vendor/github.com/gogf/gf/v2/container/gmap/gmap_list_map.go b/vendor/github.com/gogf/gf/v2/container/gmap/gmap_list_map.go new file mode 100644 index 00000000..3197a5ca --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/container/gmap/gmap_list_map.go @@ -0,0 +1,612 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. 
+// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with gm file, +// You can obtain one at https://github.com/gogf/gf. + +package gmap + +import ( + "bytes" + "fmt" + + "github.com/gogf/gf/v2/container/glist" + "github.com/gogf/gf/v2/container/gvar" + "github.com/gogf/gf/v2/internal/deepcopy" + "github.com/gogf/gf/v2/internal/empty" + "github.com/gogf/gf/v2/internal/json" + "github.com/gogf/gf/v2/internal/rwmutex" + "github.com/gogf/gf/v2/util/gconv" +) + +// ListMap is a map that preserves insertion-order. +// +// It is backed by a hash table to store values and doubly-linked list to store ordering. +// +// Structure is not thread safe. +// +// Reference: http://en.wikipedia.org/wiki/Associative_array +type ListMap struct { + mu rwmutex.RWMutex + data map[interface{}]*glist.Element + list *glist.List +} + +type gListMapNode struct { + key interface{} + value interface{} +} + +// NewListMap returns an empty link map. +// ListMap is backed by a hash table to store values and doubly-linked list to store ordering. +// The parameter `safe` is used to specify whether using map in concurrent-safety, +// which is false in default. +func NewListMap(safe ...bool) *ListMap { + return &ListMap{ + mu: rwmutex.Create(safe...), + data: make(map[interface{}]*glist.Element), + list: glist.New(), + } +} + +// NewListMapFrom returns a link map from given map `data`. +// Note that, the param `data` map will be set as the underlying data map(no deep copy), +// there might be some concurrent-safe issues when changing the map outside. +func NewListMapFrom(data map[interface{}]interface{}, safe ...bool) *ListMap { + m := NewListMap(safe...) + m.Sets(data) + return m +} + +// Iterator is alias of IteratorAsc. +func (m *ListMap) Iterator(f func(key, value interface{}) bool) { + m.IteratorAsc(f) +} + +// IteratorAsc iterates the map readonly in ascending order with given callback function `f`. 
+// If `f` returns true, then it continues iterating; or false to stop. +func (m *ListMap) IteratorAsc(f func(key interface{}, value interface{}) bool) { + m.mu.RLock() + defer m.mu.RUnlock() + if m.list != nil { + var node *gListMapNode + m.list.IteratorAsc(func(e *glist.Element) bool { + node = e.Value.(*gListMapNode) + return f(node.key, node.value) + }) + } +} + +// IteratorDesc iterates the map readonly in descending order with given callback function `f`. +// If `f` returns true, then it continues iterating; or false to stop. +func (m *ListMap) IteratorDesc(f func(key interface{}, value interface{}) bool) { + m.mu.RLock() + defer m.mu.RUnlock() + if m.list != nil { + var node *gListMapNode + m.list.IteratorDesc(func(e *glist.Element) bool { + node = e.Value.(*gListMapNode) + return f(node.key, node.value) + }) + } +} + +// Clone returns a new link map with copy of current map data. +func (m *ListMap) Clone(safe ...bool) *ListMap { + return NewListMapFrom(m.Map(), safe...) +} + +// Clear deletes all data of the map, it will remake a new underlying data map. +func (m *ListMap) Clear() { + m.mu.Lock() + m.data = make(map[interface{}]*glist.Element) + m.list = glist.New() + m.mu.Unlock() +} + +// Replace the data of the map with given `data`. +func (m *ListMap) Replace(data map[interface{}]interface{}) { + m.mu.Lock() + m.data = make(map[interface{}]*glist.Element) + m.list = glist.New() + for key, value := range data { + if e, ok := m.data[key]; !ok { + m.data[key] = m.list.PushBack(&gListMapNode{key, value}) + } else { + e.Value = &gListMapNode{key, value} + } + } + m.mu.Unlock() +} + +// Map returns a copy of the underlying data of the map. 
+func (m *ListMap) Map() map[interface{}]interface{} { + m.mu.RLock() + var node *gListMapNode + var data map[interface{}]interface{} + if m.list != nil { + data = make(map[interface{}]interface{}, len(m.data)) + m.list.IteratorAsc(func(e *glist.Element) bool { + node = e.Value.(*gListMapNode) + data[node.key] = node.value + return true + }) + } + m.mu.RUnlock() + return data +} + +// MapStrAny returns a copy of the underlying data of the map as map[string]interface{}. +func (m *ListMap) MapStrAny() map[string]interface{} { + m.mu.RLock() + var node *gListMapNode + var data map[string]interface{} + if m.list != nil { + data = make(map[string]interface{}, len(m.data)) + m.list.IteratorAsc(func(e *glist.Element) bool { + node = e.Value.(*gListMapNode) + data[gconv.String(node.key)] = node.value + return true + }) + } + m.mu.RUnlock() + return data +} + +// FilterEmpty deletes all key-value pair of which the value is empty. +func (m *ListMap) FilterEmpty() { + m.mu.Lock() + if m.list != nil { + var ( + keys = make([]interface{}, 0) + node *gListMapNode + ) + m.list.IteratorAsc(func(e *glist.Element) bool { + node = e.Value.(*gListMapNode) + if empty.IsEmpty(node.value) { + keys = append(keys, node.key) + } + return true + }) + if len(keys) > 0 { + for _, key := range keys { + if e, ok := m.data[key]; ok { + delete(m.data, key) + m.list.Remove(e) + } + } + } + } + m.mu.Unlock() +} + +// Set sets key-value to the map. +func (m *ListMap) Set(key interface{}, value interface{}) { + m.mu.Lock() + if m.data == nil { + m.data = make(map[interface{}]*glist.Element) + m.list = glist.New() + } + if e, ok := m.data[key]; !ok { + m.data[key] = m.list.PushBack(&gListMapNode{key, value}) + } else { + e.Value = &gListMapNode{key, value} + } + m.mu.Unlock() +} + +// Sets batch sets key-values to the map. 
+func (m *ListMap) Sets(data map[interface{}]interface{}) { + m.mu.Lock() + if m.data == nil { + m.data = make(map[interface{}]*glist.Element) + m.list = glist.New() + } + for key, value := range data { + if e, ok := m.data[key]; !ok { + m.data[key] = m.list.PushBack(&gListMapNode{key, value}) + } else { + e.Value = &gListMapNode{key, value} + } + } + m.mu.Unlock() +} + +// Search searches the map with given `key`. +// Second return parameter `found` is true if key was found, otherwise false. +func (m *ListMap) Search(key interface{}) (value interface{}, found bool) { + m.mu.RLock() + if m.data != nil { + if e, ok := m.data[key]; ok { + value = e.Value.(*gListMapNode).value + found = ok + } + } + m.mu.RUnlock() + return +} + +// Get returns the value by given `key`. +func (m *ListMap) Get(key interface{}) (value interface{}) { + m.mu.RLock() + if m.data != nil { + if e, ok := m.data[key]; ok { + value = e.Value.(*gListMapNode).value + } + } + m.mu.RUnlock() + return +} + +// Pop retrieves and deletes an item from the map. +func (m *ListMap) Pop() (key, value interface{}) { + m.mu.Lock() + defer m.mu.Unlock() + for k, e := range m.data { + value = e.Value.(*gListMapNode).value + delete(m.data, k) + m.list.Remove(e) + return k, value + } + return +} + +// Pops retrieves and deletes `size` items from the map. +// It returns all items if size == -1. 
+func (m *ListMap) Pops(size int) map[interface{}]interface{} { + m.mu.Lock() + defer m.mu.Unlock() + if size > len(m.data) || size == -1 { + size = len(m.data) + } + if size == 0 { + return nil + } + index := 0 + newMap := make(map[interface{}]interface{}, size) + for k, e := range m.data { + value := e.Value.(*gListMapNode).value + delete(m.data, k) + m.list.Remove(e) + newMap[k] = value + index++ + if index == size { + break + } + } + return newMap +} + +// doSetWithLockCheck checks whether value of the key exists with mutex.Lock, +// if not exists, set value to the map with given `key`, +// or else just return the existing value. +// +// When setting value, if `value` is type of `func() interface {}`, +// it will be executed with mutex.Lock of the map, +// and its return value will be set to the map with `key`. +// +// It returns value with given `key`. +func (m *ListMap) doSetWithLockCheck(key interface{}, value interface{}) interface{} { + m.mu.Lock() + defer m.mu.Unlock() + if m.data == nil { + m.data = make(map[interface{}]*glist.Element) + m.list = glist.New() + } + if e, ok := m.data[key]; ok { + return e.Value.(*gListMapNode).value + } + if f, ok := value.(func() interface{}); ok { + value = f() + } + if value != nil { + m.data[key] = m.list.PushBack(&gListMapNode{key, value}) + } + return value +} + +// GetOrSet returns the value by key, +// or sets value with given `value` if it does not exist and then returns this value. +func (m *ListMap) GetOrSet(key interface{}, value interface{}) interface{} { + if v, ok := m.Search(key); !ok { + return m.doSetWithLockCheck(key, value) + } else { + return v + } +} + +// GetOrSetFunc returns the value by key, +// or sets value with returned value of callback function `f` if it does not exist +// and then returns this value. 
+func (m *ListMap) GetOrSetFunc(key interface{}, f func() interface{}) interface{} { + if v, ok := m.Search(key); !ok { + return m.doSetWithLockCheck(key, f()) + } else { + return v + } +} + +// GetOrSetFuncLock returns the value by key, +// or sets value with returned value of callback function `f` if it does not exist +// and then returns this value. +// +// GetOrSetFuncLock differs with GetOrSetFunc function is that it executes function `f` +// with mutex.Lock of the map. +func (m *ListMap) GetOrSetFuncLock(key interface{}, f func() interface{}) interface{} { + if v, ok := m.Search(key); !ok { + return m.doSetWithLockCheck(key, f) + } else { + return v + } +} + +// GetVar returns a Var with the value by given `key`. +// The returned Var is un-concurrent safe. +func (m *ListMap) GetVar(key interface{}) *gvar.Var { + return gvar.New(m.Get(key)) +} + +// GetVarOrSet returns a Var with result from GetVarOrSet. +// The returned Var is un-concurrent safe. +func (m *ListMap) GetVarOrSet(key interface{}, value interface{}) *gvar.Var { + return gvar.New(m.GetOrSet(key, value)) +} + +// GetVarOrSetFunc returns a Var with result from GetOrSetFunc. +// The returned Var is un-concurrent safe. +func (m *ListMap) GetVarOrSetFunc(key interface{}, f func() interface{}) *gvar.Var { + return gvar.New(m.GetOrSetFunc(key, f)) +} + +// GetVarOrSetFuncLock returns a Var with result from GetOrSetFuncLock. +// The returned Var is un-concurrent safe. +func (m *ListMap) GetVarOrSetFuncLock(key interface{}, f func() interface{}) *gvar.Var { + return gvar.New(m.GetOrSetFuncLock(key, f)) +} + +// SetIfNotExist sets `value` to the map if the `key` does not exist, and then returns true. +// It returns false if `key` exists, and `value` would be ignored. 
+func (m *ListMap) SetIfNotExist(key interface{}, value interface{}) bool { + if !m.Contains(key) { + m.doSetWithLockCheck(key, value) + return true + } + return false +} + +// SetIfNotExistFunc sets value with return value of callback function `f`, and then returns true. +// It returns false if `key` exists, and `value` would be ignored. +func (m *ListMap) SetIfNotExistFunc(key interface{}, f func() interface{}) bool { + if !m.Contains(key) { + m.doSetWithLockCheck(key, f()) + return true + } + return false +} + +// SetIfNotExistFuncLock sets value with return value of callback function `f`, and then returns true. +// It returns false if `key` exists, and `value` would be ignored. +// +// SetIfNotExistFuncLock differs with SetIfNotExistFunc function is that +// it executes function `f` with mutex.Lock of the map. +func (m *ListMap) SetIfNotExistFuncLock(key interface{}, f func() interface{}) bool { + if !m.Contains(key) { + m.doSetWithLockCheck(key, f) + return true + } + return false +} + +// Remove deletes value from map by given `key`, and return this deleted value. +func (m *ListMap) Remove(key interface{}) (value interface{}) { + m.mu.Lock() + if m.data != nil { + if e, ok := m.data[key]; ok { + value = e.Value.(*gListMapNode).value + delete(m.data, key) + m.list.Remove(e) + } + } + m.mu.Unlock() + return +} + +// Removes batch deletes values of the map by keys. +func (m *ListMap) Removes(keys []interface{}) { + m.mu.Lock() + if m.data != nil { + for _, key := range keys { + if e, ok := m.data[key]; ok { + delete(m.data, key) + m.list.Remove(e) + } + } + } + m.mu.Unlock() +} + +// Keys returns all keys of the map as a slice in ascending order. 
+func (m *ListMap) Keys() []interface{} { + m.mu.RLock() + var ( + keys = make([]interface{}, m.list.Len()) + index = 0 + ) + if m.list != nil { + m.list.IteratorAsc(func(e *glist.Element) bool { + keys[index] = e.Value.(*gListMapNode).key + index++ + return true + }) + } + m.mu.RUnlock() + return keys +} + +// Values returns all values of the map as a slice. +func (m *ListMap) Values() []interface{} { + m.mu.RLock() + var ( + values = make([]interface{}, m.list.Len()) + index = 0 + ) + if m.list != nil { + m.list.IteratorAsc(func(e *glist.Element) bool { + values[index] = e.Value.(*gListMapNode).value + index++ + return true + }) + } + m.mu.RUnlock() + return values +} + +// Contains checks whether a key exists. +// It returns true if the `key` exists, or else false. +func (m *ListMap) Contains(key interface{}) (ok bool) { + m.mu.RLock() + if m.data != nil { + _, ok = m.data[key] + } + m.mu.RUnlock() + return +} + +// Size returns the size of the map. +func (m *ListMap) Size() (size int) { + m.mu.RLock() + size = len(m.data) + m.mu.RUnlock() + return +} + +// IsEmpty checks whether the map is empty. +// It returns true if map is empty, or else false. +func (m *ListMap) IsEmpty() bool { + return m.Size() == 0 +} + +// Flip exchanges key-value of the map to value-key. +func (m *ListMap) Flip() { + data := m.Map() + m.Clear() + for key, value := range data { + m.Set(value, key) + } +} + +// Merge merges two link maps. +// The `other` map will be merged into the map `m`. 
+func (m *ListMap) Merge(other *ListMap) { + m.mu.Lock() + defer m.mu.Unlock() + if m.data == nil { + m.data = make(map[interface{}]*glist.Element) + m.list = glist.New() + } + if other != m { + other.mu.RLock() + defer other.mu.RUnlock() + } + var node *gListMapNode + other.list.IteratorAsc(func(e *glist.Element) bool { + node = e.Value.(*gListMapNode) + if e, ok := m.data[node.key]; !ok { + m.data[node.key] = m.list.PushBack(&gListMapNode{node.key, node.value}) + } else { + e.Value = &gListMapNode{node.key, node.value} + } + return true + }) +} + +// String returns the map as a string. +func (m *ListMap) String() string { + if m == nil { + return "" + } + b, _ := m.MarshalJSON() + return string(b) +} + +// MarshalJSON implements the interface MarshalJSON for json.Marshal. +func (m ListMap) MarshalJSON() (jsonBytes []byte, err error) { + if m.data == nil { + return []byte("null"), nil + } + buffer := bytes.NewBuffer(nil) + buffer.WriteByte('{') + m.Iterator(func(key, value interface{}) bool { + valueBytes, valueJsonErr := json.Marshal(value) + if valueJsonErr != nil { + err = valueJsonErr + return false + } + if buffer.Len() > 1 { + buffer.WriteByte(',') + } + buffer.WriteString(fmt.Sprintf(`"%v":%s`, key, valueBytes)) + return true + }) + buffer.WriteByte('}') + return buffer.Bytes(), nil +} + +// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. +func (m *ListMap) UnmarshalJSON(b []byte) error { + m.mu.Lock() + defer m.mu.Unlock() + if m.data == nil { + m.data = make(map[interface{}]*glist.Element) + m.list = glist.New() + } + var data map[string]interface{} + if err := json.UnmarshalUseNumber(b, &data); err != nil { + return err + } + for key, value := range data { + if e, ok := m.data[key]; !ok { + m.data[key] = m.list.PushBack(&gListMapNode{key, value}) + } else { + e.Value = &gListMapNode{key, value} + } + } + return nil +} + +// UnmarshalValue is an interface implement which sets any type of value for map. 
+func (m *ListMap) UnmarshalValue(value interface{}) (err error) { + m.mu.Lock() + defer m.mu.Unlock() + if m.data == nil { + m.data = make(map[interface{}]*glist.Element) + m.list = glist.New() + } + for k, v := range gconv.Map(value) { + if e, ok := m.data[k]; !ok { + m.data[k] = m.list.PushBack(&gListMapNode{k, v}) + } else { + e.Value = &gListMapNode{k, v} + } + } + return +} + +// DeepCopy implements interface for deep copy of current type. +func (m *ListMap) DeepCopy() interface{} { + if m == nil { + return nil + } + m.mu.RLock() + defer m.mu.RUnlock() + data := make(map[interface{}]interface{}, len(m.data)) + if m.list != nil { + var node *gListMapNode + m.list.IteratorAsc(func(e *glist.Element) bool { + node = e.Value.(*gListMapNode) + data[node.key] = deepcopy.Copy(node.value) + return true + }) + } + return NewListMapFrom(data, m.mu.IsSafe()) +} diff --git a/vendor/github.com/gogf/gf/v2/container/gmap/gmap_tree_map.go b/vendor/github.com/gogf/gf/v2/container/gmap/gmap_tree_map.go new file mode 100644 index 00000000..c81caa48 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/container/gmap/gmap_tree_map.go @@ -0,0 +1,30 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with gm file, +// You can obtain one at https://github.com/gogf/gf. + +package gmap + +import ( + "github.com/gogf/gf/v2/container/gtree" +) + +// TreeMap based on red-black tree, alias of RedBlackTree. +type TreeMap = gtree.RedBlackTree + +// NewTreeMap instantiates a tree map with the custom comparator. +// The parameter `safe` is used to specify whether using tree in concurrent-safety, +// which is false in default. +func NewTreeMap(comparator func(v1, v2 interface{}) int, safe ...bool) *TreeMap { + return gtree.NewRedBlackTree(comparator, safe...) +} + +// NewTreeMapFrom instantiates a tree map with the custom comparator and `data` map. 
+// Note that, the param `data` map will be set as the underlying data map(no deep copy), +// there might be some concurrent-safe issues when changing the map outside. +// The parameter `safe` is used to specify whether using tree in concurrent-safety, +// which is false in default. +func NewTreeMapFrom(comparator func(v1, v2 interface{}) int, data map[interface{}]interface{}, safe ...bool) *TreeMap { + return gtree.NewRedBlackTreeFrom(comparator, data, safe...) +} diff --git a/vendor/github.com/gogf/gf/v2/container/gpool/gpool.go b/vendor/github.com/gogf/gf/v2/container/gpool/gpool.go new file mode 100644 index 00000000..d58fb781 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/container/gpool/gpool.go @@ -0,0 +1,188 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +// Package gpool provides object-reusable concurrent-safe pool. +package gpool + +import ( + "context" + "time" + + "github.com/gogf/gf/v2/container/glist" + "github.com/gogf/gf/v2/container/gtype" + "github.com/gogf/gf/v2/errors/gcode" + "github.com/gogf/gf/v2/errors/gerror" + "github.com/gogf/gf/v2/os/gtime" + "github.com/gogf/gf/v2/os/gtimer" +) + +// Pool is an Object-Reusable Pool. +type Pool struct { + list *glist.List // Available/idle items list. + closed *gtype.Bool // Whether the pool is closed. + TTL time.Duration // Time To Live for pool items. + NewFunc func() (interface{}, error) // Callback function to create pool item. + // ExpireFunc is the for expired items destruction. + // This function needs to be defined when the pool items + // need to perform additional destruction operations. + // Eg: net.Conn, os.File, etc. + ExpireFunc func(interface{}) +} + +// Pool item. +type poolItem struct { + value interface{} // Item value. 
+ expireAt int64 // Expire timestamp in milliseconds. +} + +// NewFunc Creation function for object. +type NewFunc func() (interface{}, error) + +// ExpireFunc Destruction function for object. +type ExpireFunc func(interface{}) + +// New creates and returns a new object pool. +// To ensure execution efficiency, the expiration time cannot be modified once it is set. +// +// Note the expiration logic: +// ttl = 0 : not expired; +// ttl < 0 : immediate expired after use; +// ttl > 0 : timeout expired; +func New(ttl time.Duration, newFunc NewFunc, expireFunc ...ExpireFunc) *Pool { + r := &Pool{ + list: glist.New(true), + closed: gtype.NewBool(), + TTL: ttl, + NewFunc: newFunc, + } + if len(expireFunc) > 0 { + r.ExpireFunc = expireFunc[0] + } + gtimer.AddSingleton(context.Background(), time.Second, r.checkExpireItems) + return r +} + +// Put puts an item to pool. +func (p *Pool) Put(value interface{}) error { + if p.closed.Val() { + return gerror.NewCode(gcode.CodeInvalidOperation, "pool is closed") + } + item := &poolItem{ + value: value, + } + if p.TTL == 0 { + item.expireAt = 0 + } else { + // As for Golang version < 1.13, there's no method Milliseconds for time.Duration. + // So we need calculate the milliseconds using its nanoseconds value. + item.expireAt = gtime.TimestampMilli() + p.TTL.Nanoseconds()/1000000 + } + p.list.PushBack(item) + return nil +} + +// MustPut puts an item to pool, it panics if any error occurs. +func (p *Pool) MustPut(value interface{}) { + if err := p.Put(value); err != nil { + panic(err) + } +} + +// Clear clears pool, which means it will remove all items from pool. +func (p *Pool) Clear() { + if p.ExpireFunc != nil { + for { + if r := p.list.PopFront(); r != nil { + p.ExpireFunc(r.(*poolItem).value) + } else { + break + } + } + } else { + p.list.RemoveAll() + } +} + +// Get picks and returns an item from pool. If the pool is empty and NewFunc is defined, +// it creates and returns one from NewFunc. 
+func (p *Pool) Get() (interface{}, error) { + for !p.closed.Val() { + if r := p.list.PopFront(); r != nil { + f := r.(*poolItem) + if f.expireAt == 0 || f.expireAt > gtime.TimestampMilli() { + return f.value, nil + } else if p.ExpireFunc != nil { + // TODO: move expire function calling asynchronously out from `Get` operation. + p.ExpireFunc(f.value) + } + } else { + break + } + } + if p.NewFunc != nil { + return p.NewFunc() + } + return nil, gerror.NewCode(gcode.CodeInvalidOperation, "pool is empty") +} + +// Size returns the count of available items of pool. +func (p *Pool) Size() int { + return p.list.Len() +} + +// Close closes the pool. If `p` has ExpireFunc, +// then it automatically closes all items using this function before it's closed. +// Commonly you do not need to call this function manually. +func (p *Pool) Close() { + p.closed.Set(true) +} + +// checkExpire removes expired items from pool in every second. +func (p *Pool) checkExpireItems(ctx context.Context) { + if p.closed.Val() { + // If p has ExpireFunc, + // then it must close all items using this function. + if p.ExpireFunc != nil { + for { + if r := p.list.PopFront(); r != nil { + p.ExpireFunc(r.(*poolItem).value) + } else { + break + } + } + } + gtimer.Exit() + } + // All items do not expire. + if p.TTL == 0 { + return + } + // The latest item expire timestamp in milliseconds. + var latestExpire int64 = -1 + // Retrieve the current timestamp in milliseconds, it expires the items + // by comparing with this timestamp. It is not accurate comparison for + // every item expired, but high performance. + var timestampMilli = gtime.TimestampMilli() + for { + if latestExpire > timestampMilli { + break + } + if r := p.list.PopFront(); r != nil { + item := r.(*poolItem) + latestExpire = item.expireAt + // TODO improve the auto-expiration mechanism of the pool. 
+ if item.expireAt > timestampMilli { + p.list.PushFront(item) + break + } + if p.ExpireFunc != nil { + p.ExpireFunc(item.value) + } + } else { + break + } + } +} diff --git a/vendor/github.com/gogf/gf/v2/container/gqueue/gqueue.go b/vendor/github.com/gogf/gf/v2/container/gqueue/gqueue.go new file mode 100644 index 00000000..67f9593f --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/container/gqueue/gqueue.go @@ -0,0 +1,147 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +// Package gqueue provides dynamic/static concurrent-safe queue. +// +// Features: +// +// 1. FIFO queue(data -> list -> chan); +// +// 2. Fast creation and initialization; +// +// 3. Support dynamic queue size(unlimited queue size); +// +// 4. Blocking when reading data from queue; +package gqueue + +import ( + "math" + + "github.com/gogf/gf/v2/container/glist" + "github.com/gogf/gf/v2/container/gtype" +) + +// Queue is a concurrent-safe queue built on doubly linked list and channel. +type Queue struct { + limit int // Limit for queue size. + length *gtype.Int64 // Queue length. + list *glist.List // Underlying list structure for data maintaining. + closed *gtype.Bool // Whether queue is closed. + events chan struct{} // Events for data writing. + C chan interface{} // Underlying channel for data reading. +} + +const ( + defaultQueueSize = 10000 // Size for queue buffer. + defaultBatchSize = 10 // Max batch size per-fetching from list. +) + +// New returns an empty queue object. +// Optional parameter `limit` is used to limit the size of the queue, which is unlimited in default. +// When `limit` is given, the queue will be static and high performance which is comparable with stdlib channel. 
+func New(limit ...int) *Queue { + q := &Queue{ + closed: gtype.NewBool(), + length: gtype.NewInt64(), + } + if len(limit) > 0 && limit[0] > 0 { + q.limit = limit[0] + q.C = make(chan interface{}, limit[0]) + } else { + q.list = glist.New(true) + q.events = make(chan struct{}, math.MaxInt32) + q.C = make(chan interface{}, defaultQueueSize) + go q.asyncLoopFromListToChannel() + } + return q +} + +// Push pushes the data `v` into the queue. +// Note that it would panic if Push is called after the queue is closed. +func (q *Queue) Push(v interface{}) { + q.length.Add(1) + if q.limit > 0 { + q.C <- v + } else { + q.list.PushBack(v) + if len(q.events) < defaultQueueSize { + q.events <- struct{}{} + } + } +} + +// Pop pops an item from the queue in FIFO way. +// Note that it would return nil immediately if Pop is called after the queue is closed. +func (q *Queue) Pop() interface{} { + item := <-q.C + q.length.Add(-1) + return item +} + +// Close closes the queue. +// Notice: It would notify all goroutines return immediately, +// which are being blocked reading using Pop method. +func (q *Queue) Close() { + if !q.closed.Cas(false, true) { + return + } + if q.events != nil { + close(q.events) + } + if q.limit > 0 { + close(q.C) + } else { + for i := 0; i < defaultBatchSize; i++ { + q.Pop() + } + } +} + +// Len returns the length of the queue. +// Note that the result might not be accurate as there's an +// asynchronous channel reading the list constantly. +func (q *Queue) Len() (length int64) { + return q.length.Val() +} + +// Size is alias of Len. +func (q *Queue) Size() int64 { + return q.Len() +} + +// asyncLoopFromListToChannel starts an asynchronous goroutine, +// which handles the data synchronization from list `q.list` to channel `q.C`. 
+func (q *Queue) asyncLoopFromListToChannel() { + defer func() { + if q.closed.Val() { + _ = recover() + } + }() + for !q.closed.Val() { + <-q.events + for !q.closed.Val() { + if length := q.list.Len(); length > 0 { + if length > defaultBatchSize { + length = defaultBatchSize + } + for _, v := range q.list.PopFronts(length) { + // When q.C is closed, it will panic here, especially q.C is being blocked for writing. + // If any error occurs here, it will be caught by recover and be ignored. + q.C <- v + } + } else { + break + } + } + // Clear q.events to remain just one event to do the next synchronization check. + for i := 0; i < len(q.events)-1; i++ { + <-q.events + } + } + // It should be here to close `q.C` if `q` is unlimited size. + // It's the sender's responsibility to close channel when it should be closed. + close(q.C) +} diff --git a/vendor/github.com/gogf/gf/v2/container/gset/gset_any_set.go b/vendor/github.com/gogf/gf/v2/container/gset/gset_any_set.go new file mode 100644 index 00000000..179862ca --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/container/gset/gset_any_set.go @@ -0,0 +1,526 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +// Package gset provides kinds of concurrent-safe/unsafe sets. +package gset + +import ( + "bytes" + + "github.com/gogf/gf/v2/internal/json" + "github.com/gogf/gf/v2/internal/rwmutex" + "github.com/gogf/gf/v2/text/gstr" + "github.com/gogf/gf/v2/util/gconv" +) + +type Set struct { + mu rwmutex.RWMutex + data map[interface{}]struct{} +} + +// New create and returns a new set, which contains un-repeated items. +// The parameter `safe` is used to specify whether using set in concurrent-safety, +// which is false in default. +func New(safe ...bool) *Set { + return NewSet(safe...) 
+} + +// NewSet create and returns a new set, which contains un-repeated items. +// Also see New. +func NewSet(safe ...bool) *Set { + return &Set{ + data: make(map[interface{}]struct{}), + mu: rwmutex.Create(safe...), + } +} + +// NewFrom returns a new set from `items`. +// Parameter `items` can be either a variable of any type, or a slice. +func NewFrom(items interface{}, safe ...bool) *Set { + m := make(map[interface{}]struct{}) + for _, v := range gconv.Interfaces(items) { + m[v] = struct{}{} + } + return &Set{ + data: m, + mu: rwmutex.Create(safe...), + } +} + +// Iterator iterates the set readonly with given callback function `f`, +// if `f` returns true then continue iterating; or false to stop. +func (set *Set) Iterator(f func(v interface{}) bool) { + set.mu.RLock() + defer set.mu.RUnlock() + for k := range set.data { + if !f(k) { + break + } + } +} + +// Add adds one or multiple items to the set. +func (set *Set) Add(items ...interface{}) { + set.mu.Lock() + if set.data == nil { + set.data = make(map[interface{}]struct{}) + } + for _, v := range items { + set.data[v] = struct{}{} + } + set.mu.Unlock() +} + +// AddIfNotExist checks whether item exists in the set, +// it adds the item to set and returns true if it does not exists in the set, +// or else it does nothing and returns false. +// +// Note that, if `item` is nil, it does nothing and returns false. +func (set *Set) AddIfNotExist(item interface{}) bool { + if item == nil { + return false + } + if !set.Contains(item) { + set.mu.Lock() + defer set.mu.Unlock() + if set.data == nil { + set.data = make(map[interface{}]struct{}) + } + if _, ok := set.data[item]; !ok { + set.data[item] = struct{}{} + return true + } + } + return false +} + +// AddIfNotExistFunc checks whether item exists in the set, +// it adds the item to set and returns true if it does not exist in the set and +// function `f` returns true, or else it does nothing and returns false. 
+// +// Note that, if `item` is nil, it does nothing and returns false. The function `f` +// is executed without writing lock. +func (set *Set) AddIfNotExistFunc(item interface{}, f func() bool) bool { + if item == nil { + return false + } + if !set.Contains(item) { + if f() { + set.mu.Lock() + defer set.mu.Unlock() + if set.data == nil { + set.data = make(map[interface{}]struct{}) + } + if _, ok := set.data[item]; !ok { + set.data[item] = struct{}{} + return true + } + } + } + return false +} + +// AddIfNotExistFuncLock checks whether item exists in the set, +// it adds the item to set and returns true if it does not exists in the set and +// function `f` returns true, or else it does nothing and returns false. +// +// Note that, if `item` is nil, it does nothing and returns false. The function `f` +// is executed within writing lock. +func (set *Set) AddIfNotExistFuncLock(item interface{}, f func() bool) bool { + if item == nil { + return false + } + if !set.Contains(item) { + set.mu.Lock() + defer set.mu.Unlock() + if set.data == nil { + set.data = make(map[interface{}]struct{}) + } + if f() { + if _, ok := set.data[item]; !ok { + set.data[item] = struct{}{} + return true + } + } + } + return false +} + +// Contains checks whether the set contains `item`. +func (set *Set) Contains(item interface{}) bool { + var ok bool + set.mu.RLock() + if set.data != nil { + _, ok = set.data[item] + } + set.mu.RUnlock() + return ok +} + +// Remove deletes `item` from set. +func (set *Set) Remove(item interface{}) { + set.mu.Lock() + if set.data != nil { + delete(set.data, item) + } + set.mu.Unlock() +} + +// Size returns the size of the set. +func (set *Set) Size() int { + set.mu.RLock() + l := len(set.data) + set.mu.RUnlock() + return l +} + +// Clear deletes all items of the set. +func (set *Set) Clear() { + set.mu.Lock() + set.data = make(map[interface{}]struct{}) + set.mu.Unlock() +} + +// Slice returns the an of items of the set as slice. 
+func (set *Set) Slice() []interface{} { + set.mu.RLock() + var ( + i = 0 + ret = make([]interface{}, len(set.data)) + ) + for item := range set.data { + ret[i] = item + i++ + } + set.mu.RUnlock() + return ret +} + +// Join joins items with a string `glue`. +func (set *Set) Join(glue string) string { + set.mu.RLock() + defer set.mu.RUnlock() + if len(set.data) == 0 { + return "" + } + var ( + l = len(set.data) + i = 0 + buffer = bytes.NewBuffer(nil) + ) + for k := range set.data { + buffer.WriteString(gconv.String(k)) + if i != l-1 { + buffer.WriteString(glue) + } + i++ + } + return buffer.String() +} + +// String returns items as a string, which implements like json.Marshal does. +func (set *Set) String() string { + if set == nil { + return "" + } + set.mu.RLock() + defer set.mu.RUnlock() + var ( + s string + l = len(set.data) + i = 0 + buffer = bytes.NewBuffer(nil) + ) + buffer.WriteByte('[') + for k := range set.data { + s = gconv.String(k) + if gstr.IsNumeric(s) { + buffer.WriteString(s) + } else { + buffer.WriteString(`"` + gstr.QuoteMeta(s, `"\`) + `"`) + } + if i != l-1 { + buffer.WriteByte(',') + } + i++ + } + buffer.WriteByte(']') + return buffer.String() +} + +// LockFunc locks writing with callback function `f`. +func (set *Set) LockFunc(f func(m map[interface{}]struct{})) { + set.mu.Lock() + defer set.mu.Unlock() + f(set.data) +} + +// RLockFunc locks reading with callback function `f`. +func (set *Set) RLockFunc(f func(m map[interface{}]struct{})) { + set.mu.RLock() + defer set.mu.RUnlock() + f(set.data) +} + +// Equal checks whether the two sets equal. +func (set *Set) Equal(other *Set) bool { + if set == other { + return true + } + set.mu.RLock() + defer set.mu.RUnlock() + other.mu.RLock() + defer other.mu.RUnlock() + if len(set.data) != len(other.data) { + return false + } + for key := range set.data { + if _, ok := other.data[key]; !ok { + return false + } + } + return true +} + +// IsSubsetOf checks whether the current set is a sub-set of `other`. 
+func (set *Set) IsSubsetOf(other *Set) bool { + if set == other { + return true + } + set.mu.RLock() + defer set.mu.RUnlock() + other.mu.RLock() + defer other.mu.RUnlock() + for key := range set.data { + if _, ok := other.data[key]; !ok { + return false + } + } + return true +} + +// Union returns a new set which is the union of `set` and `others`. +// Which means, all the items in `newSet` are in `set` or in `others`. +func (set *Set) Union(others ...*Set) (newSet *Set) { + newSet = NewSet() + set.mu.RLock() + defer set.mu.RUnlock() + for _, other := range others { + if set != other { + other.mu.RLock() + } + for k, v := range set.data { + newSet.data[k] = v + } + if set != other { + for k, v := range other.data { + newSet.data[k] = v + } + } + if set != other { + other.mu.RUnlock() + } + } + + return +} + +// Diff returns a new set which is the difference set from `set` to `others`. +// Which means, all the items in `newSet` are in `set` but not in `others`. +func (set *Set) Diff(others ...*Set) (newSet *Set) { + newSet = NewSet() + set.mu.RLock() + defer set.mu.RUnlock() + for _, other := range others { + if set == other { + continue + } + other.mu.RLock() + for k, v := range set.data { + if _, ok := other.data[k]; !ok { + newSet.data[k] = v + } + } + other.mu.RUnlock() + } + return +} + +// Intersect returns a new set which is the intersection from `set` to `others`. +// Which means, all the items in `newSet` are in `set` and also in `others`. +func (set *Set) Intersect(others ...*Set) (newSet *Set) { + newSet = NewSet() + set.mu.RLock() + defer set.mu.RUnlock() + for _, other := range others { + if set != other { + other.mu.RLock() + } + for k, v := range set.data { + if _, ok := other.data[k]; ok { + newSet.data[k] = v + } + } + if set != other { + other.mu.RUnlock() + } + } + return +} + +// Complement returns a new set which is the complement from `set` to `full`. +// Which means, all the items in `newSet` are in `full` and not in `set`. 
+// +// It returns the difference between `full` and `set` +// if the given set `full` is not the full set of `set`. +func (set *Set) Complement(full *Set) (newSet *Set) { + newSet = NewSet() + set.mu.RLock() + defer set.mu.RUnlock() + if set != full { + full.mu.RLock() + defer full.mu.RUnlock() + } + for k, v := range full.data { + if _, ok := set.data[k]; !ok { + newSet.data[k] = v + } + } + return +} + +// Merge adds items from `others` sets into `set`. +func (set *Set) Merge(others ...*Set) *Set { + set.mu.Lock() + defer set.mu.Unlock() + for _, other := range others { + if set != other { + other.mu.RLock() + } + for k, v := range other.data { + set.data[k] = v + } + if set != other { + other.mu.RUnlock() + } + } + return set +} + +// Sum sums items. +// Note: The items should be converted to int type, +// or you'd get a result that you unexpected. +func (set *Set) Sum() (sum int) { + set.mu.RLock() + defer set.mu.RUnlock() + for k := range set.data { + sum += gconv.Int(k) + } + return +} + +// Pop randomly pops an item from set. +func (set *Set) Pop() interface{} { + set.mu.Lock() + defer set.mu.Unlock() + for k := range set.data { + delete(set.data, k) + return k + } + return nil +} + +// Pops randomly pops `size` items from set. +// It returns all items if size == -1. +func (set *Set) Pops(size int) []interface{} { + set.mu.Lock() + defer set.mu.Unlock() + if size > len(set.data) || size == -1 { + size = len(set.data) + } + if size <= 0 { + return nil + } + index := 0 + array := make([]interface{}, size) + for k := range set.data { + delete(set.data, k) + array[index] = k + index++ + if index == size { + break + } + } + return array +} + +// Walk applies a user supplied function `f` to every item of set. 
+func (set *Set) Walk(f func(item interface{}) interface{}) *Set { + set.mu.Lock() + defer set.mu.Unlock() + m := make(map[interface{}]struct{}, len(set.data)) + for k, v := range set.data { + m[f(k)] = v + } + set.data = m + return set +} + +// MarshalJSON implements the interface MarshalJSON for json.Marshal. +func (set Set) MarshalJSON() ([]byte, error) { + return json.Marshal(set.Slice()) +} + +// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. +func (set *Set) UnmarshalJSON(b []byte) error { + set.mu.Lock() + defer set.mu.Unlock() + if set.data == nil { + set.data = make(map[interface{}]struct{}) + } + var array []interface{} + if err := json.UnmarshalUseNumber(b, &array); err != nil { + return err + } + for _, v := range array { + set.data[v] = struct{}{} + } + return nil +} + +// UnmarshalValue is an interface implement which sets any type of value for set. +func (set *Set) UnmarshalValue(value interface{}) (err error) { + set.mu.Lock() + defer set.mu.Unlock() + if set.data == nil { + set.data = make(map[interface{}]struct{}) + } + var array []interface{} + switch value.(type) { + case string, []byte: + err = json.UnmarshalUseNumber(gconv.Bytes(value), &array) + default: + array = gconv.SliceAny(value) + } + for _, v := range array { + set.data[v] = struct{}{} + } + return +} + +// DeepCopy implements interface for deep copy of current type. +func (set *Set) DeepCopy() interface{} { + if set == nil { + return nil + } + set.mu.RLock() + defer set.mu.RUnlock() + data := make([]interface{}, 0) + for k := range set.data { + data = append(data, k) + } + return NewFrom(data, set.mu.IsSafe()) +} diff --git a/vendor/github.com/gogf/gf/v2/container/gset/gset_int_set.go b/vendor/github.com/gogf/gf/v2/container/gset/gset_int_set.go new file mode 100644 index 00000000..b29ebfd0 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/container/gset/gset_int_set.go @@ -0,0 +1,489 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. 
+// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. +// + +package gset + +import ( + "bytes" + + "github.com/gogf/gf/v2/internal/json" + "github.com/gogf/gf/v2/internal/rwmutex" + "github.com/gogf/gf/v2/util/gconv" +) + +type IntSet struct { + mu rwmutex.RWMutex + data map[int]struct{} +} + +// NewIntSet create and returns a new set, which contains un-repeated items. +// The parameter `safe` is used to specify whether using set in concurrent-safety, +// which is false in default. +func NewIntSet(safe ...bool) *IntSet { + return &IntSet{ + mu: rwmutex.Create(safe...), + data: make(map[int]struct{}), + } +} + +// NewIntSetFrom returns a new set from `items`. +func NewIntSetFrom(items []int, safe ...bool) *IntSet { + m := make(map[int]struct{}) + for _, v := range items { + m[v] = struct{}{} + } + return &IntSet{ + mu: rwmutex.Create(safe...), + data: m, + } +} + +// Iterator iterates the set readonly with given callback function `f`, +// if `f` returns true then continue iterating; or false to stop. +func (set *IntSet) Iterator(f func(v int) bool) { + set.mu.RLock() + defer set.mu.RUnlock() + for k := range set.data { + if !f(k) { + break + } + } +} + +// Add adds one or multiple items to the set. +func (set *IntSet) Add(item ...int) { + set.mu.Lock() + if set.data == nil { + set.data = make(map[int]struct{}) + } + for _, v := range item { + set.data[v] = struct{}{} + } + set.mu.Unlock() +} + +// AddIfNotExist checks whether item exists in the set, +// it adds the item to set and returns true if it does not exists in the set, +// or else it does nothing and returns false. +// +// Note that, if `item` is nil, it does nothing and returns false. 
+func (set *IntSet) AddIfNotExist(item int) bool { + if !set.Contains(item) { + set.mu.Lock() + defer set.mu.Unlock() + if set.data == nil { + set.data = make(map[int]struct{}) + } + if _, ok := set.data[item]; !ok { + set.data[item] = struct{}{} + return true + } + } + return false +} + +// AddIfNotExistFunc checks whether item exists in the set, +// it adds the item to set and returns true if it does not exists in the set and +// function `f` returns true, or else it does nothing and returns false. +// +// Note that, the function `f` is executed without writing lock. +func (set *IntSet) AddIfNotExistFunc(item int, f func() bool) bool { + if !set.Contains(item) { + if f() { + set.mu.Lock() + defer set.mu.Unlock() + if set.data == nil { + set.data = make(map[int]struct{}) + } + if _, ok := set.data[item]; !ok { + set.data[item] = struct{}{} + return true + } + } + } + return false +} + +// AddIfNotExistFuncLock checks whether item exists in the set, +// it adds the item to set and returns true if it does not exists in the set and +// function `f` returns true, or else it does nothing and returns false. +// +// Note that, the function `f` is executed without writing lock. +func (set *IntSet) AddIfNotExistFuncLock(item int, f func() bool) bool { + if !set.Contains(item) { + set.mu.Lock() + defer set.mu.Unlock() + if set.data == nil { + set.data = make(map[int]struct{}) + } + if f() { + if _, ok := set.data[item]; !ok { + set.data[item] = struct{}{} + return true + } + } + } + return false +} + +// Contains checks whether the set contains `item`. +func (set *IntSet) Contains(item int) bool { + var ok bool + set.mu.RLock() + if set.data != nil { + _, ok = set.data[item] + } + set.mu.RUnlock() + return ok +} + +// Remove deletes `item` from set. +func (set *IntSet) Remove(item int) { + set.mu.Lock() + if set.data != nil { + delete(set.data, item) + } + set.mu.Unlock() +} + +// Size returns the size of the set. 
+func (set *IntSet) Size() int { + set.mu.RLock() + l := len(set.data) + set.mu.RUnlock() + return l +} + +// Clear deletes all items of the set. +func (set *IntSet) Clear() { + set.mu.Lock() + set.data = make(map[int]struct{}) + set.mu.Unlock() +} + +// Slice returns the an of items of the set as slice. +func (set *IntSet) Slice() []int { + set.mu.RLock() + var ( + i = 0 + ret = make([]int, len(set.data)) + ) + for k := range set.data { + ret[i] = k + i++ + } + set.mu.RUnlock() + return ret +} + +// Join joins items with a string `glue`. +func (set *IntSet) Join(glue string) string { + set.mu.RLock() + defer set.mu.RUnlock() + if len(set.data) == 0 { + return "" + } + var ( + l = len(set.data) + i = 0 + buffer = bytes.NewBuffer(nil) + ) + for k := range set.data { + buffer.WriteString(gconv.String(k)) + if i != l-1 { + buffer.WriteString(glue) + } + i++ + } + return buffer.String() +} + +// String returns items as a string, which implements like json.Marshal does. +func (set *IntSet) String() string { + if set == nil { + return "" + } + return "[" + set.Join(",") + "]" +} + +// LockFunc locks writing with callback function `f`. +func (set *IntSet) LockFunc(f func(m map[int]struct{})) { + set.mu.Lock() + defer set.mu.Unlock() + f(set.data) +} + +// RLockFunc locks reading with callback function `f`. +func (set *IntSet) RLockFunc(f func(m map[int]struct{})) { + set.mu.RLock() + defer set.mu.RUnlock() + f(set.data) +} + +// Equal checks whether the two sets equal. +func (set *IntSet) Equal(other *IntSet) bool { + if set == other { + return true + } + set.mu.RLock() + defer set.mu.RUnlock() + other.mu.RLock() + defer other.mu.RUnlock() + if len(set.data) != len(other.data) { + return false + } + for key := range set.data { + if _, ok := other.data[key]; !ok { + return false + } + } + return true +} + +// IsSubsetOf checks whether the current set is a sub-set of `other`. 
+func (set *IntSet) IsSubsetOf(other *IntSet) bool { + if set == other { + return true + } + set.mu.RLock() + defer set.mu.RUnlock() + other.mu.RLock() + defer other.mu.RUnlock() + for key := range set.data { + if _, ok := other.data[key]; !ok { + return false + } + } + return true +} + +// Union returns a new set which is the union of `set` and `other`. +// Which means, all the items in `newSet` are in `set` or in `other`. +func (set *IntSet) Union(others ...*IntSet) (newSet *IntSet) { + newSet = NewIntSet() + set.mu.RLock() + defer set.mu.RUnlock() + for _, other := range others { + if set != other { + other.mu.RLock() + } + for k, v := range set.data { + newSet.data[k] = v + } + if set != other { + for k, v := range other.data { + newSet.data[k] = v + } + } + if set != other { + other.mu.RUnlock() + } + } + + return +} + +// Diff returns a new set which is the difference set from `set` to `other`. +// Which means, all the items in `newSet` are in `set` but not in `other`. +func (set *IntSet) Diff(others ...*IntSet) (newSet *IntSet) { + newSet = NewIntSet() + set.mu.RLock() + defer set.mu.RUnlock() + for _, other := range others { + if set == other { + continue + } + other.mu.RLock() + for k, v := range set.data { + if _, ok := other.data[k]; !ok { + newSet.data[k] = v + } + } + other.mu.RUnlock() + } + return +} + +// Intersect returns a new set which is the intersection from `set` to `other`. +// Which means, all the items in `newSet` are in `set` and also in `other`. +func (set *IntSet) Intersect(others ...*IntSet) (newSet *IntSet) { + newSet = NewIntSet() + set.mu.RLock() + defer set.mu.RUnlock() + for _, other := range others { + if set != other { + other.mu.RLock() + } + for k, v := range set.data { + if _, ok := other.data[k]; ok { + newSet.data[k] = v + } + } + if set != other { + other.mu.RUnlock() + } + } + return +} + +// Complement returns a new set which is the complement from `set` to `full`. 
+// Which means, all the items in `newSet` are in `full` and not in `set`. +// +// It returns the difference between `full` and `set` +// if the given set `full` is not the full set of `set`. +func (set *IntSet) Complement(full *IntSet) (newSet *IntSet) { + newSet = NewIntSet() + set.mu.RLock() + defer set.mu.RUnlock() + if set != full { + full.mu.RLock() + defer full.mu.RUnlock() + } + for k, v := range full.data { + if _, ok := set.data[k]; !ok { + newSet.data[k] = v + } + } + return +} + +// Merge adds items from `others` sets into `set`. +func (set *IntSet) Merge(others ...*IntSet) *IntSet { + set.mu.Lock() + defer set.mu.Unlock() + for _, other := range others { + if set != other { + other.mu.RLock() + } + for k, v := range other.data { + set.data[k] = v + } + if set != other { + other.mu.RUnlock() + } + } + return set +} + +// Sum sums items. +// Note: The items should be converted to int type, +// or you'd get a result that you unexpected. +func (set *IntSet) Sum() (sum int) { + set.mu.RLock() + defer set.mu.RUnlock() + for k := range set.data { + sum += k + } + return +} + +// Pop randomly pops an item from set. +func (set *IntSet) Pop() int { + set.mu.Lock() + defer set.mu.Unlock() + for k := range set.data { + delete(set.data, k) + return k + } + return 0 +} + +// Pops randomly pops `size` items from set. +// It returns all items if size == -1. +func (set *IntSet) Pops(size int) []int { + set.mu.Lock() + defer set.mu.Unlock() + if size > len(set.data) || size == -1 { + size = len(set.data) + } + if size <= 0 { + return nil + } + index := 0 + array := make([]int, size) + for k := range set.data { + delete(set.data, k) + array[index] = k + index++ + if index == size { + break + } + } + return array +} + +// Walk applies a user supplied function `f` to every item of set. 
+func (set *IntSet) Walk(f func(item int) int) *IntSet { + set.mu.Lock() + defer set.mu.Unlock() + m := make(map[int]struct{}, len(set.data)) + for k, v := range set.data { + m[f(k)] = v + } + set.data = m + return set +} + +// MarshalJSON implements the interface MarshalJSON for json.Marshal. +func (set IntSet) MarshalJSON() ([]byte, error) { + return json.Marshal(set.Slice()) +} + +// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. +func (set *IntSet) UnmarshalJSON(b []byte) error { + set.mu.Lock() + defer set.mu.Unlock() + if set.data == nil { + set.data = make(map[int]struct{}) + } + var array []int + if err := json.UnmarshalUseNumber(b, &array); err != nil { + return err + } + for _, v := range array { + set.data[v] = struct{}{} + } + return nil +} + +// UnmarshalValue is an interface implement which sets any type of value for set. +func (set *IntSet) UnmarshalValue(value interface{}) (err error) { + set.mu.Lock() + defer set.mu.Unlock() + if set.data == nil { + set.data = make(map[int]struct{}) + } + var array []int + switch value.(type) { + case string, []byte: + err = json.UnmarshalUseNumber(gconv.Bytes(value), &array) + default: + array = gconv.SliceInt(value) + } + for _, v := range array { + set.data[v] = struct{}{} + } + return +} + +// DeepCopy implements interface for deep copy of current type. +func (set *IntSet) DeepCopy() interface{} { + if set == nil { + return nil + } + set.mu.RLock() + defer set.mu.RUnlock() + var ( + slice = make([]int, len(set.data)) + index = 0 + ) + for k := range set.data { + slice[index] = k + index++ + } + return NewIntSetFrom(slice, set.mu.IsSafe()) +} diff --git a/vendor/github.com/gogf/gf/v2/container/gset/gset_str_set.go b/vendor/github.com/gogf/gf/v2/container/gset/gset_str_set.go new file mode 100644 index 00000000..386ba6b2 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/container/gset/gset_str_set.go @@ -0,0 +1,519 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. 
+// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. +// + +package gset + +import ( + "bytes" + "strings" + + "github.com/gogf/gf/v2/internal/json" + "github.com/gogf/gf/v2/internal/rwmutex" + "github.com/gogf/gf/v2/text/gstr" + "github.com/gogf/gf/v2/util/gconv" +) + +type StrSet struct { + mu rwmutex.RWMutex + data map[string]struct{} +} + +// NewStrSet create and returns a new set, which contains un-repeated items. +// The parameter `safe` is used to specify whether using set in concurrent-safety, +// which is false in default. +func NewStrSet(safe ...bool) *StrSet { + return &StrSet{ + mu: rwmutex.Create(safe...), + data: make(map[string]struct{}), + } +} + +// NewStrSetFrom returns a new set from `items`. +func NewStrSetFrom(items []string, safe ...bool) *StrSet { + m := make(map[string]struct{}) + for _, v := range items { + m[v] = struct{}{} + } + return &StrSet{ + mu: rwmutex.Create(safe...), + data: m, + } +} + +// Iterator iterates the set readonly with given callback function `f`, +// if `f` returns true then continue iterating; or false to stop. +func (set *StrSet) Iterator(f func(v string) bool) { + set.mu.RLock() + defer set.mu.RUnlock() + for k := range set.data { + if !f(k) { + break + } + } +} + +// Add adds one or multiple items to the set. +func (set *StrSet) Add(item ...string) { + set.mu.Lock() + if set.data == nil { + set.data = make(map[string]struct{}) + } + for _, v := range item { + set.data[v] = struct{}{} + } + set.mu.Unlock() +} + +// AddIfNotExist checks whether item exists in the set, +// it adds the item to set and returns true if it does not exist in the set, +// or else it does nothing and returns false. 
+func (set *StrSet) AddIfNotExist(item string) bool { + if !set.Contains(item) { + set.mu.Lock() + defer set.mu.Unlock() + if set.data == nil { + set.data = make(map[string]struct{}) + } + if _, ok := set.data[item]; !ok { + set.data[item] = struct{}{} + return true + } + } + return false +} + +// AddIfNotExistFunc checks whether item exists in the set, +// it adds the item to set and returns true if it does not exists in the set and +// function `f` returns true, or else it does nothing and returns false. +// +// Note that, the function `f` is executed without writing lock. +func (set *StrSet) AddIfNotExistFunc(item string, f func() bool) bool { + if !set.Contains(item) { + if f() { + set.mu.Lock() + defer set.mu.Unlock() + if set.data == nil { + set.data = make(map[string]struct{}) + } + if _, ok := set.data[item]; !ok { + set.data[item] = struct{}{} + return true + } + } + } + return false +} + +// AddIfNotExistFuncLock checks whether item exists in the set, +// it adds the item to set and returns true if it does not exists in the set and +// function `f` returns true, or else it does nothing and returns false. +// +// Note that, the function `f` is executed without writing lock. +func (set *StrSet) AddIfNotExistFuncLock(item string, f func() bool) bool { + if !set.Contains(item) { + set.mu.Lock() + defer set.mu.Unlock() + if set.data == nil { + set.data = make(map[string]struct{}) + } + if f() { + if _, ok := set.data[item]; !ok { + set.data[item] = struct{}{} + return true + } + } + } + return false +} + +// Contains checks whether the set contains `item`. +func (set *StrSet) Contains(item string) bool { + var ok bool + set.mu.RLock() + if set.data != nil { + _, ok = set.data[item] + } + set.mu.RUnlock() + return ok +} + +// ContainsI checks whether a value exists in the set with case-insensitively. +// Note that it internally iterates the whole set to do the comparison with case-insensitively. 
+func (set *StrSet) ContainsI(item string) bool { + set.mu.RLock() + defer set.mu.RUnlock() + for k := range set.data { + if strings.EqualFold(k, item) { + return true + } + } + return false +} + +// Remove deletes `item` from set. +func (set *StrSet) Remove(item string) { + set.mu.Lock() + if set.data != nil { + delete(set.data, item) + } + set.mu.Unlock() +} + +// Size returns the size of the set. +func (set *StrSet) Size() int { + set.mu.RLock() + l := len(set.data) + set.mu.RUnlock() + return l +} + +// Clear deletes all items of the set. +func (set *StrSet) Clear() { + set.mu.Lock() + set.data = make(map[string]struct{}) + set.mu.Unlock() +} + +// Slice returns the an of items of the set as slice. +func (set *StrSet) Slice() []string { + set.mu.RLock() + var ( + i = 0 + ret = make([]string, len(set.data)) + ) + for item := range set.data { + ret[i] = item + i++ + } + + set.mu.RUnlock() + return ret +} + +// Join joins items with a string `glue`. +func (set *StrSet) Join(glue string) string { + set.mu.RLock() + defer set.mu.RUnlock() + if len(set.data) == 0 { + return "" + } + var ( + l = len(set.data) + i = 0 + buffer = bytes.NewBuffer(nil) + ) + for k := range set.data { + buffer.WriteString(k) + if i != l-1 { + buffer.WriteString(glue) + } + i++ + } + return buffer.String() +} + +// String returns items as a string, which implements like json.Marshal does. +func (set *StrSet) String() string { + if set == nil { + return "" + } + set.mu.RLock() + defer set.mu.RUnlock() + var ( + l = len(set.data) + i = 0 + buffer = bytes.NewBuffer(nil) + ) + buffer.WriteByte('[') + for k := range set.data { + buffer.WriteString(`"` + gstr.QuoteMeta(k, `"\`) + `"`) + if i != l-1 { + buffer.WriteByte(',') + } + i++ + } + buffer.WriteByte(']') + return buffer.String() +} + +// LockFunc locks writing with callback function `f`. 
+func (set *StrSet) LockFunc(f func(m map[string]struct{})) { + set.mu.Lock() + defer set.mu.Unlock() + f(set.data) +} + +// RLockFunc locks reading with callback function `f`. +func (set *StrSet) RLockFunc(f func(m map[string]struct{})) { + set.mu.RLock() + defer set.mu.RUnlock() + f(set.data) +} + +// Equal checks whether the two sets equal. +func (set *StrSet) Equal(other *StrSet) bool { + if set == other { + return true + } + set.mu.RLock() + defer set.mu.RUnlock() + other.mu.RLock() + defer other.mu.RUnlock() + if len(set.data) != len(other.data) { + return false + } + for key := range set.data { + if _, ok := other.data[key]; !ok { + return false + } + } + return true +} + +// IsSubsetOf checks whether the current set is a sub-set of `other`. +func (set *StrSet) IsSubsetOf(other *StrSet) bool { + if set == other { + return true + } + set.mu.RLock() + defer set.mu.RUnlock() + other.mu.RLock() + defer other.mu.RUnlock() + for key := range set.data { + if _, ok := other.data[key]; !ok { + return false + } + } + return true +} + +// Union returns a new set which is the union of `set` and `other`. +// Which means, all the items in `newSet` are in `set` or in `other`. +func (set *StrSet) Union(others ...*StrSet) (newSet *StrSet) { + newSet = NewStrSet() + set.mu.RLock() + defer set.mu.RUnlock() + for _, other := range others { + if set != other { + other.mu.RLock() + } + for k, v := range set.data { + newSet.data[k] = v + } + if set != other { + for k, v := range other.data { + newSet.data[k] = v + } + } + if set != other { + other.mu.RUnlock() + } + } + + return +} + +// Diff returns a new set which is the difference set from `set` to `other`. +// Which means, all the items in `newSet` are in `set` but not in `other`. 
+func (set *StrSet) Diff(others ...*StrSet) (newSet *StrSet) { + newSet = NewStrSet() + set.mu.RLock() + defer set.mu.RUnlock() + for _, other := range others { + if set == other { + continue + } + other.mu.RLock() + for k, v := range set.data { + if _, ok := other.data[k]; !ok { + newSet.data[k] = v + } + } + other.mu.RUnlock() + } + return +} + +// Intersect returns a new set which is the intersection from `set` to `other`. +// Which means, all the items in `newSet` are in `set` and also in `other`. +func (set *StrSet) Intersect(others ...*StrSet) (newSet *StrSet) { + newSet = NewStrSet() + set.mu.RLock() + defer set.mu.RUnlock() + for _, other := range others { + if set != other { + other.mu.RLock() + } + for k, v := range set.data { + if _, ok := other.data[k]; ok { + newSet.data[k] = v + } + } + if set != other { + other.mu.RUnlock() + } + } + return +} + +// Complement returns a new set which is the complement from `set` to `full`. +// Which means, all the items in `newSet` are in `full` and not in `set`. +// +// It returns the difference between `full` and `set` +// if the given set `full` is not the full set of `set`. +func (set *StrSet) Complement(full *StrSet) (newSet *StrSet) { + newSet = NewStrSet() + set.mu.RLock() + defer set.mu.RUnlock() + if set != full { + full.mu.RLock() + defer full.mu.RUnlock() + } + for k, v := range full.data { + if _, ok := set.data[k]; !ok { + newSet.data[k] = v + } + } + return +} + +// Merge adds items from `others` sets into `set`. +func (set *StrSet) Merge(others ...*StrSet) *StrSet { + set.mu.Lock() + defer set.mu.Unlock() + for _, other := range others { + if set != other { + other.mu.RLock() + } + for k, v := range other.data { + set.data[k] = v + } + if set != other { + other.mu.RUnlock() + } + } + return set +} + +// Sum sums items. +// Note: The items should be converted to int type, +// or you'd get a result that you unexpected. 
+func (set *StrSet) Sum() (sum int) { + set.mu.RLock() + defer set.mu.RUnlock() + for k := range set.data { + sum += gconv.Int(k) + } + return +} + +// Pop randomly pops an item from set. +func (set *StrSet) Pop() string { + set.mu.Lock() + defer set.mu.Unlock() + for k := range set.data { + delete(set.data, k) + return k + } + return "" +} + +// Pops randomly pops `size` items from set. +// It returns all items if size == -1. +func (set *StrSet) Pops(size int) []string { + set.mu.Lock() + defer set.mu.Unlock() + if size > len(set.data) || size == -1 { + size = len(set.data) + } + if size <= 0 { + return nil + } + index := 0 + array := make([]string, size) + for k := range set.data { + delete(set.data, k) + array[index] = k + index++ + if index == size { + break + } + } + return array +} + +// Walk applies a user supplied function `f` to every item of set. +func (set *StrSet) Walk(f func(item string) string) *StrSet { + set.mu.Lock() + defer set.mu.Unlock() + m := make(map[string]struct{}, len(set.data)) + for k, v := range set.data { + m[f(k)] = v + } + set.data = m + return set +} + +// MarshalJSON implements the interface MarshalJSON for json.Marshal. +func (set StrSet) MarshalJSON() ([]byte, error) { + return json.Marshal(set.Slice()) +} + +// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. +func (set *StrSet) UnmarshalJSON(b []byte) error { + set.mu.Lock() + defer set.mu.Unlock() + if set.data == nil { + set.data = make(map[string]struct{}) + } + var array []string + if err := json.UnmarshalUseNumber(b, &array); err != nil { + return err + } + for _, v := range array { + set.data[v] = struct{}{} + } + return nil +} + +// UnmarshalValue is an interface implement which sets any type of value for set. 
+func (set *StrSet) UnmarshalValue(value interface{}) (err error) { + set.mu.Lock() + defer set.mu.Unlock() + if set.data == nil { + set.data = make(map[string]struct{}) + } + var array []string + switch value.(type) { + case string, []byte: + err = json.UnmarshalUseNumber(gconv.Bytes(value), &array) + default: + array = gconv.SliceStr(value) + } + for _, v := range array { + set.data[v] = struct{}{} + } + return +} + +// DeepCopy implements interface for deep copy of current type. +func (set *StrSet) DeepCopy() interface{} { + if set == nil { + return nil + } + set.mu.RLock() + defer set.mu.RUnlock() + var ( + slice = make([]string, len(set.data)) + index = 0 + ) + for k := range set.data { + slice[index] = k + index++ + } + return NewStrSetFrom(slice, set.mu.IsSafe()) +} diff --git a/vendor/github.com/gogf/gf/v2/container/gtree/gtree.go b/vendor/github.com/gogf/gf/v2/container/gtree/gtree.go new file mode 100644 index 00000000..2cb7b25e --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/container/gtree/gtree.go @@ -0,0 +1,10 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +// Package gtree provides concurrent-safe/unsafe tree containers. +// +// Some implements are from: https://github.com/emirpasic/gods +package gtree diff --git a/vendor/github.com/gogf/gf/v2/container/gtree/gtree_avltree.go b/vendor/github.com/gogf/gf/v2/container/gtree/gtree_avltree.go new file mode 100644 index 00000000..005be372 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/container/gtree/gtree_avltree.go @@ -0,0 +1,816 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. 
+// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gtree + +import ( + "bytes" + "fmt" + + "github.com/gogf/gf/v2/container/gvar" + "github.com/gogf/gf/v2/internal/json" + "github.com/gogf/gf/v2/internal/rwmutex" + "github.com/gogf/gf/v2/util/gconv" +) + +// AVLTree holds elements of the AVL tree. +type AVLTree struct { + mu rwmutex.RWMutex + root *AVLTreeNode + comparator func(v1, v2 interface{}) int + size int +} + +// AVLTreeNode is a single element within the tree. +type AVLTreeNode struct { + Key interface{} + Value interface{} + parent *AVLTreeNode + children [2]*AVLTreeNode + b int8 +} + +// NewAVLTree instantiates an AVL tree with the custom key comparator. +// The parameter `safe` is used to specify whether using tree in concurrent-safety, +// which is false in default. +func NewAVLTree(comparator func(v1, v2 interface{}) int, safe ...bool) *AVLTree { + return &AVLTree{ + mu: rwmutex.Create(safe...), + comparator: comparator, + } +} + +// NewAVLTreeFrom instantiates an AVL tree with the custom key comparator and data map. +// The parameter `safe` is used to specify whether using tree in concurrent-safety, +// which is false in default. +func NewAVLTreeFrom(comparator func(v1, v2 interface{}) int, data map[interface{}]interface{}, safe ...bool) *AVLTree { + tree := NewAVLTree(comparator, safe...) + for k, v := range data { + tree.put(k, v, nil, &tree.root) + } + return tree +} + +// Clone returns a new tree with a copy of current tree. +func (tree *AVLTree) Clone() *AVLTree { + newTree := NewAVLTree(tree.comparator, tree.mu.IsSafe()) + newTree.Sets(tree.Map()) + return newTree +} + +// Set inserts node into the tree. +func (tree *AVLTree) Set(key interface{}, value interface{}) { + tree.mu.Lock() + defer tree.mu.Unlock() + tree.put(key, value, nil, &tree.root) +} + +// Sets batch sets key-values to the tree. 
+func (tree *AVLTree) Sets(data map[interface{}]interface{}) { + tree.mu.Lock() + defer tree.mu.Unlock() + for key, value := range data { + tree.put(key, value, nil, &tree.root) + } +} + +// Search searches the tree with given `key`. +// Second return parameter `found` is true if key was found, otherwise false. +func (tree *AVLTree) Search(key interface{}) (value interface{}, found bool) { + tree.mu.RLock() + defer tree.mu.RUnlock() + if node, found := tree.doSearch(key); found { + return node.Value, true + } + return nil, false +} + +// doSearch searches the tree with given `key`. +// Second return parameter `found` is true if key was found, otherwise false. +func (tree *AVLTree) doSearch(key interface{}) (node *AVLTreeNode, found bool) { + node = tree.root + for node != nil { + cmp := tree.getComparator()(key, node.Key) + switch { + case cmp == 0: + return node, true + case cmp < 0: + node = node.children[0] + case cmp > 0: + node = node.children[1] + } + } + return nil, false +} + +// Get searches the node in the tree by `key` and returns its value or nil if key is not found in tree. +func (tree *AVLTree) Get(key interface{}) (value interface{}) { + value, _ = tree.Search(key) + return +} + +// doSetWithLockCheck checks whether value of the key exists with mutex.Lock, +// if not exists, set value to the map with given `key`, +// or else just return the existing value. +// +// When setting value, if `value` is type of , +// it will be executed with mutex.Lock of the hash map, +// and its return value will be set to the map with `key`. +// +// It returns value with given `key`. 
+func (tree *AVLTree) doSetWithLockCheck(key interface{}, value interface{}) interface{} { + tree.mu.Lock() + defer tree.mu.Unlock() + if node, found := tree.doSearch(key); found { + return node.Value + } + if f, ok := value.(func() interface{}); ok { + value = f() + } + if value != nil { + tree.put(key, value, nil, &tree.root) + } + return value +} + +// GetOrSet returns the value by key, +// or sets value with given `value` if it does not exist and then returns this value. +func (tree *AVLTree) GetOrSet(key interface{}, value interface{}) interface{} { + if v, ok := tree.Search(key); !ok { + return tree.doSetWithLockCheck(key, value) + } else { + return v + } +} + +// GetOrSetFunc returns the value by key, +// or sets value with returned value of callback function `f` if it does not exist +// and then returns this value. +func (tree *AVLTree) GetOrSetFunc(key interface{}, f func() interface{}) interface{} { + if v, ok := tree.Search(key); !ok { + return tree.doSetWithLockCheck(key, f()) + } else { + return v + } +} + +// GetOrSetFuncLock returns the value by key, +// or sets value with returned value of callback function `f` if it does not exist +// and then returns this value. +// +// GetOrSetFuncLock differs with GetOrSetFunc function is that it executes function `f` +// with mutex.Lock of the hash map. +func (tree *AVLTree) GetOrSetFuncLock(key interface{}, f func() interface{}) interface{} { + if v, ok := tree.Search(key); !ok { + return tree.doSetWithLockCheck(key, f) + } else { + return v + } +} + +// GetVar returns a gvar.Var with the value by given `key`. +// The returned gvar.Var is un-concurrent safe. +func (tree *AVLTree) GetVar(key interface{}) *gvar.Var { + return gvar.New(tree.Get(key)) +} + +// GetVarOrSet returns a gvar.Var with result from GetVarOrSet. +// The returned gvar.Var is un-concurrent safe. 
+func (tree *AVLTree) GetVarOrSet(key interface{}, value interface{}) *gvar.Var { + return gvar.New(tree.GetOrSet(key, value)) +} + +// GetVarOrSetFunc returns a gvar.Var with result from GetOrSetFunc. +// The returned gvar.Var is un-concurrent safe. +func (tree *AVLTree) GetVarOrSetFunc(key interface{}, f func() interface{}) *gvar.Var { + return gvar.New(tree.GetOrSetFunc(key, f)) +} + +// GetVarOrSetFuncLock returns a gvar.Var with result from GetOrSetFuncLock. +// The returned gvar.Var is un-concurrent safe. +func (tree *AVLTree) GetVarOrSetFuncLock(key interface{}, f func() interface{}) *gvar.Var { + return gvar.New(tree.GetOrSetFuncLock(key, f)) +} + +// SetIfNotExist sets `value` to the map if the `key` does not exist, and then returns true. +// It returns false if `key` exists, and `value` would be ignored. +func (tree *AVLTree) SetIfNotExist(key interface{}, value interface{}) bool { + if !tree.Contains(key) { + tree.doSetWithLockCheck(key, value) + return true + } + return false +} + +// SetIfNotExistFunc sets value with return value of callback function `f`, and then returns true. +// It returns false if `key` exists, and `value` would be ignored. +func (tree *AVLTree) SetIfNotExistFunc(key interface{}, f func() interface{}) bool { + if !tree.Contains(key) { + tree.doSetWithLockCheck(key, f()) + return true + } + return false +} + +// SetIfNotExistFuncLock sets value with return value of callback function `f`, and then returns true. +// It returns false if `key` exists, and `value` would be ignored. +// +// SetIfNotExistFuncLock differs with SetIfNotExistFunc function is that +// it executes function `f` with mutex.Lock of the hash map. +func (tree *AVLTree) SetIfNotExistFuncLock(key interface{}, f func() interface{}) bool { + if !tree.Contains(key) { + tree.doSetWithLockCheck(key, f) + return true + } + return false +} + +// Contains checks whether `key` exists in the tree. 
+func (tree *AVLTree) Contains(key interface{}) bool { + _, ok := tree.Search(key) + return ok +} + +// Remove removes the node from the tree by key. +// Key should adhere to the comparator's type assertion, otherwise method panics. +func (tree *AVLTree) Remove(key interface{}) (value interface{}) { + tree.mu.Lock() + defer tree.mu.Unlock() + value, _ = tree.remove(key, &tree.root) + return +} + +// Removes batch deletes values of the tree by `keys`. +func (tree *AVLTree) Removes(keys []interface{}) { + tree.mu.Lock() + defer tree.mu.Unlock() + for _, key := range keys { + tree.remove(key, &tree.root) + } +} + +// IsEmpty returns true if tree does not contain any nodes. +func (tree *AVLTree) IsEmpty() bool { + return tree.Size() == 0 +} + +// Size returns number of nodes in the tree. +func (tree *AVLTree) Size() int { + tree.mu.RLock() + defer tree.mu.RUnlock() + return tree.size +} + +// Keys returns all keys in asc order. +func (tree *AVLTree) Keys() []interface{} { + keys := make([]interface{}, tree.Size()) + index := 0 + tree.IteratorAsc(func(key, value interface{}) bool { + keys[index] = key + index++ + return true + }) + return keys +} + +// Values returns all values in asc order based on the key. +func (tree *AVLTree) Values() []interface{} { + values := make([]interface{}, tree.Size()) + index := 0 + tree.IteratorAsc(func(key, value interface{}) bool { + values[index] = value + index++ + return true + }) + return values +} + +// Left returns the minimum element of the AVL tree +// or nil if the tree is empty. +func (tree *AVLTree) Left() *AVLTreeNode { + tree.mu.RLock() + defer tree.mu.RUnlock() + node := tree.bottom(0) + if tree.mu.IsSafe() { + return &AVLTreeNode{ + Key: node.Key, + Value: node.Value, + } + } + return node +} + +// Right returns the maximum element of the AVL tree +// or nil if the tree is empty. 
+func (tree *AVLTree) Right() *AVLTreeNode { + tree.mu.RLock() + defer tree.mu.RUnlock() + node := tree.bottom(1) + if tree.mu.IsSafe() { + return &AVLTreeNode{ + Key: node.Key, + Value: node.Value, + } + } + return node +} + +// Floor Finds floor node of the input key, return the floor node or nil if no floor node is found. +// Second return parameter is true if floor was found, otherwise false. +// +// Floor node is defined as the largest node that is smaller than or equal to the given node. +// A floor node may not be found, either because the tree is empty, or because +// all nodes in the tree is larger than the given node. +// +// Key should adhere to the comparator's type assertion, otherwise method panics. +func (tree *AVLTree) Floor(key interface{}) (floor *AVLTreeNode, found bool) { + tree.mu.RLock() + defer tree.mu.RUnlock() + n := tree.root + for n != nil { + c := tree.getComparator()(key, n.Key) + switch { + case c == 0: + return n, true + case c < 0: + n = n.children[0] + case c > 0: + floor, found = n, true + n = n.children[1] + } + } + if found { + return + } + return nil, false +} + +// Ceiling finds ceiling node of the input key, return the ceiling node or nil if no ceiling node is found. +// Second return parameter is true if ceiling was found, otherwise false. +// +// Ceiling node is defined as the smallest node that is larger than or equal to the given node. +// A ceiling node may not be found, either because the tree is empty, or because +// all nodes in the tree is smaller than the given node. +// +// Key should adhere to the comparator's type assertion, otherwise method panics. 
+func (tree *AVLTree) Ceiling(key interface{}) (ceiling *AVLTreeNode, found bool) { + tree.mu.RLock() + defer tree.mu.RUnlock() + n := tree.root + for n != nil { + c := tree.getComparator()(key, n.Key) + switch { + case c == 0: + return n, true + case c > 0: + n = n.children[1] + case c < 0: + ceiling, found = n, true + n = n.children[0] + } + } + if found { + return + } + return nil, false +} + +// Clear removes all nodes from the tree. +func (tree *AVLTree) Clear() { + tree.mu.Lock() + defer tree.mu.Unlock() + tree.root = nil + tree.size = 0 +} + +// Replace the data of the tree with given `data`. +func (tree *AVLTree) Replace(data map[interface{}]interface{}) { + tree.mu.Lock() + defer tree.mu.Unlock() + tree.root = nil + tree.size = 0 + for key, value := range data { + tree.put(key, value, nil, &tree.root) + } +} + +// String returns a string representation of container +func (tree *AVLTree) String() string { + if tree == nil { + return "" + } + tree.mu.RLock() + defer tree.mu.RUnlock() + str := "" + if tree.size != 0 { + output(tree.root, "", true, &str) + } + return str +} + +// Print prints the tree to stdout. +func (tree *AVLTree) Print() { + fmt.Println(tree.String()) +} + +// Map returns all key-value items as map. +func (tree *AVLTree) Map() map[interface{}]interface{} { + m := make(map[interface{}]interface{}, tree.Size()) + tree.IteratorAsc(func(key, value interface{}) bool { + m[key] = value + return true + }) + return m +} + +// MapStrAny returns all key-value items as map[string]interface{}. +func (tree *AVLTree) MapStrAny() map[string]interface{} { + m := make(map[string]interface{}, tree.Size()) + tree.IteratorAsc(func(key, value interface{}) bool { + m[gconv.String(key)] = value + return true + }) + return m +} + +// Flip exchanges key-value of the tree to value-key. +// Note that you should guarantee the value is the same type as key, +// or else the comparator would panic. 
+// +// If the type of value is different with key, you pass the new `comparator`. +func (tree *AVLTree) Flip(comparator ...func(v1, v2 interface{}) int) { + t := (*AVLTree)(nil) + if len(comparator) > 0 { + t = NewAVLTree(comparator[0], tree.mu.IsSafe()) + } else { + t = NewAVLTree(tree.comparator, tree.mu.IsSafe()) + } + tree.IteratorAsc(func(key, value interface{}) bool { + t.put(value, key, nil, &t.root) + return true + }) + tree.mu.Lock() + tree.root = t.root + tree.size = t.size + tree.mu.Unlock() +} + +// Iterator is alias of IteratorAsc. +func (tree *AVLTree) Iterator(f func(key, value interface{}) bool) { + tree.IteratorAsc(f) +} + +// IteratorFrom is alias of IteratorAscFrom. +func (tree *AVLTree) IteratorFrom(key interface{}, match bool, f func(key, value interface{}) bool) { + tree.IteratorAscFrom(key, match, f) +} + +// IteratorAsc iterates the tree readonly in ascending order with given callback function `f`. +// If `f` returns true, then it continues iterating; or false to stop. +func (tree *AVLTree) IteratorAsc(f func(key, value interface{}) bool) { + tree.mu.RLock() + defer tree.mu.RUnlock() + tree.doIteratorAsc(tree.bottom(0), f) +} + +// IteratorAscFrom iterates the tree readonly in ascending order with given callback function `f`. +// The parameter `key` specifies the start entry for iterating. The `match` specifies whether +// starting iterating if the `key` is fully matched, or else using index searching iterating. +// If `f` returns true, then it continues iterating; or false to stop. 
+func (tree *AVLTree) IteratorAscFrom(key interface{}, match bool, f func(key, value interface{}) bool) { + tree.mu.RLock() + defer tree.mu.RUnlock() + node, found := tree.doSearch(key) + if match { + if found { + tree.doIteratorAsc(node, f) + } + } else { + tree.doIteratorAsc(node, f) + } +} + +func (tree *AVLTree) doIteratorAsc(node *AVLTreeNode, f func(key, value interface{}) bool) { + for node != nil { + if !f(node.Key, node.Value) { + return + } + node = node.Next() + } +} + +// IteratorDesc iterates the tree readonly in descending order with given callback function `f`. +// If `f` returns true, then it continues iterating; or false to stop. +func (tree *AVLTree) IteratorDesc(f func(key, value interface{}) bool) { + tree.mu.RLock() + defer tree.mu.RUnlock() + tree.doIteratorDesc(tree.bottom(1), f) +} + +// IteratorDescFrom iterates the tree readonly in descending order with given callback function `f`. +// The parameter `key` specifies the start entry for iterating. The `match` specifies whether +// starting iterating if the `key` is fully matched, or else using index searching iterating. +// If `f` returns true, then it continues iterating; or false to stop. 
+func (tree *AVLTree) IteratorDescFrom(key interface{}, match bool, f func(key, value interface{}) bool) { + tree.mu.RLock() + defer tree.mu.RUnlock() + node, found := tree.doSearch(key) + if match { + if found { + tree.doIteratorDesc(node, f) + } + } else { + tree.doIteratorDesc(node, f) + } +} + +func (tree *AVLTree) doIteratorDesc(node *AVLTreeNode, f func(key, value interface{}) bool) { + for node != nil { + if !f(node.Key, node.Value) { + return + } + node = node.Prev() + } +} + +func (tree *AVLTree) put(key interface{}, value interface{}, p *AVLTreeNode, qp **AVLTreeNode) bool { + q := *qp + if q == nil { + tree.size++ + *qp = &AVLTreeNode{Key: key, Value: value, parent: p} + return true + } + + c := tree.getComparator()(key, q.Key) + if c == 0 { + q.Key = key + q.Value = value + return false + } + + if c < 0 { + c = -1 + } else { + c = 1 + } + a := (c + 1) / 2 + if tree.put(key, value, q, &q.children[a]) { + return putFix(int8(c), qp) + } + return false +} + +func (tree *AVLTree) remove(key interface{}, qp **AVLTreeNode) (value interface{}, fix bool) { + q := *qp + if q == nil { + return nil, false + } + + c := tree.getComparator()(key, q.Key) + if c == 0 { + tree.size-- + value = q.Value + fix = true + if q.children[1] == nil { + if q.children[0] != nil { + q.children[0].parent = q.parent + } + *qp = q.children[0] + return + } + if removeMin(&q.children[1], &q.Key, &q.Value) { + return value, removeFix(-1, qp) + } + return + } + + if c < 0 { + c = -1 + } else { + c = 1 + } + a := (c + 1) / 2 + value, fix = tree.remove(key, &q.children[a]) + if fix { + return value, removeFix(int8(-c), qp) + } + return value, false +} + +func removeMin(qp **AVLTreeNode, minKey *interface{}, minVal *interface{}) bool { + q := *qp + if q.children[0] == nil { + *minKey = q.Key + *minVal = q.Value + if q.children[1] != nil { + q.children[1].parent = q.parent + } + *qp = q.children[1] + return true + } + fix := removeMin(&q.children[0], minKey, minVal) + if fix { + return 
removeFix(1, qp) + } + return false +} + +func putFix(c int8, t **AVLTreeNode) bool { + s := *t + if s.b == 0 { + s.b = c + return true + } + + if s.b == -c { + s.b = 0 + return false + } + + if s.children[(c+1)/2].b == c { + s = singleRotate(c, s) + } else { + s = doubleRotate(c, s) + } + *t = s + return false +} + +func removeFix(c int8, t **AVLTreeNode) bool { + s := *t + if s.b == 0 { + s.b = c + return false + } + + if s.b == -c { + s.b = 0 + return true + } + + a := (c + 1) / 2 + if s.children[a].b == 0 { + s = rotate(c, s) + s.b = -c + *t = s + return false + } + + if s.children[a].b == c { + s = singleRotate(c, s) + } else { + s = doubleRotate(c, s) + } + *t = s + return true +} + +func singleRotate(c int8, s *AVLTreeNode) *AVLTreeNode { + s.b = 0 + s = rotate(c, s) + s.b = 0 + return s +} + +func doubleRotate(c int8, s *AVLTreeNode) *AVLTreeNode { + a := (c + 1) / 2 + r := s.children[a] + s.children[a] = rotate(-c, s.children[a]) + p := rotate(c, s) + + switch { + default: + s.b = 0 + r.b = 0 + case p.b == c: + s.b = -c + r.b = 0 + case p.b == -c: + s.b = 0 + r.b = c + } + + p.b = 0 + return p +} + +func rotate(c int8, s *AVLTreeNode) *AVLTreeNode { + a := (c + 1) / 2 + r := s.children[a] + s.children[a] = r.children[a^1] + if s.children[a] != nil { + s.children[a].parent = s + } + r.children[a^1] = s + r.parent = s.parent + s.parent = r + return r +} + +func (tree *AVLTree) bottom(d int) *AVLTreeNode { + n := tree.root + if n == nil { + return nil + } + + for c := n.children[d]; c != nil; c = n.children[d] { + n = c + } + return n +} + +// Prev returns the previous element in an inorder +// walk of the AVL tree. +func (node *AVLTreeNode) Prev() *AVLTreeNode { + return node.walk1(0) +} + +// Next returns the next element in an inorder +// walk of the AVL tree. 
+func (node *AVLTreeNode) Next() *AVLTreeNode { + return node.walk1(1) +} + +func (node *AVLTreeNode) walk1(a int) *AVLTreeNode { + if node == nil { + return nil + } + n := node + if n.children[a] != nil { + n = n.children[a] + for n.children[a^1] != nil { + n = n.children[a^1] + } + return n + } + + p := n.parent + for p != nil && p.children[a] == n { + n = p + p = p.parent + } + return p +} + +func output(node *AVLTreeNode, prefix string, isTail bool, str *string) { + if node.children[1] != nil { + newPrefix := prefix + if isTail { + newPrefix += "│ " + } else { + newPrefix += " " + } + output(node.children[1], newPrefix, false, str) + } + *str += prefix + if isTail { + *str += "└── " + } else { + *str += "┌── " + } + *str += fmt.Sprintf("%v\n", node.Key) + if node.children[0] != nil { + newPrefix := prefix + if isTail { + newPrefix += " " + } else { + newPrefix += "│ " + } + output(node.children[0], newPrefix, true, str) + } +} + +// MarshalJSON implements the interface MarshalJSON for json.Marshal. +func (tree AVLTree) MarshalJSON() (jsonBytes []byte, err error) { + if tree.root == nil { + return []byte("null"), nil + } + buffer := bytes.NewBuffer(nil) + buffer.WriteByte('{') + tree.Iterator(func(key, value interface{}) bool { + valueBytes, valueJsonErr := json.Marshal(value) + if valueJsonErr != nil { + err = valueJsonErr + return false + } + if buffer.Len() > 1 { + buffer.WriteByte(',') + } + buffer.WriteString(fmt.Sprintf(`"%v":%s`, key, valueBytes)) + return true + }) + buffer.WriteByte('}') + return buffer.Bytes(), nil +} + +// getComparator returns the comparator if it's previously set, +// or else it panics. 
+func (tree *AVLTree) getComparator() func(a, b interface{}) int { + if tree.comparator == nil { + panic("comparator is missing for tree") + } + return tree.comparator +} diff --git a/vendor/github.com/gogf/gf/v2/container/gtree/gtree_btree.go b/vendor/github.com/gogf/gf/v2/container/gtree/gtree_btree.go new file mode 100644 index 00000000..fd6c06ce --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/container/gtree/gtree_btree.go @@ -0,0 +1,979 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gtree + +import ( + "bytes" + "context" + "fmt" + "strings" + + "github.com/gogf/gf/v2/container/gvar" + "github.com/gogf/gf/v2/internal/intlog" + "github.com/gogf/gf/v2/internal/json" + "github.com/gogf/gf/v2/internal/rwmutex" + "github.com/gogf/gf/v2/util/gconv" +) + +// BTree holds elements of the B-tree. +type BTree struct { + mu rwmutex.RWMutex + root *BTreeNode + comparator func(v1, v2 interface{}) int + size int // Total number of keys in the tree + m int // order (maximum number of children) +} + +// BTreeNode is a single element within the tree. +type BTreeNode struct { + Parent *BTreeNode + Entries []*BTreeEntry // Contained keys in node + Children []*BTreeNode // Children nodes +} + +// BTreeEntry represents the key-value pair contained within nodes. +type BTreeEntry struct { + Key interface{} + Value interface{} +} + +// NewBTree instantiates a B-tree with `m` (maximum number of children) and a custom key comparator. +// The parameter `safe` is used to specify whether using tree in concurrent-safety, +// which is false in default. +// Note that the `m` must be greater or equal than 3, or else it panics. 
+func NewBTree(m int, comparator func(v1, v2 interface{}) int, safe ...bool) *BTree { + if m < 3 { + panic("Invalid order, should be at least 3") + } + return &BTree{ + comparator: comparator, + mu: rwmutex.Create(safe...), + m: m, + } +} + +// NewBTreeFrom instantiates a B-tree with `m` (maximum number of children), a custom key comparator and data map. +// The parameter `safe` is used to specify whether using tree in concurrent-safety, +// which is false in default. +func NewBTreeFrom(m int, comparator func(v1, v2 interface{}) int, data map[interface{}]interface{}, safe ...bool) *BTree { + tree := NewBTree(m, comparator, safe...) + for k, v := range data { + tree.doSet(k, v) + } + return tree +} + +// Clone returns a new tree with a copy of current tree. +func (tree *BTree) Clone() *BTree { + newTree := NewBTree(tree.m, tree.comparator, tree.mu.IsSafe()) + newTree.Sets(tree.Map()) + return newTree +} + +// Set inserts key-value item into the tree. +func (tree *BTree) Set(key interface{}, value interface{}) { + tree.mu.Lock() + defer tree.mu.Unlock() + tree.doSet(key, value) +} + +// doSet inserts key-value pair node into the tree. +// If key already exists, then its value is updated with the new value. +func (tree *BTree) doSet(key interface{}, value interface{}) { + entry := &BTreeEntry{Key: key, Value: value} + if tree.root == nil { + tree.root = &BTreeNode{Entries: []*BTreeEntry{entry}, Children: []*BTreeNode{}} + tree.size++ + return + } + + if tree.insert(tree.root, entry) { + tree.size++ + } +} + +// Sets batch sets key-values to the tree. +func (tree *BTree) Sets(data map[interface{}]interface{}) { + tree.mu.Lock() + defer tree.mu.Unlock() + for k, v := range data { + tree.doSet(k, v) + } +} + +// Get searches the node in the tree by `key` and returns its value or nil if key is not found in tree. 
func (tree *BTree) Get(key interface{}) (value interface{}) {
	value, _ = tree.Search(key)
	return
}

// doSetWithLockCheck checks whether value of the key exists with mutex.Lock,
// if not exists, set value to the tree with given `key`,
// or else just return the existing value.
//
// When setting value, if `value` is type of `func() interface{}`,
// it will be executed with mutex.Lock of the tree,
// and its return value will be set to the tree with `key`.
//
// Note that a nil `value` (or a function returning nil) is NOT stored;
// the nil is simply returned to the caller.
//
// It returns value with given `key`.
func (tree *BTree) doSetWithLockCheck(key interface{}, value interface{}) interface{} {
	tree.mu.Lock()
	defer tree.mu.Unlock()
	// Re-check under the write lock: another writer may have inserted
	// the key between the caller's lock-free Search and this point.
	if entry := tree.doSearch(key); entry != nil {
		return entry.Value
	}
	if f, ok := value.(func() interface{}); ok {
		value = f()
	}
	if value != nil {
		tree.doSet(key, value)
	}
	return value
}

// GetOrSet returns the value by key,
// or sets value with given `value` if it does not exist and then returns this value.
func (tree *BTree) GetOrSet(key interface{}, value interface{}) interface{} {
	if v, ok := tree.Search(key); !ok {
		return tree.doSetWithLockCheck(key, value)
	} else {
		return v
	}
}

// GetOrSetFunc returns the value by key,
// or sets value with returned value of callback function `f` if it does not exist
// and then returns this value.
//
// Note that `f` is evaluated BEFORE the write lock is taken, so it may run
// even when a concurrent writer wins the race for this key.
func (tree *BTree) GetOrSetFunc(key interface{}, f func() interface{}) interface{} {
	if v, ok := tree.Search(key); !ok {
		return tree.doSetWithLockCheck(key, f())
	} else {
		return v
	}
}

// GetOrSetFuncLock returns the value by key,
// or sets value with returned value of callback function `f` if it does not exist
// and then returns this value.
//
// GetOrSetFuncLock differs with GetOrSetFunc function is that it executes function `f`
// with mutex.Lock of the tree.
+func (tree *BTree) GetOrSetFuncLock(key interface{}, f func() interface{}) interface{} { + if v, ok := tree.Search(key); !ok { + return tree.doSetWithLockCheck(key, f) + } else { + return v + } +} + +// GetVar returns a gvar.Var with the value by given `key`. +// The returned gvar.Var is un-concurrent safe. +func (tree *BTree) GetVar(key interface{}) *gvar.Var { + return gvar.New(tree.Get(key)) +} + +// GetVarOrSet returns a gvar.Var with result from GetVarOrSet. +// The returned gvar.Var is un-concurrent safe. +func (tree *BTree) GetVarOrSet(key interface{}, value interface{}) *gvar.Var { + return gvar.New(tree.GetOrSet(key, value)) +} + +// GetVarOrSetFunc returns a gvar.Var with result from GetOrSetFunc. +// The returned gvar.Var is un-concurrent safe. +func (tree *BTree) GetVarOrSetFunc(key interface{}, f func() interface{}) *gvar.Var { + return gvar.New(tree.GetOrSetFunc(key, f)) +} + +// GetVarOrSetFuncLock returns a gvar.Var with result from GetOrSetFuncLock. +// The returned gvar.Var is un-concurrent safe. +func (tree *BTree) GetVarOrSetFuncLock(key interface{}, f func() interface{}) *gvar.Var { + return gvar.New(tree.GetOrSetFuncLock(key, f)) +} + +// SetIfNotExist sets `value` to the map if the `key` does not exist, and then returns true. +// It returns false if `key` exists, and `value` would be ignored. +func (tree *BTree) SetIfNotExist(key interface{}, value interface{}) bool { + if !tree.Contains(key) { + tree.doSetWithLockCheck(key, value) + return true + } + return false +} + +// SetIfNotExistFunc sets value with return value of callback function `f`, and then returns true. +// It returns false if `key` exists, and `value` would be ignored. +func (tree *BTree) SetIfNotExistFunc(key interface{}, f func() interface{}) bool { + if !tree.Contains(key) { + tree.doSetWithLockCheck(key, f()) + return true + } + return false +} + +// SetIfNotExistFuncLock sets value with return value of callback function `f`, and then returns true. 
+// It returns false if `key` exists, and `value` would be ignored. +// +// SetIfNotExistFuncLock differs with SetIfNotExistFunc function is that +// it executes function `f` with mutex.Lock of the hash map. +func (tree *BTree) SetIfNotExistFuncLock(key interface{}, f func() interface{}) bool { + if !tree.Contains(key) { + tree.doSetWithLockCheck(key, f) + return true + } + return false +} + +// Contains checks whether `key` exists in the tree. +func (tree *BTree) Contains(key interface{}) bool { + _, ok := tree.Search(key) + return ok +} + +// doRemove removes the node from the tree by key. +// Key should adhere to the comparator's type assertion, otherwise method panics. +func (tree *BTree) doRemove(key interface{}) (value interface{}) { + node, index, found := tree.searchRecursively(tree.root, key) + if found { + value = node.Entries[index].Value + tree.delete(node, index) + tree.size-- + } + return +} + +// Remove removes the node from the tree by `key`. +func (tree *BTree) Remove(key interface{}) (value interface{}) { + tree.mu.Lock() + defer tree.mu.Unlock() + return tree.doRemove(key) +} + +// Removes batch deletes values of the tree by `keys`. +func (tree *BTree) Removes(keys []interface{}) { + tree.mu.Lock() + defer tree.mu.Unlock() + for _, key := range keys { + tree.doRemove(key) + } +} + +// IsEmpty returns true if tree does not contain any nodes +func (tree *BTree) IsEmpty() bool { + return tree.Size() == 0 +} + +// Size returns number of nodes in the tree. +func (tree *BTree) Size() int { + tree.mu.RLock() + defer tree.mu.RUnlock() + return tree.size +} + +// Keys returns all keys in asc order. +func (tree *BTree) Keys() []interface{} { + keys := make([]interface{}, tree.Size()) + index := 0 + tree.IteratorAsc(func(key, value interface{}) bool { + keys[index] = key + index++ + return true + }) + return keys +} + +// Values returns all values in asc order based on the key. 
+func (tree *BTree) Values() []interface{} { + values := make([]interface{}, tree.Size()) + index := 0 + tree.IteratorAsc(func(key, value interface{}) bool { + values[index] = value + index++ + return true + }) + return values +} + +// Map returns all key-value items as map. +func (tree *BTree) Map() map[interface{}]interface{} { + m := make(map[interface{}]interface{}, tree.Size()) + tree.IteratorAsc(func(key, value interface{}) bool { + m[key] = value + return true + }) + return m +} + +// MapStrAny returns all key-value items as map[string]interface{}. +func (tree *BTree) MapStrAny() map[string]interface{} { + m := make(map[string]interface{}, tree.Size()) + tree.IteratorAsc(func(key, value interface{}) bool { + m[gconv.String(key)] = value + return true + }) + return m +} + +// Clear removes all nodes from the tree. +func (tree *BTree) Clear() { + tree.mu.Lock() + defer tree.mu.Unlock() + tree.root = nil + tree.size = 0 +} + +// Replace the data of the tree with given `data`. +func (tree *BTree) Replace(data map[interface{}]interface{}) { + tree.mu.Lock() + defer tree.mu.Unlock() + tree.root = nil + tree.size = 0 + for k, v := range data { + tree.doSet(k, v) + } +} + +// Height returns the height of the tree. +func (tree *BTree) Height() int { + tree.mu.RLock() + defer tree.mu.RUnlock() + return tree.root.height() +} + +// Left returns the left-most (min) entry or nil if tree is empty. +func (tree *BTree) Left() *BTreeEntry { + tree.mu.RLock() + defer tree.mu.RUnlock() + node := tree.left(tree.root) + if node != nil { + return node.Entries[0] + } + return nil +} + +// Right returns the right-most (max) entry or nil if tree is empty. 
func (tree *BTree) Right() *BTreeEntry {
	tree.mu.RLock()
	defer tree.mu.RUnlock()
	node := tree.right(tree.root)
	if node != nil {
		// The right-most node's last entry holds the maximum key.
		return node.Entries[len(node.Entries)-1]
	}
	return nil
}

// String returns a string representation of container (for debugging purposes)
func (tree *BTree) String() string {
	if tree == nil {
		return ""
	}
	tree.mu.RLock()
	defer tree.mu.RUnlock()
	var buffer bytes.Buffer
	if tree.size != 0 {
		tree.output(&buffer, tree.root, 0, true)
	}
	return buffer.String()
}

// Search searches the tree with given `key`.
// Second return parameter `found` is true if key was found, otherwise false.
func (tree *BTree) Search(key interface{}) (value interface{}, found bool) {
	tree.mu.RLock()
	defer tree.mu.RUnlock()
	node, index, found := tree.searchRecursively(tree.root, key)
	if found {
		return node.Entries[index].Value, true
	}
	return nil, false
}

// doSearch searches the tree with given `key` without mutex.
// It returns the entry if found or otherwise nil.
// Callers must hold the appropriate lock themselves.
func (tree *BTree) doSearch(key interface{}) *BTreeEntry {
	node, index, found := tree.searchRecursively(tree.root, key)
	if found {
		return node.Entries[index]
	}
	return nil
}

// Print prints the tree to stdout.
func (tree *BTree) Print() {
	fmt.Println(tree.String())
}

// Iterator is alias of IteratorAsc.
func (tree *BTree) Iterator(f func(key, value interface{}) bool) {
	tree.IteratorAsc(f)
}

// IteratorFrom is alias of IteratorAscFrom.
func (tree *BTree) IteratorFrom(key interface{}, match bool, f func(key, value interface{}) bool) {
	tree.IteratorAscFrom(key, match, f)
}

// IteratorAsc iterates the tree readonly in ascending order with given callback function `f`.
// If `f` returns true, then it continues iterating; or false to stop.
+func (tree *BTree) IteratorAsc(f func(key, value interface{}) bool) { + tree.mu.RLock() + defer tree.mu.RUnlock() + node := tree.left(tree.root) + if node == nil { + return + } + tree.doIteratorAsc(node, node.Entries[0], 0, f) +} + +// IteratorAscFrom iterates the tree readonly in ascending order with given callback function `f`. +// The parameter `key` specifies the start entry for iterating. The `match` specifies whether +// starting iterating if the `key` is fully matched, or else using index searching iterating. +// If `f` returns true, then it continues iterating; or false to stop. +func (tree *BTree) IteratorAscFrom(key interface{}, match bool, f func(key, value interface{}) bool) { + tree.mu.RLock() + defer tree.mu.RUnlock() + node, index, found := tree.searchRecursively(tree.root, key) + if match { + if found { + tree.doIteratorAsc(node, node.Entries[index], index, f) + } + } else { + if index >= 0 && index < len(node.Entries) { + tree.doIteratorAsc(node, node.Entries[index], index, f) + } + } +} + +func (tree *BTree) doIteratorAsc(node *BTreeNode, entry *BTreeEntry, index int, f func(key, value interface{}) bool) { + first := true +loop: + if entry == nil { + return + } + if !f(entry.Key, entry.Value) { + return + } + // Find current entry position in current node + if !first { + index, _ = tree.search(node, entry.Key) + } else { + first = false + } + // Try to go down to the child right of the current entry + if index+1 < len(node.Children) { + node = node.Children[index+1] + // Try to go down to the child left of the current node + for len(node.Children) > 0 { + node = node.Children[0] + } + // Return the left-most entry + entry = node.Entries[0] + goto loop + } + // Above assures that we have reached a leaf node, so return the next entry in current node (if any) + if index+1 < len(node.Entries) { + entry = node.Entries[index+1] + goto loop + } + // Reached leaf node and there are no entries to the right of the current entry, so go up to the parent + 
for node.Parent != nil { + node = node.Parent + // Find next entry position in current node (note: search returns the first equal or bigger than entry) + index, _ = tree.search(node, entry.Key) + // Check that there is a next entry position in current node + if index < len(node.Entries) { + entry = node.Entries[index] + goto loop + } + } +} + +// IteratorDesc iterates the tree readonly in descending order with given callback function `f`. +// If `f` returns true, then it continues iterating; or false to stop. +func (tree *BTree) IteratorDesc(f func(key, value interface{}) bool) { + tree.mu.RLock() + defer tree.mu.RUnlock() + node := tree.right(tree.root) + if node == nil { + return + } + index := len(node.Entries) - 1 + entry := node.Entries[index] + tree.doIteratorDesc(node, entry, index, f) +} + +// IteratorDescFrom iterates the tree readonly in descending order with given callback function `f`. +// The parameter `key` specifies the start entry for iterating. The `match` specifies whether +// starting iterating if the `key` is fully matched, or else using index searching iterating. +// If `f` returns true, then it continues iterating; or false to stop. +func (tree *BTree) IteratorDescFrom(key interface{}, match bool, f func(key, value interface{}) bool) { + tree.mu.RLock() + defer tree.mu.RUnlock() + node, index, found := tree.searchRecursively(tree.root, key) + if match { + if found { + tree.doIteratorDesc(node, node.Entries[index], index, f) + } + } else { + if index >= 0 && index < len(node.Entries) { + tree.doIteratorDesc(node, node.Entries[index], index, f) + } + } +} + +// IteratorDesc iterates the tree readonly in descending order with given callback function `f`. +// If `f` returns true, then it continues iterating; or false to stop. 
+func (tree *BTree) doIteratorDesc(node *BTreeNode, entry *BTreeEntry, index int, f func(key, value interface{}) bool) { + first := true +loop: + if entry == nil { + return + } + if !f(entry.Key, entry.Value) { + return + } + // Find current entry position in current node + if !first { + index, _ = tree.search(node, entry.Key) + } else { + first = false + } + // Try to go down to the child left of the current entry + if index < len(node.Children) { + node = node.Children[index] + // Try to go down to the child right of the current node + for len(node.Children) > 0 { + node = node.Children[len(node.Children)-1] + } + // Return the right-most entry + entry = node.Entries[len(node.Entries)-1] + goto loop + } + // Above assures that we have reached a leaf node, so return the previous entry in current node (if any) + if index-1 >= 0 { + entry = node.Entries[index-1] + goto loop + } + + // Reached leaf node and there are no entries to the left of the current entry, so go up to the parent + for node.Parent != nil { + node = node.Parent + // Find previous entry position in current node (note: search returns the first equal or bigger than entry) + index, _ = tree.search(node, entry.Key) + // Check that there is a previous entry position in current node + if index-1 >= 0 { + entry = node.Entries[index-1] + goto loop + } + } +} + +func (tree *BTree) output(buffer *bytes.Buffer, node *BTreeNode, level int, isTail bool) { + for e := 0; e < len(node.Entries)+1; e++ { + if e < len(node.Children) { + tree.output(buffer, node.Children[e], level+1, true) + } + if e < len(node.Entries) { + if _, err := buffer.WriteString(strings.Repeat(" ", level)); err != nil { + intlog.Errorf(context.TODO(), `%+v`, err) + } + if _, err := buffer.WriteString(fmt.Sprintf("%v", node.Entries[e].Key) + "\n"); err != nil { + intlog.Errorf(context.TODO(), `%+v`, err) + } + } + } +} + +func (node *BTreeNode) height() int { + h := 0 + n := node + for ; n != nil; n = n.Children[0] { + h++ + if 
len(n.Children) == 0 { + break + } + } + return h +} + +func (tree *BTree) isLeaf(node *BTreeNode) bool { + return len(node.Children) == 0 +} + +// func (tree *BTree) isFull(node *BTreeNode) bool { +// return len(node.Entries) == tree.maxEntries() +// } + +func (tree *BTree) shouldSplit(node *BTreeNode) bool { + return len(node.Entries) > tree.maxEntries() +} + +func (tree *BTree) maxChildren() int { + return tree.m +} + +func (tree *BTree) minChildren() int { + return (tree.m + 1) / 2 // ceil(m/2) +} + +func (tree *BTree) maxEntries() int { + return tree.maxChildren() - 1 +} + +func (tree *BTree) minEntries() int { + return tree.minChildren() - 1 +} + +func (tree *BTree) middle() int { + // "-1" to favor right nodes to have more keys when splitting + return (tree.m - 1) / 2 +} + +// search does search only within the single node among its entries +func (tree *BTree) search(node *BTreeNode, key interface{}) (index int, found bool) { + low, mid, high := 0, 0, len(node.Entries)-1 + for low <= high { + mid = low + (high-low)/2 + compare := tree.getComparator()(key, node.Entries[mid].Key) + switch { + case compare > 0: + low = mid + 1 + case compare < 0: + high = mid - 1 + case compare == 0: + return mid, true + } + } + return low, false +} + +// searchRecursively searches recursively down the tree starting at the startNode +func (tree *BTree) searchRecursively(startNode *BTreeNode, key interface{}) (node *BTreeNode, index int, found bool) { + if tree.size == 0 { + return nil, -1, false + } + node = startNode + for { + index, found = tree.search(node, key) + if found { + return node, index, true + } + if tree.isLeaf(node) { + return node, index, false + } + node = node.Children[index] + } +} + +func (tree *BTree) insert(node *BTreeNode, entry *BTreeEntry) (inserted bool) { + if tree.isLeaf(node) { + return tree.insertIntoLeaf(node, entry) + } + return tree.insertIntoInternal(node, entry) +} + +func (tree *BTree) insertIntoLeaf(node *BTreeNode, entry *BTreeEntry) 
(inserted bool) { + insertPosition, found := tree.search(node, entry.Key) + if found { + node.Entries[insertPosition] = entry + return false + } + // Insert entry's key in the middle of the node + node.Entries = append(node.Entries, nil) + copy(node.Entries[insertPosition+1:], node.Entries[insertPosition:]) + node.Entries[insertPosition] = entry + tree.split(node) + return true +} + +func (tree *BTree) insertIntoInternal(node *BTreeNode, entry *BTreeEntry) (inserted bool) { + insertPosition, found := tree.search(node, entry.Key) + if found { + node.Entries[insertPosition] = entry + return false + } + return tree.insert(node.Children[insertPosition], entry) +} + +func (tree *BTree) split(node *BTreeNode) { + if !tree.shouldSplit(node) { + return + } + + if node == tree.root { + tree.splitRoot() + return + } + + tree.splitNonRoot(node) +} + +func (tree *BTree) splitNonRoot(node *BTreeNode) { + middle := tree.middle() + parent := node.Parent + + left := &BTreeNode{Entries: append([]*BTreeEntry(nil), node.Entries[:middle]...), Parent: parent} + right := &BTreeNode{Entries: append([]*BTreeEntry(nil), node.Entries[middle+1:]...), Parent: parent} + + // Move children from the node to be split into left and right nodes + if !tree.isLeaf(node) { + left.Children = append([]*BTreeNode(nil), node.Children[:middle+1]...) + right.Children = append([]*BTreeNode(nil), node.Children[middle+1:]...) 
+ setParent(left.Children, left) + setParent(right.Children, right) + } + + insertPosition, _ := tree.search(parent, node.Entries[middle].Key) + + // Insert middle key into parent + parent.Entries = append(parent.Entries, nil) + copy(parent.Entries[insertPosition+1:], parent.Entries[insertPosition:]) + parent.Entries[insertPosition] = node.Entries[middle] + + // Set child left of inserted key in parent to the created left node + parent.Children[insertPosition] = left + + // Set child right of inserted key in parent to the created right node + parent.Children = append(parent.Children, nil) + copy(parent.Children[insertPosition+2:], parent.Children[insertPosition+1:]) + parent.Children[insertPosition+1] = right + + tree.split(parent) +} + +func (tree *BTree) splitRoot() { + middle := tree.middle() + left := &BTreeNode{Entries: append([]*BTreeEntry(nil), tree.root.Entries[:middle]...)} + right := &BTreeNode{Entries: append([]*BTreeEntry(nil), tree.root.Entries[middle+1:]...)} + + // Move children from the node to be split into left and right nodes + if !tree.isLeaf(tree.root) { + left.Children = append([]*BTreeNode(nil), tree.root.Children[:middle+1]...) + right.Children = append([]*BTreeNode(nil), tree.root.Children[middle+1:]...) 
+ setParent(left.Children, left) + setParent(right.Children, right) + } + + // Root is a node with one entry and two children (left and right) + newRoot := &BTreeNode{ + Entries: []*BTreeEntry{tree.root.Entries[middle]}, + Children: []*BTreeNode{left, right}, + } + + left.Parent = newRoot + right.Parent = newRoot + tree.root = newRoot +} + +func setParent(nodes []*BTreeNode, parent *BTreeNode) { + for _, node := range nodes { + node.Parent = parent + } +} + +func (tree *BTree) left(node *BTreeNode) *BTreeNode { + if tree.size == 0 { + return nil + } + current := node + for { + if tree.isLeaf(current) { + return current + } + current = current.Children[0] + } +} + +func (tree *BTree) right(node *BTreeNode) *BTreeNode { + if tree.size == 0 { + return nil + } + current := node + for { + if tree.isLeaf(current) { + return current + } + current = current.Children[len(current.Children)-1] + } +} + +// leftSibling returns the node's left sibling and child index (in parent) if it exists, otherwise (nil,-1) +// key is any of keys in node (could even be deleted). +func (tree *BTree) leftSibling(node *BTreeNode, key interface{}) (*BTreeNode, int) { + if node.Parent != nil { + index, _ := tree.search(node.Parent, key) + index-- + if index >= 0 && index < len(node.Parent.Children) { + return node.Parent.Children[index], index + } + } + return nil, -1 +} + +// rightSibling returns the node's right sibling and child index (in parent) if it exists, otherwise (nil,-1) +// key is any of keys in node (could even be deleted). 
+func (tree *BTree) rightSibling(node *BTreeNode, key interface{}) (*BTreeNode, int) { + if node.Parent != nil { + index, _ := tree.search(node.Parent, key) + index++ + if index < len(node.Parent.Children) { + return node.Parent.Children[index], index + } + } + return nil, -1 +} + +// delete deletes an entry in node at entries' index +// ref.: https://en.wikipedia.org/wiki/B-tree#Deletion +func (tree *BTree) delete(node *BTreeNode, index int) { + // deleting from a leaf node + if tree.isLeaf(node) { + deletedKey := node.Entries[index].Key + tree.deleteEntry(node, index) + tree.reBalance(node, deletedKey) + if len(tree.root.Entries) == 0 { + tree.root = nil + } + return + } + + // deleting from an internal node + leftLargestNode := tree.right(node.Children[index]) // largest node in the left sub-tree (assumed to exist) + leftLargestEntryIndex := len(leftLargestNode.Entries) - 1 + node.Entries[index] = leftLargestNode.Entries[leftLargestEntryIndex] + deletedKey := leftLargestNode.Entries[leftLargestEntryIndex].Key + tree.deleteEntry(leftLargestNode, leftLargestEntryIndex) + tree.reBalance(leftLargestNode, deletedKey) +} + +// reBalance reBalances the tree after deletion if necessary and returns true, otherwise false. +// Note that we first delete the entry and then call reBalance, thus the passed deleted key as reference. +func (tree *BTree) reBalance(node *BTreeNode, deletedKey interface{}) { + // check if re-balancing is needed + if node == nil || len(node.Entries) >= tree.minEntries() { + return + } + + // try to borrow from left sibling + leftSibling, leftSiblingIndex := tree.leftSibling(node, deletedKey) + if leftSibling != nil && len(leftSibling.Entries) > tree.minEntries() { + // rotate right + node.Entries = append([]*BTreeEntry{node.Parent.Entries[leftSiblingIndex]}, node.Entries...) 
// prepend parent's separator entry to node's entries + node.Parent.Entries[leftSiblingIndex] = leftSibling.Entries[len(leftSibling.Entries)-1] + tree.deleteEntry(leftSibling, len(leftSibling.Entries)-1) + if !tree.isLeaf(leftSibling) { + leftSiblingRightMostChild := leftSibling.Children[len(leftSibling.Children)-1] + leftSiblingRightMostChild.Parent = node + node.Children = append([]*BTreeNode{leftSiblingRightMostChild}, node.Children...) + tree.deleteChild(leftSibling, len(leftSibling.Children)-1) + } + return + } + + // try to borrow from right sibling + rightSibling, rightSiblingIndex := tree.rightSibling(node, deletedKey) + if rightSibling != nil && len(rightSibling.Entries) > tree.minEntries() { + // rotate left + node.Entries = append(node.Entries, node.Parent.Entries[rightSiblingIndex-1]) // append parent's separator entry to node's entries + node.Parent.Entries[rightSiblingIndex-1] = rightSibling.Entries[0] + tree.deleteEntry(rightSibling, 0) + if !tree.isLeaf(rightSibling) { + rightSiblingLeftMostChild := rightSibling.Children[0] + rightSiblingLeftMostChild.Parent = node + node.Children = append(node.Children, rightSiblingLeftMostChild) + tree.deleteChild(rightSibling, 0) + } + return + } + + // merge with siblings + if rightSibling != nil { + // merge with right sibling + node.Entries = append(node.Entries, node.Parent.Entries[rightSiblingIndex-1]) + node.Entries = append(node.Entries, rightSibling.Entries...) + deletedKey = node.Parent.Entries[rightSiblingIndex-1].Key + tree.deleteEntry(node.Parent, rightSiblingIndex-1) + tree.appendChildren(node.Parent.Children[rightSiblingIndex], node) + tree.deleteChild(node.Parent, rightSiblingIndex) + } else if leftSibling != nil { + // merge with left sibling + entries := append([]*BTreeEntry(nil), leftSibling.Entries...) + entries = append(entries, node.Parent.Entries[leftSiblingIndex]) + node.Entries = append(entries, node.Entries...) 
+ deletedKey = node.Parent.Entries[leftSiblingIndex].Key + tree.deleteEntry(node.Parent, leftSiblingIndex) + tree.prependChildren(node.Parent.Children[leftSiblingIndex], node) + tree.deleteChild(node.Parent, leftSiblingIndex) + } + + // make the merged node the root if its parent was the root and the root is empty + if node.Parent == tree.root && len(tree.root.Entries) == 0 { + tree.root = node + node.Parent = nil + return + } + + // parent might be underflow, so try to reBalance if necessary + tree.reBalance(node.Parent, deletedKey) +} + +func (tree *BTree) prependChildren(fromNode *BTreeNode, toNode *BTreeNode) { + children := append([]*BTreeNode(nil), fromNode.Children...) + toNode.Children = append(children, toNode.Children...) + setParent(fromNode.Children, toNode) +} + +func (tree *BTree) appendChildren(fromNode *BTreeNode, toNode *BTreeNode) { + toNode.Children = append(toNode.Children, fromNode.Children...) + setParent(fromNode.Children, toNode) +} + +func (tree *BTree) deleteEntry(node *BTreeNode, index int) { + copy(node.Entries[index:], node.Entries[index+1:]) + node.Entries[len(node.Entries)-1] = nil + node.Entries = node.Entries[:len(node.Entries)-1] +} + +func (tree *BTree) deleteChild(node *BTreeNode, index int) { + if index >= len(node.Children) { + return + } + copy(node.Children[index:], node.Children[index+1:]) + node.Children[len(node.Children)-1] = nil + node.Children = node.Children[:len(node.Children)-1] +} + +// MarshalJSON implements the interface MarshalJSON for json.Marshal. 
+func (tree BTree) MarshalJSON() (jsonBytes []byte, err error) { + if tree.root == nil { + return []byte("null"), nil + } + buffer := bytes.NewBuffer(nil) + buffer.WriteByte('{') + tree.Iterator(func(key, value interface{}) bool { + valueBytes, valueJsonErr := json.Marshal(value) + if valueJsonErr != nil { + err = valueJsonErr + return false + } + if buffer.Len() > 1 { + buffer.WriteByte(',') + } + buffer.WriteString(fmt.Sprintf(`"%v":%s`, key, valueBytes)) + return true + }) + buffer.WriteByte('}') + return buffer.Bytes(), nil +} + +// getComparator returns the comparator if it's previously set, +// or else it panics. +func (tree *BTree) getComparator() func(a, b interface{}) int { + if tree.comparator == nil { + panic("comparator is missing for tree") + } + return tree.comparator +} diff --git a/vendor/github.com/gogf/gf/v2/container/gtree/gtree_redblacktree.go b/vendor/github.com/gogf/gf/v2/container/gtree/gtree_redblacktree.go new file mode 100644 index 00000000..e9da6a07 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/container/gtree/gtree_redblacktree.go @@ -0,0 +1,991 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gtree + +import ( + "bytes" + "fmt" + + "github.com/gogf/gf/v2/container/gvar" + "github.com/gogf/gf/v2/internal/json" + "github.com/gogf/gf/v2/internal/rwmutex" + "github.com/gogf/gf/v2/util/gconv" + "github.com/gogf/gf/v2/util/gutil" +) + +type color bool + +const ( + black, red color = true, false +) + +// RedBlackTree holds elements of the red-black tree. +type RedBlackTree struct { + mu rwmutex.RWMutex + root *RedBlackTreeNode + size int + comparator func(v1, v2 interface{}) int +} + +// RedBlackTreeNode is a single element within the tree. 
+type RedBlackTreeNode struct { + Key interface{} + Value interface{} + color color + left *RedBlackTreeNode + right *RedBlackTreeNode + parent *RedBlackTreeNode +} + +// NewRedBlackTree instantiates a red-black tree with the custom key comparator. +// The parameter `safe` is used to specify whether using tree in concurrent-safety, +// which is false in default. +func NewRedBlackTree(comparator func(v1, v2 interface{}) int, safe ...bool) *RedBlackTree { + return &RedBlackTree{ + mu: rwmutex.Create(safe...), + comparator: comparator, + } +} + +// NewRedBlackTreeFrom instantiates a red-black tree with the custom key comparator and `data` map. +// The parameter `safe` is used to specify whether using tree in concurrent-safety, +// which is false in default. +func NewRedBlackTreeFrom(comparator func(v1, v2 interface{}) int, data map[interface{}]interface{}, safe ...bool) *RedBlackTree { + tree := NewRedBlackTree(comparator, safe...) + for k, v := range data { + tree.doSet(k, v) + } + return tree +} + +// SetComparator sets/changes the comparator for sorting. +func (tree *RedBlackTree) SetComparator(comparator func(a, b interface{}) int) { + tree.mu.Lock() + defer tree.mu.Unlock() + tree.comparator = comparator + if tree.size > 0 { + data := make(map[interface{}]interface{}, tree.size) + tree.doIteratorAsc(tree.leftNode(), func(key, value interface{}) bool { + data[key] = value + return true + }) + // Resort the tree if comparator is changed. + tree.root = nil + tree.size = 0 + for k, v := range data { + tree.doSet(k, v) + } + } +} + +// Clone returns a new tree with a copy of current tree. +func (tree *RedBlackTree) Clone() *RedBlackTree { + newTree := NewRedBlackTree(tree.comparator, tree.mu.IsSafe()) + newTree.Sets(tree.Map()) + return newTree +} + +// Set inserts key-value item into the tree. 
+func (tree *RedBlackTree) Set(key interface{}, value interface{}) { + tree.mu.Lock() + defer tree.mu.Unlock() + tree.doSet(key, value) +} + +// Sets batch sets key-values to the tree. +func (tree *RedBlackTree) Sets(data map[interface{}]interface{}) { + tree.mu.Lock() + defer tree.mu.Unlock() + for k, v := range data { + tree.doSet(k, v) + } +} + +// doSet inserts key-value item into the tree without mutex. +func (tree *RedBlackTree) doSet(key interface{}, value interface{}) { + insertedNode := (*RedBlackTreeNode)(nil) + if tree.root == nil { + // Assert key is of comparator's type for initial tree + tree.getComparator()(key, key) + tree.root = &RedBlackTreeNode{Key: key, Value: value, color: red} + insertedNode = tree.root + } else { + node := tree.root + loop := true + for loop { + compare := tree.getComparator()(key, node.Key) + switch { + case compare == 0: + // node.Key = key + node.Value = value + return + case compare < 0: + if node.left == nil { + node.left = &RedBlackTreeNode{Key: key, Value: value, color: red} + insertedNode = node.left + loop = false + } else { + node = node.left + } + case compare > 0: + if node.right == nil { + node.right = &RedBlackTreeNode{Key: key, Value: value, color: red} + insertedNode = node.right + loop = false + } else { + node = node.right + } + } + } + insertedNode.parent = node + } + tree.insertCase1(insertedNode) + tree.size++ +} + +// Get searches the node in the tree by `key` and returns its value or nil if key is not found in tree. +func (tree *RedBlackTree) Get(key interface{}) (value interface{}) { + value, _ = tree.Search(key) + return +} + +// doSetWithLockCheck checks whether value of the key exists with mutex.Lock, +// if not exists, set value to the map with given `key`, +// or else just return the existing value. +// +// When setting value, if `value` is type of , +// it will be executed with mutex.Lock of the hash map, +// and its return value will be set to the map with `key`. 
+// +// It returns value with given `key`. +func (tree *RedBlackTree) doSetWithLockCheck(key interface{}, value interface{}) interface{} { + tree.mu.Lock() + defer tree.mu.Unlock() + if node, found := tree.doSearch(key); found { + return node.Value + } + if f, ok := value.(func() interface{}); ok { + value = f() + } + if value != nil { + tree.doSet(key, value) + } + return value +} + +// GetOrSet returns the value by key, +// or sets value with given `value` if it does not exist and then returns this value. +func (tree *RedBlackTree) GetOrSet(key interface{}, value interface{}) interface{} { + if v, ok := tree.Search(key); !ok { + return tree.doSetWithLockCheck(key, value) + } else { + return v + } +} + +// GetOrSetFunc returns the value by key, +// or sets value with returned value of callback function `f` if it does not exist +// and then returns this value. +func (tree *RedBlackTree) GetOrSetFunc(key interface{}, f func() interface{}) interface{} { + if v, ok := tree.Search(key); !ok { + return tree.doSetWithLockCheck(key, f()) + } else { + return v + } +} + +// GetOrSetFuncLock returns the value by key, +// or sets value with returned value of callback function `f` if it does not exist +// and then returns this value. +// +// GetOrSetFuncLock differs with GetOrSetFunc function is that it executes function `f` +// with mutex.Lock of the hash map. +func (tree *RedBlackTree) GetOrSetFuncLock(key interface{}, f func() interface{}) interface{} { + if v, ok := tree.Search(key); !ok { + return tree.doSetWithLockCheck(key, f) + } else { + return v + } +} + +// GetVar returns a gvar.Var with the value by given `key`. +// The returned gvar.Var is un-concurrent safe. +func (tree *RedBlackTree) GetVar(key interface{}) *gvar.Var { + return gvar.New(tree.Get(key)) +} + +// GetVarOrSet returns a gvar.Var with result from GetVarOrSet. +// The returned gvar.Var is un-concurrent safe. 
+func (tree *RedBlackTree) GetVarOrSet(key interface{}, value interface{}) *gvar.Var { + return gvar.New(tree.GetOrSet(key, value)) +} + +// GetVarOrSetFunc returns a gvar.Var with result from GetOrSetFunc. +// The returned gvar.Var is un-concurrent safe. +func (tree *RedBlackTree) GetVarOrSetFunc(key interface{}, f func() interface{}) *gvar.Var { + return gvar.New(tree.GetOrSetFunc(key, f)) +} + +// GetVarOrSetFuncLock returns a gvar.Var with result from GetOrSetFuncLock. +// The returned gvar.Var is un-concurrent safe. +func (tree *RedBlackTree) GetVarOrSetFuncLock(key interface{}, f func() interface{}) *gvar.Var { + return gvar.New(tree.GetOrSetFuncLock(key, f)) +} + +// SetIfNotExist sets `value` to the map if the `key` does not exist, and then returns true. +// It returns false if `key` exists, and `value` would be ignored. +func (tree *RedBlackTree) SetIfNotExist(key interface{}, value interface{}) bool { + if !tree.Contains(key) { + tree.doSetWithLockCheck(key, value) + return true + } + return false +} + +// SetIfNotExistFunc sets value with return value of callback function `f`, and then returns true. +// It returns false if `key` exists, and `value` would be ignored. +func (tree *RedBlackTree) SetIfNotExistFunc(key interface{}, f func() interface{}) bool { + if !tree.Contains(key) { + tree.doSetWithLockCheck(key, f()) + return true + } + return false +} + +// SetIfNotExistFuncLock sets value with return value of callback function `f`, and then returns true. +// It returns false if `key` exists, and `value` would be ignored. +// +// SetIfNotExistFuncLock differs with SetIfNotExistFunc function is that +// it executes function `f` with mutex.Lock of the hash map. +func (tree *RedBlackTree) SetIfNotExistFuncLock(key interface{}, f func() interface{}) bool { + if !tree.Contains(key) { + tree.doSetWithLockCheck(key, f) + return true + } + return false +} + +// Contains checks whether `key` exists in the tree. 
+func (tree *RedBlackTree) Contains(key interface{}) bool { + _, ok := tree.Search(key) + return ok +} + +// doRemove removes the node from the tree by `key` without mutex. +func (tree *RedBlackTree) doRemove(key interface{}) (value interface{}) { + child := (*RedBlackTreeNode)(nil) + node, found := tree.doSearch(key) + if !found { + return + } + value = node.Value + if node.left != nil && node.right != nil { + p := node.left.maximumNode() + node.Key = p.Key + node.Value = p.Value + node = p + } + if node.left == nil || node.right == nil { + if node.right == nil { + child = node.left + } else { + child = node.right + } + if node.color == black { + node.color = tree.nodeColor(child) + tree.deleteCase1(node) + } + tree.replaceNode(node, child) + if node.parent == nil && child != nil { + child.color = black + } + } + tree.size-- + return +} + +// Remove removes the node from the tree by `key`. +func (tree *RedBlackTree) Remove(key interface{}) (value interface{}) { + tree.mu.Lock() + defer tree.mu.Unlock() + return tree.doRemove(key) +} + +// Removes batch deletes values of the tree by `keys`. +func (tree *RedBlackTree) Removes(keys []interface{}) { + tree.mu.Lock() + defer tree.mu.Unlock() + for _, key := range keys { + tree.doRemove(key) + } +} + +// IsEmpty returns true if tree does not contain any nodes. +func (tree *RedBlackTree) IsEmpty() bool { + return tree.Size() == 0 +} + +// Size returns number of nodes in the tree. +func (tree *RedBlackTree) Size() int { + tree.mu.RLock() + defer tree.mu.RUnlock() + return tree.size +} + +// Keys returns all keys in asc order. +func (tree *RedBlackTree) Keys() []interface{} { + var ( + keys = make([]interface{}, tree.Size()) + index = 0 + ) + tree.IteratorAsc(func(key, value interface{}) bool { + keys[index] = key + index++ + return true + }) + return keys +} + +// Values returns all values in asc order based on the key. 
+func (tree *RedBlackTree) Values() []interface{} {
+	var (
+		values = make([]interface{}, tree.Size())
+		index  = 0
+	)
+	tree.IteratorAsc(func(key, value interface{}) bool {
+		values[index] = value
+		index++
+		return true
+	})
+	return values
+}
+
+// Map returns all key-value items as map.
+func (tree *RedBlackTree) Map() map[interface{}]interface{} {
+	m := make(map[interface{}]interface{}, tree.Size())
+	tree.IteratorAsc(func(key, value interface{}) bool {
+		m[key] = value
+		return true
+	})
+	return m
+}
+
+// MapStrAny returns all key-value items as map[string]interface{}.
+func (tree *RedBlackTree) MapStrAny() map[string]interface{} {
+	m := make(map[string]interface{}, tree.Size())
+	tree.IteratorAsc(func(key, value interface{}) bool {
+		m[gconv.String(key)] = value
+		return true
+	})
+	return m
+}
+
+// Left returns the left-most (min) node or nil if tree is empty.
+func (tree *RedBlackTree) Left() *RedBlackTreeNode {
+	tree.mu.RLock()
+	defer tree.mu.RUnlock()
+	node := tree.leftNode()
+	if node != nil && tree.mu.IsSafe() {
+		return &RedBlackTreeNode{
+			Key:   node.Key,
+			Value: node.Value,
+		}
+	}
+	return node
+}
+
+// Right returns the right-most (max) node or nil if tree is empty.
+func (tree *RedBlackTree) Right() *RedBlackTreeNode {
+	tree.mu.RLock()
+	defer tree.mu.RUnlock()
+	node := tree.rightNode()
+	if node != nil && tree.mu.IsSafe() {
+		return &RedBlackTreeNode{
+			Key:   node.Key,
+			Value: node.Value,
+		}
+	}
+	return node
+}
+
+// leftNode returns the left-most (min) node or nil if tree is empty.
+func (tree *RedBlackTree) leftNode() *RedBlackTreeNode {
+	p := (*RedBlackTreeNode)(nil)
+	n := tree.root
+	for n != nil {
+		p = n
+		n = n.left
+	}
+	return p
+}
+
+// rightNode returns the right-most (max) node or nil if tree is empty. 
+func (tree *RedBlackTree) rightNode() *RedBlackTreeNode { + p := (*RedBlackTreeNode)(nil) + n := tree.root + for n != nil { + p = n + n = n.right + } + return p +} + +// Floor Finds floor node of the input key, return the floor node or nil if no floor node is found. +// Second return parameter is true if floor was found, otherwise false. +// +// Floor node is defined as the largest node that its key is smaller than or equal to the given `key`. +// A floor node may not be found, either because the tree is empty, or because +// all nodes in the tree are larger than the given node. +func (tree *RedBlackTree) Floor(key interface{}) (floor *RedBlackTreeNode, found bool) { + tree.mu.RLock() + defer tree.mu.RUnlock() + n := tree.root + for n != nil { + compare := tree.getComparator()(key, n.Key) + switch { + case compare == 0: + return n, true + case compare < 0: + n = n.left + case compare > 0: + floor, found = n, true + n = n.right + } + } + if found { + return + } + return nil, false +} + +// Ceiling finds ceiling node of the input key, return the ceiling node or nil if no ceiling node is found. +// Second return parameter is true if ceiling was found, otherwise false. +// +// Ceiling node is defined as the smallest node that its key is larger than or equal to the given `key`. +// A ceiling node may not be found, either because the tree is empty, or because +// all nodes in the tree are smaller than the given node. +func (tree *RedBlackTree) Ceiling(key interface{}) (ceiling *RedBlackTreeNode, found bool) { + tree.mu.RLock() + defer tree.mu.RUnlock() + n := tree.root + for n != nil { + compare := tree.getComparator()(key, n.Key) + switch { + case compare == 0: + return n, true + case compare > 0: + n = n.right + case compare < 0: + ceiling, found = n, true + n = n.left + } + } + if found { + return + } + return nil, false +} + +// Iterator is alias of IteratorAsc. 
+func (tree *RedBlackTree) Iterator(f func(key, value interface{}) bool) { + tree.IteratorAsc(f) +} + +// IteratorFrom is alias of IteratorAscFrom. +func (tree *RedBlackTree) IteratorFrom(key interface{}, match bool, f func(key, value interface{}) bool) { + tree.IteratorAscFrom(key, match, f) +} + +// IteratorAsc iterates the tree readonly in ascending order with given callback function `f`. +// If `f` returns true, then it continues iterating; or false to stop. +func (tree *RedBlackTree) IteratorAsc(f func(key, value interface{}) bool) { + tree.mu.RLock() + defer tree.mu.RUnlock() + tree.doIteratorAsc(tree.leftNode(), f) +} + +// IteratorAscFrom iterates the tree readonly in ascending order with given callback function `f`. +// The parameter `key` specifies the start entry for iterating. The `match` specifies whether +// starting iterating if the `key` is fully matched, or else using index searching iterating. +// If `f` returns true, then it continues iterating; or false to stop. +func (tree *RedBlackTree) IteratorAscFrom(key interface{}, match bool, f func(key, value interface{}) bool) { + tree.mu.RLock() + defer tree.mu.RUnlock() + node, found := tree.doSearch(key) + if match { + if found { + tree.doIteratorAsc(node, f) + } + } else { + tree.doIteratorAsc(node, f) + } +} + +func (tree *RedBlackTree) doIteratorAsc(node *RedBlackTreeNode, f func(key, value interface{}) bool) { +loop: + if node == nil { + return + } + if !f(node.Key, node.Value) { + return + } + if node.right != nil { + node = node.right + for node.left != nil { + node = node.left + } + goto loop + } + if node.parent != nil { + old := node + for node.parent != nil { + node = node.parent + if tree.getComparator()(old.Key, node.Key) <= 0 { + goto loop + } + } + } +} + +// IteratorDesc iterates the tree readonly in descending order with given callback function `f`. +// If `f` returns true, then it continues iterating; or false to stop. 
+func (tree *RedBlackTree) IteratorDesc(f func(key, value interface{}) bool) { + tree.mu.RLock() + defer tree.mu.RUnlock() + tree.doIteratorDesc(tree.rightNode(), f) +} + +// IteratorDescFrom iterates the tree readonly in descending order with given callback function `f`. +// The parameter `key` specifies the start entry for iterating. The `match` specifies whether +// starting iterating if the `key` is fully matched, or else using index searching iterating. +// If `f` returns true, then it continues iterating; or false to stop. +func (tree *RedBlackTree) IteratorDescFrom(key interface{}, match bool, f func(key, value interface{}) bool) { + tree.mu.RLock() + defer tree.mu.RUnlock() + node, found := tree.doSearch(key) + if match { + if found { + tree.doIteratorDesc(node, f) + } + } else { + tree.doIteratorDesc(node, f) + } +} + +func (tree *RedBlackTree) doIteratorDesc(node *RedBlackTreeNode, f func(key, value interface{}) bool) { +loop: + if node == nil { + return + } + if !f(node.Key, node.Value) { + return + } + if node.left != nil { + node = node.left + for node.right != nil { + node = node.right + } + goto loop + } + if node.parent != nil { + old := node + for node.parent != nil { + node = node.parent + if tree.getComparator()(old.Key, node.Key) >= 0 { + goto loop + } + } + } +} + +// Clear removes all nodes from the tree. +func (tree *RedBlackTree) Clear() { + tree.mu.Lock() + defer tree.mu.Unlock() + tree.root = nil + tree.size = 0 +} + +// Replace the data of the tree with given `data`. +func (tree *RedBlackTree) Replace(data map[interface{}]interface{}) { + tree.mu.Lock() + defer tree.mu.Unlock() + tree.root = nil + tree.size = 0 + for k, v := range data { + tree.doSet(k, v) + } +} + +// String returns a string representation of container. 
+func (tree *RedBlackTree) String() string { + if tree == nil { + return "" + } + tree.mu.RLock() + defer tree.mu.RUnlock() + str := "" + if tree.size != 0 { + tree.output(tree.root, "", true, &str) + } + return str +} + +// Print prints the tree to stdout. +func (tree *RedBlackTree) Print() { + fmt.Println(tree.String()) +} + +// Search searches the tree with given `key`. +// Second return parameter `found` is true if key was found, otherwise false. +func (tree *RedBlackTree) Search(key interface{}) (value interface{}, found bool) { + tree.mu.RLock() + defer tree.mu.RUnlock() + node, found := tree.doSearch(key) + if found { + return node.Value, true + } + return nil, false +} + +// Flip exchanges key-value of the tree to value-key. +// Note that you should guarantee the value is the same type as key, +// or else the comparator would panic. +// +// If the type of value is different with key, you pass the new `comparator`. +func (tree *RedBlackTree) Flip(comparator ...func(v1, v2 interface{}) int) { + t := (*RedBlackTree)(nil) + if len(comparator) > 0 { + t = NewRedBlackTree(comparator[0], tree.mu.IsSafe()) + } else { + t = NewRedBlackTree(tree.comparator, tree.mu.IsSafe()) + } + tree.IteratorAsc(func(key, value interface{}) bool { + t.doSet(value, key) + return true + }) + tree.mu.Lock() + tree.root = t.root + tree.size = t.size + tree.mu.Unlock() +} + +func (tree *RedBlackTree) output(node *RedBlackTreeNode, prefix string, isTail bool, str *string) { + if node.right != nil { + newPrefix := prefix + if isTail { + newPrefix += "│ " + } else { + newPrefix += " " + } + tree.output(node.right, newPrefix, false, str) + } + *str += prefix + if isTail { + *str += "└── " + } else { + *str += "┌── " + } + *str += fmt.Sprintf("%v\n", node.Key) + if node.left != nil { + newPrefix := prefix + if isTail { + newPrefix += " " + } else { + newPrefix += "│ " + } + tree.output(node.left, newPrefix, true, str) + } +} + +// doSearch searches the tree with given `key` without mutex. 
+// It returns the node if found or otherwise nil. +func (tree *RedBlackTree) doSearch(key interface{}) (node *RedBlackTreeNode, found bool) { + node = tree.root + for node != nil { + compare := tree.getComparator()(key, node.Key) + switch { + case compare == 0: + return node, true + case compare < 0: + node = node.left + case compare > 0: + node = node.right + } + } + return node, false +} + +func (node *RedBlackTreeNode) grandparent() *RedBlackTreeNode { + if node != nil && node.parent != nil { + return node.parent.parent + } + return nil +} + +func (node *RedBlackTreeNode) uncle() *RedBlackTreeNode { + if node == nil || node.parent == nil || node.parent.parent == nil { + return nil + } + return node.parent.sibling() +} + +func (node *RedBlackTreeNode) sibling() *RedBlackTreeNode { + if node == nil || node.parent == nil { + return nil + } + if node == node.parent.left { + return node.parent.right + } + return node.parent.left +} + +func (tree *RedBlackTree) rotateLeft(node *RedBlackTreeNode) { + right := node.right + tree.replaceNode(node, right) + node.right = right.left + if right.left != nil { + right.left.parent = node + } + right.left = node + node.parent = right +} + +func (tree *RedBlackTree) rotateRight(node *RedBlackTreeNode) { + left := node.left + tree.replaceNode(node, left) + node.left = left.right + if left.right != nil { + left.right.parent = node + } + left.right = node + node.parent = left +} + +func (tree *RedBlackTree) replaceNode(old *RedBlackTreeNode, new *RedBlackTreeNode) { + if old.parent == nil { + tree.root = new + } else { + if old == old.parent.left { + old.parent.left = new + } else { + old.parent.right = new + } + } + if new != nil { + new.parent = old.parent + } +} + +func (tree *RedBlackTree) insertCase1(node *RedBlackTreeNode) { + if node.parent == nil { + node.color = black + } else { + tree.insertCase2(node) + } +} + +func (tree *RedBlackTree) insertCase2(node *RedBlackTreeNode) { + if tree.nodeColor(node.parent) == black { + 
return
+	}
+	tree.insertCase3(node)
+}
+
+func (tree *RedBlackTree) insertCase3(node *RedBlackTreeNode) {
+	uncle := node.uncle()
+	if tree.nodeColor(uncle) == red {
+		node.parent.color = black
+		uncle.color = black
+		node.grandparent().color = red
+		tree.insertCase1(node.grandparent())
+	} else {
+		tree.insertCase4(node)
+	}
+}
+
+func (tree *RedBlackTree) insertCase4(node *RedBlackTreeNode) {
+	grandparent := node.grandparent()
+	if node == node.parent.right && node.parent == grandparent.left {
+		tree.rotateLeft(node.parent)
+		node = node.left
+	} else if node == node.parent.left && node.parent == grandparent.right {
+		tree.rotateRight(node.parent)
+		node = node.right
+	}
+	tree.insertCase5(node)
+}
+
+func (tree *RedBlackTree) insertCase5(node *RedBlackTreeNode) {
+	node.parent.color = black
+	grandparent := node.grandparent()
+	grandparent.color = red
+	if node == node.parent.left && node.parent == grandparent.left {
+		tree.rotateRight(grandparent)
+	} else if node == node.parent.right && node.parent == grandparent.right {
+		tree.rotateLeft(grandparent)
+	}
+}
+
+func (node *RedBlackTreeNode) maximumNode() *RedBlackTreeNode {
+	if node == nil {
+		return nil
+	}
+	for node.right != nil {
+		node = node.right
+	}
+	return node
+}
+
+func (tree *RedBlackTree) deleteCase1(node *RedBlackTreeNode) {
+	if node.parent == nil {
+		return
+	}
+	tree.deleteCase2(node)
+}
+
+func (tree *RedBlackTree) deleteCase2(node *RedBlackTreeNode) {
+	sibling := node.sibling()
+	if tree.nodeColor(sibling) == red {
+		node.parent.color = red
+		sibling.color = black
+		if node == node.parent.left {
+			tree.rotateLeft(node.parent)
+		} else {
+			tree.rotateRight(node.parent)
+		}
+	}
+	tree.deleteCase3(node)
+}
+
+func (tree *RedBlackTree) deleteCase3(node *RedBlackTreeNode) {
+	sibling := node.sibling()
+	if tree.nodeColor(node.parent) == black &&
+		tree.nodeColor(sibling) == black &&
+		tree.nodeColor(sibling.left) == black &&
+		tree.nodeColor(sibling.right) == black {
+		sibling.color = red
+ 
tree.deleteCase1(node.parent) + } else { + tree.deleteCase4(node) + } +} + +func (tree *RedBlackTree) deleteCase4(node *RedBlackTreeNode) { + sibling := node.sibling() + if tree.nodeColor(node.parent) == red && + tree.nodeColor(sibling) == black && + tree.nodeColor(sibling.left) == black && + tree.nodeColor(sibling.right) == black { + sibling.color = red + node.parent.color = black + } else { + tree.deleteCase5(node) + } +} + +func (tree *RedBlackTree) deleteCase5(node *RedBlackTreeNode) { + sibling := node.sibling() + if node == node.parent.left && + tree.nodeColor(sibling) == black && + tree.nodeColor(sibling.left) == red && + tree.nodeColor(sibling.right) == black { + sibling.color = red + sibling.left.color = black + tree.rotateRight(sibling) + } else if node == node.parent.right && + tree.nodeColor(sibling) == black && + tree.nodeColor(sibling.right) == red && + tree.nodeColor(sibling.left) == black { + sibling.color = red + sibling.right.color = black + tree.rotateLeft(sibling) + } + tree.deleteCase6(node) +} + +func (tree *RedBlackTree) deleteCase6(node *RedBlackTreeNode) { + sibling := node.sibling() + sibling.color = tree.nodeColor(node.parent) + node.parent.color = black + if node == node.parent.left && tree.nodeColor(sibling.right) == red { + sibling.right.color = black + tree.rotateLeft(node.parent) + } else if tree.nodeColor(sibling.left) == red { + sibling.left.color = black + tree.rotateRight(node.parent) + } +} + +func (tree *RedBlackTree) nodeColor(node *RedBlackTreeNode) color { + if node == nil { + return black + } + return node.color +} + +// MarshalJSON implements the interface MarshalJSON for json.Marshal. 
+func (tree RedBlackTree) MarshalJSON() (jsonBytes []byte, err error) { + if tree.root == nil { + return []byte("null"), nil + } + buffer := bytes.NewBuffer(nil) + buffer.WriteByte('{') + tree.Iterator(func(key, value interface{}) bool { + valueBytes, valueJsonErr := json.Marshal(value) + if valueJsonErr != nil { + err = valueJsonErr + return false + } + if buffer.Len() > 1 { + buffer.WriteByte(',') + } + buffer.WriteString(fmt.Sprintf(`"%v":%s`, key, valueBytes)) + return true + }) + buffer.WriteByte('}') + return buffer.Bytes(), nil +} + +// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. +func (tree *RedBlackTree) UnmarshalJSON(b []byte) error { + tree.mu.Lock() + defer tree.mu.Unlock() + if tree.comparator == nil { + tree.comparator = gutil.ComparatorString + } + var data map[string]interface{} + if err := json.UnmarshalUseNumber(b, &data); err != nil { + return err + } + for k, v := range data { + tree.doSet(k, v) + } + return nil +} + +// UnmarshalValue is an interface implement which sets any type of value for map. +func (tree *RedBlackTree) UnmarshalValue(value interface{}) (err error) { + tree.mu.Lock() + defer tree.mu.Unlock() + if tree.comparator == nil { + tree.comparator = gutil.ComparatorString + } + for k, v := range gconv.Map(value) { + tree.doSet(k, v) + } + return +} + +// getComparator returns the comparator if it's previously set, +// or else it panics. +func (tree *RedBlackTree) getComparator() func(a, b interface{}) int { + if tree.comparator == nil { + panic("comparator is missing for tree") + } + return tree.comparator +} diff --git a/vendor/github.com/gogf/gf/v2/container/gtype/gtype.go b/vendor/github.com/gogf/gf/v2/container/gtype/gtype.go new file mode 100644 index 00000000..d6711f2a --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/container/gtype/gtype.go @@ -0,0 +1,14 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. 
+// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +// Package gtype provides high performance and concurrent-safe basic variable types. +package gtype + +// New is alias of NewInterface. +// See NewInterface. +func New(value ...interface{}) *Interface { + return NewInterface(value...) +} diff --git a/vendor/github.com/gogf/gf/v2/container/gtype/gtype_bool.go b/vendor/github.com/gogf/gf/v2/container/gtype/gtype_bool.go new file mode 100644 index 00000000..b85dfd91 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/container/gtype/gtype_bool.go @@ -0,0 +1,106 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gtype + +import ( + "bytes" + "sync/atomic" + + "github.com/gogf/gf/v2/util/gconv" +) + +// Bool is a struct for concurrent-safe operation for type bool. +type Bool struct { + value int32 +} + +var ( + bytesTrue = []byte("true") + bytesFalse = []byte("false") +) + +// NewBool creates and returns a concurrent-safe object for bool type, +// with given initial value `value`. +func NewBool(value ...bool) *Bool { + t := &Bool{} + if len(value) > 0 { + if value[0] { + t.value = 1 + } else { + t.value = 0 + } + } + return t +} + +// Clone clones and returns a new concurrent-safe object for bool type. +func (v *Bool) Clone() *Bool { + return NewBool(v.Val()) +} + +// Set atomically stores `value` into t.value and returns the previous value of t.value. +func (v *Bool) Set(value bool) (old bool) { + if value { + old = atomic.SwapInt32(&v.value, 1) == 1 + } else { + old = atomic.SwapInt32(&v.value, 0) == 1 + } + return +} + +// Val atomically loads and returns t.value. 
+func (v *Bool) Val() bool { + return atomic.LoadInt32(&v.value) > 0 +} + +// Cas executes the compare-and-swap operation for value. +func (v *Bool) Cas(old, new bool) (swapped bool) { + var oldInt32, newInt32 int32 + if old { + oldInt32 = 1 + } + if new { + newInt32 = 1 + } + return atomic.CompareAndSwapInt32(&v.value, oldInt32, newInt32) +} + +// String implements String interface for string printing. +func (v *Bool) String() string { + if v.Val() { + return "true" + } + return "false" +} + +// MarshalJSON implements the interface MarshalJSON for json.Marshal. +func (v Bool) MarshalJSON() ([]byte, error) { + if v.Val() { + return bytesTrue, nil + } + return bytesFalse, nil +} + +// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. +func (v *Bool) UnmarshalJSON(b []byte) error { + v.Set(gconv.Bool(bytes.Trim(b, `"`))) + return nil +} + +// UnmarshalValue is an interface implement which sets any type of value for `v`. +func (v *Bool) UnmarshalValue(value interface{}) error { + v.Set(gconv.Bool(value)) + return nil +} + +// DeepCopy implements interface for deep copy of current type. +func (v *Bool) DeepCopy() interface{} { + if v == nil { + return nil + } + return NewBool(v.Val()) +} diff --git a/vendor/github.com/gogf/gf/v2/container/gtype/gtype_byte.go b/vendor/github.com/gogf/gf/v2/container/gtype/gtype_byte.go new file mode 100644 index 00000000..836231c5 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/container/gtype/gtype_byte.go @@ -0,0 +1,85 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gtype + +import ( + "strconv" + "sync/atomic" + + "github.com/gogf/gf/v2/util/gconv" +) + +// Byte is a struct for concurrent-safe operation for type byte. 
+type Byte struct { + value int32 +} + +// NewByte creates and returns a concurrent-safe object for byte type, +// with given initial value `value`. +func NewByte(value ...byte) *Byte { + if len(value) > 0 { + return &Byte{ + value: int32(value[0]), + } + } + return &Byte{} +} + +// Clone clones and returns a new concurrent-safe object for byte type. +func (v *Byte) Clone() *Byte { + return NewByte(v.Val()) +} + +// Set atomically stores `value` into t.value and returns the previous value of t.value. +func (v *Byte) Set(value byte) (old byte) { + return byte(atomic.SwapInt32(&v.value, int32(value))) +} + +// Val atomically loads and returns t.value. +func (v *Byte) Val() byte { + return byte(atomic.LoadInt32(&v.value)) +} + +// Add atomically adds `delta` to t.value and returns the new value. +func (v *Byte) Add(delta byte) (new byte) { + return byte(atomic.AddInt32(&v.value, int32(delta))) +} + +// Cas executes the compare-and-swap operation for value. +func (v *Byte) Cas(old, new byte) (swapped bool) { + return atomic.CompareAndSwapInt32(&v.value, int32(old), int32(new)) +} + +// String implements String interface for string printing. +func (v *Byte) String() string { + return strconv.FormatUint(uint64(v.Val()), 10) +} + +// MarshalJSON implements the interface MarshalJSON for json.Marshal. +func (v Byte) MarshalJSON() ([]byte, error) { + return []byte(strconv.FormatUint(uint64(v.Val()), 10)), nil +} + +// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. +func (v *Byte) UnmarshalJSON(b []byte) error { + v.Set(gconv.Uint8(string(b))) + return nil +} + +// UnmarshalValue is an interface implement which sets any type of value for `v`. +func (v *Byte) UnmarshalValue(value interface{}) error { + v.Set(gconv.Byte(value)) + return nil +} + +// DeepCopy implements interface for deep copy of current type. 
+func (v *Byte) DeepCopy() interface{} { + if v == nil { + return nil + } + return NewByte(v.Val()) +} diff --git a/vendor/github.com/gogf/gf/v2/container/gtype/gtype_bytes.go b/vendor/github.com/gogf/gf/v2/container/gtype/gtype_bytes.go new file mode 100644 index 00000000..e01ee418 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/container/gtype/gtype_bytes.go @@ -0,0 +1,96 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gtype + +import ( + "bytes" + "encoding/base64" + "sync/atomic" + + "github.com/gogf/gf/v2/errors/gerror" + "github.com/gogf/gf/v2/util/gconv" +) + +// Bytes is a struct for concurrent-safe operation for type []byte. +type Bytes struct { + value atomic.Value +} + +// NewBytes creates and returns a concurrent-safe object for []byte type, +// with given initial value `value`. +func NewBytes(value ...[]byte) *Bytes { + t := &Bytes{} + if len(value) > 0 { + t.value.Store(value[0]) + } + return t +} + +// Clone clones and returns a new shallow copy object for []byte type. +func (v *Bytes) Clone() *Bytes { + return NewBytes(v.Val()) +} + +// Set atomically stores `value` into t.value and returns the previous value of t.value. +// Note: The parameter `value` cannot be nil. +func (v *Bytes) Set(value []byte) (old []byte) { + old = v.Val() + v.value.Store(value) + return +} + +// Val atomically loads and returns t.value. +func (v *Bytes) Val() []byte { + if s := v.value.Load(); s != nil { + return s.([]byte) + } + return nil +} + +// String implements String interface for string printing. +func (v *Bytes) String() string { + return string(v.Val()) +} + +// MarshalJSON implements the interface MarshalJSON for json.Marshal. 
+func (v Bytes) MarshalJSON() ([]byte, error) { + val := v.Val() + dst := make([]byte, base64.StdEncoding.EncodedLen(len(val))) + base64.StdEncoding.Encode(dst, val) + return []byte(`"` + string(dst) + `"`), nil +} + +// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. +func (v *Bytes) UnmarshalJSON(b []byte) error { + var ( + src = make([]byte, base64.StdEncoding.DecodedLen(len(b))) + n, err = base64.StdEncoding.Decode(src, bytes.Trim(b, `"`)) + ) + if err != nil { + err = gerror.Wrap(err, `base64.StdEncoding.Decode failed`) + return err + } + v.Set(src[:n]) + return nil +} + +// UnmarshalValue is an interface implement which sets any type of value for `v`. +func (v *Bytes) UnmarshalValue(value interface{}) error { + v.Set(gconv.Bytes(value)) + return nil +} + +// DeepCopy implements interface for deep copy of current type. +func (v *Bytes) DeepCopy() interface{} { + if v == nil { + return nil + } + oldBytes := v.Val() + newBytes := make([]byte, len(oldBytes)) + copy(newBytes, oldBytes) + return NewBytes(newBytes) +} diff --git a/vendor/github.com/gogf/gf/v2/container/gtype/gtype_float32.go b/vendor/github.com/gogf/gf/v2/container/gtype/gtype_float32.go new file mode 100644 index 00000000..82289abb --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/container/gtype/gtype_float32.go @@ -0,0 +1,97 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gtype + +import ( + "math" + "strconv" + "sync/atomic" + + "github.com/gogf/gf/v2/util/gconv" +) + +// Float32 is a struct for concurrent-safe operation for type float32. +type Float32 struct { + value uint32 +} + +// NewFloat32 creates and returns a concurrent-safe object for float32 type, +// with given initial value `value`. 
+func NewFloat32(value ...float32) *Float32 { + if len(value) > 0 { + return &Float32{ + value: math.Float32bits(value[0]), + } + } + return &Float32{} +} + +// Clone clones and returns a new concurrent-safe object for float32 type. +func (v *Float32) Clone() *Float32 { + return NewFloat32(v.Val()) +} + +// Set atomically stores `value` into t.value and returns the previous value of t.value. +func (v *Float32) Set(value float32) (old float32) { + return math.Float32frombits(atomic.SwapUint32(&v.value, math.Float32bits(value))) +} + +// Val atomically loads and returns t.value. +func (v *Float32) Val() float32 { + return math.Float32frombits(atomic.LoadUint32(&v.value)) +} + +// Add atomically adds `delta` to t.value and returns the new value. +func (v *Float32) Add(delta float32) (new float32) { + for { + old := math.Float32frombits(v.value) + new = old + delta + if atomic.CompareAndSwapUint32( + &v.value, + math.Float32bits(old), + math.Float32bits(new), + ) { + break + } + } + return +} + +// Cas executes the compare-and-swap operation for value. +func (v *Float32) Cas(old, new float32) (swapped bool) { + return atomic.CompareAndSwapUint32(&v.value, math.Float32bits(old), math.Float32bits(new)) +} + +// String implements String interface for string printing. +func (v *Float32) String() string { + return strconv.FormatFloat(float64(v.Val()), 'g', -1, 32) +} + +// MarshalJSON implements the interface MarshalJSON for json.Marshal. +func (v Float32) MarshalJSON() ([]byte, error) { + return []byte(strconv.FormatFloat(float64(v.Val()), 'g', -1, 32)), nil +} + +// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. +func (v *Float32) UnmarshalJSON(b []byte) error { + v.Set(gconv.Float32(string(b))) + return nil +} + +// UnmarshalValue is an interface implement which sets any type of value for `v`. 
+func (v *Float32) UnmarshalValue(value interface{}) error { + v.Set(gconv.Float32(value)) + return nil +} + +// DeepCopy implements interface for deep copy of current type. +func (v *Float32) DeepCopy() interface{} { + if v == nil { + return nil + } + return NewFloat32(v.Val()) +} diff --git a/vendor/github.com/gogf/gf/v2/container/gtype/gtype_float64.go b/vendor/github.com/gogf/gf/v2/container/gtype/gtype_float64.go new file mode 100644 index 00000000..ce44abd1 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/container/gtype/gtype_float64.go @@ -0,0 +1,97 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gtype + +import ( + "math" + "strconv" + "sync/atomic" + + "github.com/gogf/gf/v2/util/gconv" +) + +// Float64 is a struct for concurrent-safe operation for type float64. +type Float64 struct { + value uint64 +} + +// NewFloat64 creates and returns a concurrent-safe object for float64 type, +// with given initial value `value`. +func NewFloat64(value ...float64) *Float64 { + if len(value) > 0 { + return &Float64{ + value: math.Float64bits(value[0]), + } + } + return &Float64{} +} + +// Clone clones and returns a new concurrent-safe object for float64 type. +func (v *Float64) Clone() *Float64 { + return NewFloat64(v.Val()) +} + +// Set atomically stores `value` into t.value and returns the previous value of t.value. +func (v *Float64) Set(value float64) (old float64) { + return math.Float64frombits(atomic.SwapUint64(&v.value, math.Float64bits(value))) +} + +// Val atomically loads and returns t.value. +func (v *Float64) Val() float64 { + return math.Float64frombits(atomic.LoadUint64(&v.value)) +} + +// Add atomically adds `delta` to t.value and returns the new value. 
+func (v *Float64) Add(delta float64) (new float64) { + for { + old := math.Float64frombits(v.value) + new = old + delta + if atomic.CompareAndSwapUint64( + &v.value, + math.Float64bits(old), + math.Float64bits(new), + ) { + break + } + } + return +} + +// Cas executes the compare-and-swap operation for value. +func (v *Float64) Cas(old, new float64) (swapped bool) { + return atomic.CompareAndSwapUint64(&v.value, math.Float64bits(old), math.Float64bits(new)) +} + +// String implements String interface for string printing. +func (v *Float64) String() string { + return strconv.FormatFloat(v.Val(), 'g', -1, 64) +} + +// MarshalJSON implements the interface MarshalJSON for json.Marshal. +func (v Float64) MarshalJSON() ([]byte, error) { + return []byte(strconv.FormatFloat(v.Val(), 'g', -1, 64)), nil +} + +// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. +func (v *Float64) UnmarshalJSON(b []byte) error { + v.Set(gconv.Float64(string(b))) + return nil +} + +// UnmarshalValue is an interface implement which sets any type of value for `v`. +func (v *Float64) UnmarshalValue(value interface{}) error { + v.Set(gconv.Float64(value)) + return nil +} + +// DeepCopy implements interface for deep copy of current type. +func (v *Float64) DeepCopy() interface{} { + if v == nil { + return nil + } + return NewFloat64(v.Val()) +} diff --git a/vendor/github.com/gogf/gf/v2/container/gtype/gtype_int.go b/vendor/github.com/gogf/gf/v2/container/gtype/gtype_int.go new file mode 100644 index 00000000..32a610fb --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/container/gtype/gtype_int.go @@ -0,0 +1,85 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. 
+ +package gtype + +import ( + "strconv" + "sync/atomic" + + "github.com/gogf/gf/v2/util/gconv" +) + +// Int is a struct for concurrent-safe operation for type int. +type Int struct { + value int64 +} + +// NewInt creates and returns a concurrent-safe object for int type, +// with given initial value `value`. +func NewInt(value ...int) *Int { + if len(value) > 0 { + return &Int{ + value: int64(value[0]), + } + } + return &Int{} +} + +// Clone clones and returns a new concurrent-safe object for int type. +func (v *Int) Clone() *Int { + return NewInt(v.Val()) +} + +// Set atomically stores `value` into t.value and returns the previous value of t.value. +func (v *Int) Set(value int) (old int) { + return int(atomic.SwapInt64(&v.value, int64(value))) +} + +// Val atomically loads and returns t.value. +func (v *Int) Val() int { + return int(atomic.LoadInt64(&v.value)) +} + +// Add atomically adds `delta` to t.value and returns the new value. +func (v *Int) Add(delta int) (new int) { + return int(atomic.AddInt64(&v.value, int64(delta))) +} + +// Cas executes the compare-and-swap operation for value. +func (v *Int) Cas(old, new int) (swapped bool) { + return atomic.CompareAndSwapInt64(&v.value, int64(old), int64(new)) +} + +// String implements String interface for string printing. +func (v *Int) String() string { + return strconv.Itoa(v.Val()) +} + +// MarshalJSON implements the interface MarshalJSON for json.Marshal. +func (v Int) MarshalJSON() ([]byte, error) { + return []byte(strconv.Itoa(v.Val())), nil +} + +// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. +func (v *Int) UnmarshalJSON(b []byte) error { + v.Set(gconv.Int(string(b))) + return nil +} + +// UnmarshalValue is an interface implement which sets any type of value for `v`. +func (v *Int) UnmarshalValue(value interface{}) error { + v.Set(gconv.Int(value)) + return nil +} + +// DeepCopy implements interface for deep copy of current type. 
+func (v *Int) DeepCopy() interface{} { + if v == nil { + return nil + } + return NewInt(v.Val()) +} diff --git a/vendor/github.com/gogf/gf/v2/container/gtype/gtype_int32.go b/vendor/github.com/gogf/gf/v2/container/gtype/gtype_int32.go new file mode 100644 index 00000000..58ad36a3 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/container/gtype/gtype_int32.go @@ -0,0 +1,85 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gtype + +import ( + "strconv" + "sync/atomic" + + "github.com/gogf/gf/v2/util/gconv" +) + +// Int32 is a struct for concurrent-safe operation for type int32. +type Int32 struct { + value int32 +} + +// NewInt32 creates and returns a concurrent-safe object for int32 type, +// with given initial value `value`. +func NewInt32(value ...int32) *Int32 { + if len(value) > 0 { + return &Int32{ + value: value[0], + } + } + return &Int32{} +} + +// Clone clones and returns a new concurrent-safe object for int32 type. +func (v *Int32) Clone() *Int32 { + return NewInt32(v.Val()) +} + +// Set atomically stores `value` into t.value and returns the previous value of t.value. +func (v *Int32) Set(value int32) (old int32) { + return atomic.SwapInt32(&v.value, value) +} + +// Val atomically loads and returns t.value. +func (v *Int32) Val() int32 { + return atomic.LoadInt32(&v.value) +} + +// Add atomically adds `delta` to t.value and returns the new value. +func (v *Int32) Add(delta int32) (new int32) { + return atomic.AddInt32(&v.value, delta) +} + +// Cas executes the compare-and-swap operation for value. +func (v *Int32) Cas(old, new int32) (swapped bool) { + return atomic.CompareAndSwapInt32(&v.value, old, new) +} + +// String implements String interface for string printing. 
+func (v *Int32) String() string { + return strconv.Itoa(int(v.Val())) +} + +// MarshalJSON implements the interface MarshalJSON for json.Marshal. +func (v Int32) MarshalJSON() ([]byte, error) { + return []byte(strconv.Itoa(int(v.Val()))), nil +} + +// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. +func (v *Int32) UnmarshalJSON(b []byte) error { + v.Set(gconv.Int32(string(b))) + return nil +} + +// UnmarshalValue is an interface implement which sets any type of value for `v`. +func (v *Int32) UnmarshalValue(value interface{}) error { + v.Set(gconv.Int32(value)) + return nil +} + +// DeepCopy implements interface for deep copy of current type. +func (v *Int32) DeepCopy() interface{} { + if v == nil { + return nil + } + return NewInt32(v.Val()) +} diff --git a/vendor/github.com/gogf/gf/v2/container/gtype/gtype_int64.go b/vendor/github.com/gogf/gf/v2/container/gtype/gtype_int64.go new file mode 100644 index 00000000..54e72c12 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/container/gtype/gtype_int64.go @@ -0,0 +1,85 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gtype + +import ( + "strconv" + "sync/atomic" + + "github.com/gogf/gf/v2/util/gconv" +) + +// Int64 is a struct for concurrent-safe operation for type int64. +type Int64 struct { + value int64 +} + +// NewInt64 creates and returns a concurrent-safe object for int64 type, +// with given initial value `value`. +func NewInt64(value ...int64) *Int64 { + if len(value) > 0 { + return &Int64{ + value: value[0], + } + } + return &Int64{} +} + +// Clone clones and returns a new concurrent-safe object for int64 type. 
+func (v *Int64) Clone() *Int64 { + return NewInt64(v.Val()) +} + +// Set atomically stores `value` into t.value and returns the previous value of t.value. +func (v *Int64) Set(value int64) (old int64) { + return atomic.SwapInt64(&v.value, value) +} + +// Val atomically loads and returns t.value. +func (v *Int64) Val() int64 { + return atomic.LoadInt64(&v.value) +} + +// Add atomically adds `delta` to t.value and returns the new value. +func (v *Int64) Add(delta int64) (new int64) { + return atomic.AddInt64(&v.value, delta) +} + +// Cas executes the compare-and-swap operation for value. +func (v *Int64) Cas(old, new int64) (swapped bool) { + return atomic.CompareAndSwapInt64(&v.value, old, new) +} + +// String implements String interface for string printing. +func (v *Int64) String() string { + return strconv.FormatInt(v.Val(), 10) +} + +// MarshalJSON implements the interface MarshalJSON for json.Marshal. +func (v Int64) MarshalJSON() ([]byte, error) { + return []byte(strconv.FormatInt(v.Val(), 10)), nil +} + +// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. +func (v *Int64) UnmarshalJSON(b []byte) error { + v.Set(gconv.Int64(string(b))) + return nil +} + +// UnmarshalValue is an interface implement which sets any type of value for `v`. +func (v *Int64) UnmarshalValue(value interface{}) error { + v.Set(gconv.Int64(value)) + return nil +} + +// DeepCopy implements interface for deep copy of current type. +func (v *Int64) DeepCopy() interface{} { + if v == nil { + return nil + } + return NewInt64(v.Val()) +} diff --git a/vendor/github.com/gogf/gf/v2/container/gtype/gtype_interface.go b/vendor/github.com/gogf/gf/v2/container/gtype/gtype_interface.go new file mode 100644 index 00000000..9e57abb6 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/container/gtype/gtype_interface.go @@ -0,0 +1,82 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. 
+// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gtype + +import ( + "sync/atomic" + + "github.com/gogf/gf/v2/internal/deepcopy" + "github.com/gogf/gf/v2/internal/json" + "github.com/gogf/gf/v2/util/gconv" +) + +// Interface is a struct for concurrent-safe operation for type interface{}. +type Interface struct { + value atomic.Value +} + +// NewInterface creates and returns a concurrent-safe object for interface{} type, +// with given initial value `value`. +func NewInterface(value ...interface{}) *Interface { + t := &Interface{} + if len(value) > 0 && value[0] != nil { + t.value.Store(value[0]) + } + return t +} + +// Clone clones and returns a new concurrent-safe object for interface{} type. +func (v *Interface) Clone() *Interface { + return NewInterface(v.Val()) +} + +// Set atomically stores `value` into t.value and returns the previous value of t.value. +// Note: The parameter `value` cannot be nil. +func (v *Interface) Set(value interface{}) (old interface{}) { + old = v.Val() + v.value.Store(value) + return +} + +// Val atomically loads and returns t.value. +func (v *Interface) Val() interface{} { + return v.value.Load() +} + +// String implements String interface for string printing. +func (v *Interface) String() string { + return gconv.String(v.Val()) +} + +// MarshalJSON implements the interface MarshalJSON for json.Marshal. +func (v Interface) MarshalJSON() ([]byte, error) { + return json.Marshal(v.Val()) +} + +// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. +func (v *Interface) UnmarshalJSON(b []byte) error { + var i interface{} + if err := json.UnmarshalUseNumber(b, &i); err != nil { + return err + } + v.Set(i) + return nil +} + +// UnmarshalValue is an interface implement which sets any type of value for `v`. 
+func (v *Interface) UnmarshalValue(value interface{}) error { + v.Set(value) + return nil +} + +// DeepCopy implements interface for deep copy of current type. +func (v *Interface) DeepCopy() interface{} { + if v == nil { + return nil + } + return NewInterface(deepcopy.Copy(v.Val())) +} diff --git a/vendor/github.com/gogf/gf/v2/container/gtype/gtype_string.go b/vendor/github.com/gogf/gf/v2/container/gtype/gtype_string.go new file mode 100644 index 00000000..b753e0fa --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/container/gtype/gtype_string.go @@ -0,0 +1,80 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gtype + +import ( + "bytes" + "sync/atomic" + + "github.com/gogf/gf/v2/util/gconv" +) + +// String is a struct for concurrent-safe operation for type string. +type String struct { + value atomic.Value +} + +// NewString creates and returns a concurrent-safe object for string type, +// with given initial value `value`. +func NewString(value ...string) *String { + t := &String{} + if len(value) > 0 { + t.value.Store(value[0]) + } + return t +} + +// Clone clones and returns a new concurrent-safe object for string type. +func (v *String) Clone() *String { + return NewString(v.Val()) +} + +// Set atomically stores `value` into t.value and returns the previous value of t.value. +func (v *String) Set(value string) (old string) { + old = v.Val() + v.value.Store(value) + return +} + +// Val atomically loads and returns t.value. +func (v *String) Val() string { + s := v.value.Load() + if s != nil { + return s.(string) + } + return "" +} + +// String implements String interface for string printing. +func (v *String) String() string { + return v.Val() +} + +// MarshalJSON implements the interface MarshalJSON for json.Marshal. 
+func (v String) MarshalJSON() ([]byte, error) { + return []byte(`"` + v.Val() + `"`), nil +} + +// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. +func (v *String) UnmarshalJSON(b []byte) error { + v.Set(string(bytes.Trim(b, `"`))) + return nil +} + +// UnmarshalValue is an interface implement which sets any type of value for `v`. +func (v *String) UnmarshalValue(value interface{}) error { + v.Set(gconv.String(value)) + return nil +} + +// DeepCopy implements interface for deep copy of current type. +func (v *String) DeepCopy() interface{} { + if v == nil { + return nil + } + return NewString(v.Val()) +} diff --git a/vendor/github.com/gogf/gf/v2/container/gtype/gtype_uint.go b/vendor/github.com/gogf/gf/v2/container/gtype/gtype_uint.go new file mode 100644 index 00000000..fa00f472 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/container/gtype/gtype_uint.go @@ -0,0 +1,85 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gtype + +import ( + "strconv" + "sync/atomic" + + "github.com/gogf/gf/v2/util/gconv" +) + +// Uint is a struct for concurrent-safe operation for type uint. +type Uint struct { + value uint64 +} + +// NewUint creates and returns a concurrent-safe object for uint type, +// with given initial value `value`. +func NewUint(value ...uint) *Uint { + if len(value) > 0 { + return &Uint{ + value: uint64(value[0]), + } + } + return &Uint{} +} + +// Clone clones and returns a new concurrent-safe object for uint type. +func (v *Uint) Clone() *Uint { + return NewUint(v.Val()) +} + +// Set atomically stores `value` into t.value and returns the previous value of t.value. 
+func (v *Uint) Set(value uint) (old uint) { + return uint(atomic.SwapUint64(&v.value, uint64(value))) +} + +// Val atomically loads and returns t.value. +func (v *Uint) Val() uint { + return uint(atomic.LoadUint64(&v.value)) +} + +// Add atomically adds `delta` to t.value and returns the new value. +func (v *Uint) Add(delta uint) (new uint) { + return uint(atomic.AddUint64(&v.value, uint64(delta))) +} + +// Cas executes the compare-and-swap operation for value. +func (v *Uint) Cas(old, new uint) (swapped bool) { + return atomic.CompareAndSwapUint64(&v.value, uint64(old), uint64(new)) +} + +// String implements String interface for string printing. +func (v *Uint) String() string { + return strconv.FormatUint(uint64(v.Val()), 10) +} + +// MarshalJSON implements the interface MarshalJSON for json.Marshal. +func (v Uint) MarshalJSON() ([]byte, error) { + return []byte(strconv.FormatUint(uint64(v.Val()), 10)), nil +} + +// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. +func (v *Uint) UnmarshalJSON(b []byte) error { + v.Set(gconv.Uint(string(b))) + return nil +} + +// UnmarshalValue is an interface implement which sets any type of value for `v`. +func (v *Uint) UnmarshalValue(value interface{}) error { + v.Set(gconv.Uint(value)) + return nil +} + +// DeepCopy implements interface for deep copy of current type. +func (v *Uint) DeepCopy() interface{} { + if v == nil { + return nil + } + return NewUint(v.Val()) +} diff --git a/vendor/github.com/gogf/gf/v2/container/gtype/gtype_uint32.go b/vendor/github.com/gogf/gf/v2/container/gtype/gtype_uint32.go new file mode 100644 index 00000000..58df2e2c --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/container/gtype/gtype_uint32.go @@ -0,0 +1,85 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. 
+ +package gtype + +import ( + "strconv" + "sync/atomic" + + "github.com/gogf/gf/v2/util/gconv" +) + +// Uint32 is a struct for concurrent-safe operation for type uint32. +type Uint32 struct { + value uint32 +} + +// NewUint32 creates and returns a concurrent-safe object for uint32 type, +// with given initial value `value`. +func NewUint32(value ...uint32) *Uint32 { + if len(value) > 0 { + return &Uint32{ + value: value[0], + } + } + return &Uint32{} +} + +// Clone clones and returns a new concurrent-safe object for uint32 type. +func (v *Uint32) Clone() *Uint32 { + return NewUint32(v.Val()) +} + +// Set atomically stores `value` into t.value and returns the previous value of t.value. +func (v *Uint32) Set(value uint32) (old uint32) { + return atomic.SwapUint32(&v.value, value) +} + +// Val atomically loads and returns t.value. +func (v *Uint32) Val() uint32 { + return atomic.LoadUint32(&v.value) +} + +// Add atomically adds `delta` to t.value and returns the new value. +func (v *Uint32) Add(delta uint32) (new uint32) { + return atomic.AddUint32(&v.value, delta) +} + +// Cas executes the compare-and-swap operation for value. +func (v *Uint32) Cas(old, new uint32) (swapped bool) { + return atomic.CompareAndSwapUint32(&v.value, old, new) +} + +// String implements String interface for string printing. +func (v *Uint32) String() string { + return strconv.FormatUint(uint64(v.Val()), 10) +} + +// MarshalJSON implements the interface MarshalJSON for json.Marshal. +func (v Uint32) MarshalJSON() ([]byte, error) { + return []byte(strconv.FormatUint(uint64(v.Val()), 10)), nil +} + +// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. +func (v *Uint32) UnmarshalJSON(b []byte) error { + v.Set(gconv.Uint32(string(b))) + return nil +} + +// UnmarshalValue is an interface implement which sets any type of value for `v`. 
+func (v *Uint32) UnmarshalValue(value interface{}) error { + v.Set(gconv.Uint32(value)) + return nil +} + +// DeepCopy implements interface for deep copy of current type. +func (v *Uint32) DeepCopy() interface{} { + if v == nil { + return nil + } + return NewUint32(v.Val()) +} diff --git a/vendor/github.com/gogf/gf/v2/container/gtype/gtype_uint64.go b/vendor/github.com/gogf/gf/v2/container/gtype/gtype_uint64.go new file mode 100644 index 00000000..3b54eb63 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/container/gtype/gtype_uint64.go @@ -0,0 +1,85 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gtype + +import ( + "strconv" + "sync/atomic" + + "github.com/gogf/gf/v2/util/gconv" +) + +// Uint64 is a struct for concurrent-safe operation for type uint64. +type Uint64 struct { + value uint64 +} + +// NewUint64 creates and returns a concurrent-safe object for uint64 type, +// with given initial value `value`. +func NewUint64(value ...uint64) *Uint64 { + if len(value) > 0 { + return &Uint64{ + value: value[0], + } + } + return &Uint64{} +} + +// Clone clones and returns a new concurrent-safe object for uint64 type. +func (v *Uint64) Clone() *Uint64 { + return NewUint64(v.Val()) +} + +// Set atomically stores `value` into t.value and returns the previous value of t.value. +func (v *Uint64) Set(value uint64) (old uint64) { + return atomic.SwapUint64(&v.value, value) +} + +// Val atomically loads and returns t.value. +func (v *Uint64) Val() uint64 { + return atomic.LoadUint64(&v.value) +} + +// Add atomically adds `delta` to t.value and returns the new value. +func (v *Uint64) Add(delta uint64) (new uint64) { + return atomic.AddUint64(&v.value, delta) +} + +// Cas executes the compare-and-swap operation for value. 
+func (v *Uint64) Cas(old, new uint64) (swapped bool) { + return atomic.CompareAndSwapUint64(&v.value, old, new) +} + +// String implements String interface for string printing. +func (v *Uint64) String() string { + return strconv.FormatUint(v.Val(), 10) +} + +// MarshalJSON implements the interface MarshalJSON for json.Marshal. +func (v Uint64) MarshalJSON() ([]byte, error) { + return []byte(strconv.FormatUint(v.Val(), 10)), nil +} + +// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. +func (v *Uint64) UnmarshalJSON(b []byte) error { + v.Set(gconv.Uint64(string(b))) + return nil +} + +// UnmarshalValue is an interface implement which sets any type of value for `v`. +func (v *Uint64) UnmarshalValue(value interface{}) error { + v.Set(gconv.Uint64(value)) + return nil +} + +// DeepCopy implements interface for deep copy of current type. +func (v *Uint64) DeepCopy() interface{} { + if v == nil { + return nil + } + return NewUint64(v.Val()) +} diff --git a/vendor/github.com/gogf/gf/v2/container/gvar/gvar.go b/vendor/github.com/gogf/gf/v2/container/gvar/gvar.go new file mode 100644 index 00000000..a343089d --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/container/gvar/gvar.go @@ -0,0 +1,205 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +// Package gvar provides an universal variable type, like generics. +package gvar + +import ( + "time" + + "github.com/gogf/gf/v2/container/gtype" + "github.com/gogf/gf/v2/internal/deepcopy" + "github.com/gogf/gf/v2/internal/json" + "github.com/gogf/gf/v2/os/gtime" + "github.com/gogf/gf/v2/util/gconv" + "github.com/gogf/gf/v2/util/gutil" +) + +// Var is an universal variable type implementer. +type Var struct { + value interface{} // Underlying value. + safe bool // Concurrent safe or not. 
+} + +// New creates and returns a new Var with given `value`. +// The optional parameter `safe` specifies whether Var is used in concurrent-safety, +// which is false in default. +func New(value interface{}, safe ...bool) *Var { + if len(safe) > 0 && !safe[0] { + return &Var{ + value: gtype.NewInterface(value), + safe: true, + } + } + return &Var{ + value: value, + } +} + +// Copy does a deep copy of current Var and returns a pointer to this Var. +func (v *Var) Copy() *Var { + return New(gutil.Copy(v.Val()), v.safe) +} + +// Clone does a shallow copy of current Var and returns a pointer to this Var. +func (v *Var) Clone() *Var { + return New(v.Val(), v.safe) +} + +// Set sets `value` to `v`, and returns the old value. +func (v *Var) Set(value interface{}) (old interface{}) { + if v.safe { + if t, ok := v.value.(*gtype.Interface); ok { + old = t.Set(value) + return + } + } + old = v.value + v.value = value + return +} + +// Val returns the current value of `v`. +func (v *Var) Val() interface{} { + if v == nil { + return nil + } + if v.safe { + if t, ok := v.value.(*gtype.Interface); ok { + return t.Val() + } + } + return v.value +} + +// Interface is alias of Val. +func (v *Var) Interface() interface{} { + return v.Val() +} + +// Bytes converts and returns `v` as []byte. +func (v *Var) Bytes() []byte { + return gconv.Bytes(v.Val()) +} + +// String converts and returns `v` as string. +func (v *Var) String() string { + return gconv.String(v.Val()) +} + +// Bool converts and returns `v` as bool. +func (v *Var) Bool() bool { + return gconv.Bool(v.Val()) +} + +// Int converts and returns `v` as int. +func (v *Var) Int() int { + return gconv.Int(v.Val()) +} + +// Int8 converts and returns `v` as int8. +func (v *Var) Int8() int8 { + return gconv.Int8(v.Val()) +} + +// Int16 converts and returns `v` as int16. +func (v *Var) Int16() int16 { + return gconv.Int16(v.Val()) +} + +// Int32 converts and returns `v` as int32. 
+func (v *Var) Int32() int32 { + return gconv.Int32(v.Val()) +} + +// Int64 converts and returns `v` as int64. +func (v *Var) Int64() int64 { + return gconv.Int64(v.Val()) +} + +// Uint converts and returns `v` as uint. +func (v *Var) Uint() uint { + return gconv.Uint(v.Val()) +} + +// Uint8 converts and returns `v` as uint8. +func (v *Var) Uint8() uint8 { + return gconv.Uint8(v.Val()) +} + +// Uint16 converts and returns `v` as uint16. +func (v *Var) Uint16() uint16 { + return gconv.Uint16(v.Val()) +} + +// Uint32 converts and returns `v` as uint32. +func (v *Var) Uint32() uint32 { + return gconv.Uint32(v.Val()) +} + +// Uint64 converts and returns `v` as uint64. +func (v *Var) Uint64() uint64 { + return gconv.Uint64(v.Val()) +} + +// Float32 converts and returns `v` as float32. +func (v *Var) Float32() float32 { + return gconv.Float32(v.Val()) +} + +// Float64 converts and returns `v` as float64. +func (v *Var) Float64() float64 { + return gconv.Float64(v.Val()) +} + +// Time converts and returns `v` as time.Time. +// The parameter `format` specifies the format of the time string using gtime, +// eg: Y-m-d H:i:s. +func (v *Var) Time(format ...string) time.Time { + return gconv.Time(v.Val(), format...) +} + +// Duration converts and returns `v` as time.Duration. +// If value of `v` is string, then it uses time.ParseDuration for conversion. +func (v *Var) Duration() time.Duration { + return gconv.Duration(v.Val()) +} + +// GTime converts and returns `v` as *gtime.Time. +// The parameter `format` specifies the format of the time string using gtime, +// eg: Y-m-d H:i:s. +func (v *Var) GTime(format ...string) *gtime.Time { + return gconv.GTime(v.Val(), format...) +} + +// MarshalJSON implements the interface MarshalJSON for json.Marshal. +func (v Var) MarshalJSON() ([]byte, error) { + return json.Marshal(v.Val()) +} + +// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. 
+func (v *Var) UnmarshalJSON(b []byte) error { + var i interface{} + if err := json.UnmarshalUseNumber(b, &i); err != nil { + return err + } + v.Set(i) + return nil +} + +// UnmarshalValue is an interface implement which sets any type of value for Var. +func (v *Var) UnmarshalValue(value interface{}) error { + v.Set(value) + return nil +} + +// DeepCopy implements interface for deep copy of current type. +func (v *Var) DeepCopy() interface{} { + if v == nil { + return nil + } + return New(deepcopy.Copy(v.Val()), v.safe) +} diff --git a/vendor/github.com/gogf/gf/v2/container/gvar/gvar_is.go b/vendor/github.com/gogf/gf/v2/container/gvar/gvar_is.go new file mode 100644 index 00000000..497996cd --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/container/gvar/gvar_is.go @@ -0,0 +1,51 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gvar + +import ( + "github.com/gogf/gf/v2/internal/utils" +) + +// IsNil checks whether `v` is nil. +func (v *Var) IsNil() bool { + return utils.IsNil(v.Val()) +} + +// IsEmpty checks whether `v` is empty. +func (v *Var) IsEmpty() bool { + return utils.IsEmpty(v.Val()) +} + +// IsInt checks whether `v` is type of int. +func (v *Var) IsInt() bool { + return utils.IsInt(v.Val()) +} + +// IsUint checks whether `v` is type of uint. +func (v *Var) IsUint() bool { + return utils.IsUint(v.Val()) +} + +// IsFloat checks whether `v` is type of float. +func (v *Var) IsFloat() bool { + return utils.IsFloat(v.Val()) +} + +// IsSlice checks whether `v` is type of slice. +func (v *Var) IsSlice() bool { + return utils.IsSlice(v.Val()) +} + +// IsMap checks whether `v` is type of map. +func (v *Var) IsMap() bool { + return utils.IsMap(v.Val()) +} + +// IsStruct checks whether `v` is type of struct. 
+func (v *Var) IsStruct() bool { + return utils.IsStruct(v.Val()) +} diff --git a/vendor/github.com/gogf/gf/v2/container/gvar/gvar_list.go b/vendor/github.com/gogf/gf/v2/container/gvar/gvar_list.go new file mode 100644 index 00000000..1f24bca8 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/container/gvar/gvar_list.go @@ -0,0 +1,25 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gvar + +import ( + "github.com/gogf/gf/v2/util/gutil" +) + +// ListItemValues retrieves and returns the elements of all item struct/map with key `key`. +// Note that the parameter `list` should be type of slice which contains elements of map or struct, +// or else it returns an empty slice. +func (v *Var) ListItemValues(key interface{}) (values []interface{}) { + return gutil.ListItemValues(v.Val(), key) +} + +// ListItemValuesUnique retrieves and returns the unique elements of all struct/map with key `key`. +// Note that the parameter `list` should be type of slice which contains elements of map or struct, +// or else it returns an empty slice. +func (v *Var) ListItemValuesUnique(key string) []interface{} { + return gutil.ListItemValuesUnique(v.Val(), key) +} diff --git a/vendor/github.com/gogf/gf/v2/container/gvar/gvar_map.go b/vendor/github.com/gogf/gf/v2/container/gvar/gvar_map.go new file mode 100644 index 00000000..268d9f14 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/container/gvar/gvar_map.go @@ -0,0 +1,91 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. 
+ +package gvar + +import "github.com/gogf/gf/v2/util/gconv" + +// Map converts and returns `v` as map[string]interface{}. +func (v *Var) Map(tags ...string) map[string]interface{} { + return gconv.Map(v.Val(), tags...) +} + +// MapStrAny is like function Map, but implements the interface of MapStrAny. +func (v *Var) MapStrAny() map[string]interface{} { + return v.Map() +} + +// MapStrStr converts and returns `v` as map[string]string. +func (v *Var) MapStrStr(tags ...string) map[string]string { + return gconv.MapStrStr(v.Val(), tags...) +} + +// MapStrVar converts and returns `v` as map[string]Var. +func (v *Var) MapStrVar(tags ...string) map[string]*Var { + m := v.Map(tags...) + if len(m) > 0 { + vMap := make(map[string]*Var, len(m)) + for k, v := range m { + vMap[k] = New(v) + } + return vMap + } + return nil +} + +// MapDeep converts and returns `v` as map[string]interface{} recursively. +func (v *Var) MapDeep(tags ...string) map[string]interface{} { + return gconv.MapDeep(v.Val(), tags...) +} + +// MapStrStrDeep converts and returns `v` as map[string]string recursively. +func (v *Var) MapStrStrDeep(tags ...string) map[string]string { + return gconv.MapStrStrDeep(v.Val(), tags...) +} + +// MapStrVarDeep converts and returns `v` as map[string]*Var recursively. +func (v *Var) MapStrVarDeep(tags ...string) map[string]*Var { + m := v.MapDeep(tags...) + if len(m) > 0 { + vMap := make(map[string]*Var, len(m)) + for k, v := range m { + vMap[k] = New(v) + } + return vMap + } + return nil +} + +// Maps converts and returns `v` as map[string]string. +// See gconv.Maps. +func (v *Var) Maps(tags ...string) []map[string]interface{} { + return gconv.Maps(v.Val(), tags...) +} + +// MapsDeep converts `value` to []map[string]interface{} recursively. +// See gconv.MapsDeep. +func (v *Var) MapsDeep(tags ...string) []map[string]interface{} { + return gconv.MapsDeep(v.Val(), tags...) +} + +// MapToMap converts any map type variable `params` to another map type variable `pointer`. 
+// See gconv.MapToMap. +func (v *Var) MapToMap(pointer interface{}, mapping ...map[string]string) (err error) { + return gconv.MapToMap(v.Val(), pointer, mapping...) +} + +// MapToMaps converts any map type variable `params` to another map type variable `pointer`. +// See gconv.MapToMaps. +func (v *Var) MapToMaps(pointer interface{}, mapping ...map[string]string) (err error) { + return gconv.MapToMaps(v.Val(), pointer, mapping...) +} + +// MapToMapsDeep converts any map type variable `params` to another map type variable +// `pointer` recursively. +// See gconv.MapToMapsDeep. +func (v *Var) MapToMapsDeep(pointer interface{}, mapping ...map[string]string) (err error) { + return gconv.MapToMaps(v.Val(), pointer, mapping...) +} diff --git a/vendor/github.com/gogf/gf/v2/container/gvar/gvar_scan.go b/vendor/github.com/gogf/gf/v2/container/gvar/gvar_scan.go new file mode 100644 index 00000000..469005b5 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/container/gvar/gvar_scan.go @@ -0,0 +1,19 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gvar + +import ( + "github.com/gogf/gf/v2/util/gconv" +) + +// Scan automatically checks the type of `pointer` and converts `params` to `pointer`. It supports `pointer` +// with type of `*map/*[]map/*[]*map/*struct/**struct/*[]struct/*[]*struct` for converting. +// +// See gconv.Scan. +func (v *Var) Scan(pointer interface{}, mapping ...map[string]string) error { + return gconv.Scan(v.Val(), pointer, mapping...) 
+} diff --git a/vendor/github.com/gogf/gf/v2/container/gvar/gvar_slice.go b/vendor/github.com/gogf/gf/v2/container/gvar/gvar_slice.go new file mode 100644 index 00000000..02a61aa2 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/container/gvar/gvar_slice.go @@ -0,0 +1,77 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gvar + +import "github.com/gogf/gf/v2/util/gconv" + +// Ints converts and returns `v` as []int. +func (v *Var) Ints() []int { + return gconv.Ints(v.Val()) +} + +// Int64s converts and returns `v` as []int64. +func (v *Var) Int64s() []int64 { + return gconv.Int64s(v.Val()) +} + +// Uints converts and returns `v` as []uint. +func (v *Var) Uints() []uint { + return gconv.Uints(v.Val()) +} + +// Uint64s converts and returns `v` as []uint64. +func (v *Var) Uint64s() []uint64 { + return gconv.Uint64s(v.Val()) +} + +// Floats is alias of Float64s. +func (v *Var) Floats() []float64 { + return gconv.Floats(v.Val()) +} + +// Float32s converts and returns `v` as []float32. +func (v *Var) Float32s() []float32 { + return gconv.Float32s(v.Val()) +} + +// Float64s converts and returns `v` as []float64. +func (v *Var) Float64s() []float64 { + return gconv.Float64s(v.Val()) +} + +// Strings converts and returns `v` as []string. +func (v *Var) Strings() []string { + return gconv.Strings(v.Val()) +} + +// Interfaces converts and returns `v` as []interfaces{}. +func (v *Var) Interfaces() []interface{} { + return gconv.Interfaces(v.Val()) +} + +// Slice is alias of Interfaces. +func (v *Var) Slice() []interface{} { + return v.Interfaces() +} + +// Array is alias of Interfaces. +func (v *Var) Array() []interface{} { + return v.Interfaces() +} + +// Vars converts and returns `v` as []Var. 
+func (v *Var) Vars() []*Var { + array := gconv.Interfaces(v.Val()) + if len(array) == 0 { + return nil + } + vars := make([]*Var, len(array)) + for k, v := range array { + vars[k] = New(v) + } + return vars +} diff --git a/vendor/github.com/gogf/gf/v2/container/gvar/gvar_struct.go b/vendor/github.com/gogf/gf/v2/container/gvar/gvar_struct.go new file mode 100644 index 00000000..30ca794b --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/container/gvar/gvar_struct.go @@ -0,0 +1,23 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gvar + +import ( + "github.com/gogf/gf/v2/util/gconv" +) + +// Struct maps value of `v` to `pointer`. +// The parameter `pointer` should be a pointer to a struct instance. +// The parameter `mapping` is used to specify the key-to-attribute mapping rules. +func (v *Var) Struct(pointer interface{}, mapping ...map[string]string) error { + return gconv.Struct(v.Val(), pointer, mapping...) +} + +// Structs converts and returns `v` as given struct slice. +func (v *Var) Structs(pointer interface{}, mapping ...map[string]string) error { + return gconv.Structs(v.Val(), pointer, mapping...) +} diff --git a/vendor/github.com/gogf/gf/v2/container/gvar/gvar_vars.go b/vendor/github.com/gogf/gf/v2/container/gvar/gvar_vars.go new file mode 100644 index 00000000..f566a782 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/container/gvar/gvar_vars.go @@ -0,0 +1,131 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gvar + +import ( + "github.com/gogf/gf/v2/util/gconv" +) + +// Vars is a slice of *Var. 
+type Vars []*Var + +// Strings converts and returns `vs` as []string. +func (vs Vars) Strings() (s []string) { + for _, v := range vs { + s = append(s, v.String()) + } + return s +} + +// Interfaces converts and returns `vs` as []interface{}. +func (vs Vars) Interfaces() (s []interface{}) { + for _, v := range vs { + s = append(s, v.Val()) + } + return s +} + +// Float32s converts and returns `vs` as []float32. +func (vs Vars) Float32s() (s []float32) { + for _, v := range vs { + s = append(s, v.Float32()) + } + return s +} + +// Float64s converts and returns `vs` as []float64. +func (vs Vars) Float64s() (s []float64) { + for _, v := range vs { + s = append(s, v.Float64()) + } + return s +} + +// Ints converts and returns `vs` as []Int. +func (vs Vars) Ints() (s []int) { + for _, v := range vs { + s = append(s, v.Int()) + } + return s +} + +// Int8s converts and returns `vs` as []int8. +func (vs Vars) Int8s() (s []int8) { + for _, v := range vs { + s = append(s, v.Int8()) + } + return s +} + +// Int16s converts and returns `vs` as []int16. +func (vs Vars) Int16s() (s []int16) { + for _, v := range vs { + s = append(s, v.Int16()) + } + return s +} + +// Int32s converts and returns `vs` as []int32. +func (vs Vars) Int32s() (s []int32) { + for _, v := range vs { + s = append(s, v.Int32()) + } + return s +} + +// Int64s converts and returns `vs` as []int64. +func (vs Vars) Int64s() (s []int64) { + for _, v := range vs { + s = append(s, v.Int64()) + } + return s +} + +// Uints converts and returns `vs` as []uint. +func (vs Vars) Uints() (s []uint) { + for _, v := range vs { + s = append(s, v.Uint()) + } + return s +} + +// Uint8s converts and returns `vs` as []uint8. +func (vs Vars) Uint8s() (s []uint8) { + for _, v := range vs { + s = append(s, v.Uint8()) + } + return s +} + +// Uint16s converts and returns `vs` as []uint16. 
+func (vs Vars) Uint16s() (s []uint16) { + for _, v := range vs { + s = append(s, v.Uint16()) + } + return s +} + +// Uint32s converts and returns `vs` as []uint32. +func (vs Vars) Uint32s() (s []uint32) { + for _, v := range vs { + s = append(s, v.Uint32()) + } + return s +} + +// Uint64s converts and returns `vs` as []uint64. +func (vs Vars) Uint64s() (s []uint64) { + for _, v := range vs { + s = append(s, v.Uint64()) + } + return s +} + +// Scan converts `vs` to []struct/[]*struct. +func (vs Vars) Scan(pointer interface{}, mapping ...map[string]string) error { + return gconv.Structs(vs.Interfaces(), pointer, mapping...) +} diff --git a/vendor/github.com/gogf/gf/v2/database/gredis/gredis.go b/vendor/github.com/gogf/gf/v2/database/gredis/gredis.go new file mode 100644 index 00000000..82bf15e6 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/database/gredis/gredis.go @@ -0,0 +1,78 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +// Package gredis provides convenient client for redis server. +// +// Redis Client. +// +// Redis Commands Official: https://redis.io/commands +// +// Redis Chinese Documentation: http://redisdoc.com/ +package gredis + +import ( + "github.com/gogf/gf/v2/errors/gcode" + "github.com/gogf/gf/v2/errors/gerror" +) + +// AdapterFunc is the function creating redis adapter. +type AdapterFunc func(config *Config) Adapter + +var ( + // defaultAdapterFunc is the default adapter function creating redis adapter. + defaultAdapterFunc AdapterFunc = func(config *Config) Adapter { + return nil + } +) + +// New creates and returns a redis client. +// It creates a default redis adapter of go-redis. 
+func New(config ...*Config) (*Redis, error) { + var ( + usedConfig *Config + usedAdapter Adapter + ) + if len(config) > 0 && config[0] != nil { + // Redis client with go redis implements adapter from given configuration. + usedConfig = config[0] + usedAdapter = defaultAdapterFunc(config[0]) + } else if configFromGlobal, ok := GetConfig(); ok { + // Redis client with go redis implements adapter from package configuration. + usedConfig = configFromGlobal + usedAdapter = defaultAdapterFunc(configFromGlobal) + } + if usedConfig == nil { + return nil, gerror.NewCode( + gcode.CodeInvalidConfiguration, + `no configuration found for creating Redis client`, + ) + } + if usedAdapter == nil { + return nil, gerror.NewCode( + gcode.CodeNecessaryPackageNotImport, + errorNilAdapter, + ) + } + redis := &Redis{ + config: config[0], + localAdapter: usedAdapter, + } + return redis.initGroup(), nil +} + +// NewWithAdapter creates and returns a redis client with given adapter. +func NewWithAdapter(adapter Adapter) (*Redis, error) { + if adapter == nil { + return nil, gerror.NewCodef(gcode.CodeInvalidParameter, `adapter cannot be nil`) + } + redis := &Redis{localAdapter: adapter} + return redis.initGroup(), nil +} + +// RegisterAdapterFunc registers default function creating redis adapter. +func RegisterAdapterFunc(adapterFunc AdapterFunc) { + defaultAdapterFunc = adapterFunc +} diff --git a/vendor/github.com/gogf/gf/v2/database/gredis/gredis_adapter.go b/vendor/github.com/gogf/gf/v2/database/gredis/gredis_adapter.go new file mode 100644 index 00000000..ad63bdbe --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/database/gredis/gredis_adapter.go @@ -0,0 +1,78 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. 
+ +package gredis + +import ( + "context" + + "github.com/gogf/gf/v2/container/gvar" +) + +// Adapter is an interface for universal redis operations. +type Adapter interface { + AdapterGroup + + // Do send a command to the server and returns the received reply. + // It uses json.Marshal for struct/slice/map type values before committing them to redis. + Do(ctx context.Context, command string, args ...interface{}) (*gvar.Var, error) + + // Conn retrieves and returns a connection object for continuous operations. + // Note that you should call Close function manually if you do not use this connection any further. + Conn(ctx context.Context) (conn Conn, err error) + + // Close closes current redis client, closes its connection pool and releases all its related resources. + Close(ctx context.Context) (err error) +} + +// Conn is an interface of a connection from universal redis client. +type Conn interface { + ConnCommand + + // Do send a command to the server and returns the received reply. + // It uses json.Marshal for struct/slice/map type values before committing them to redis. + Do(ctx context.Context, command string, args ...interface{}) (result *gvar.Var, err error) + + // Close puts the connection back to connection pool. + Close(ctx context.Context) (err error) +} + +// AdapterGroup is an interface managing group operations for redis. +type AdapterGroup interface { + GroupGeneric() IGroupGeneric + GroupHash() IGroupHash + GroupList() IGroupList + GroupPubSub() IGroupPubSub + GroupScript() IGroupScript + GroupSet() IGroupSet + GroupSortedSet() IGroupSortedSet + GroupString() IGroupString +} + +// ConnCommand is an interface managing some operations bound to certain connection. +type ConnCommand interface { + // Subscribe subscribes the client to the specified channels. 
+ // https://redis.io/commands/subscribe/ + Subscribe(ctx context.Context, channel string, channels ...string) ([]*Subscription, error) + + // PSubscribe subscribes the client to the given patterns. + // + // Supported glob-style patterns: + // - h?llo subscribes to hello, hallo and hxllo + // - h*llo subscribes to hllo and heeeello + // - h[ae]llo subscribes to hello and hallo, but not hillo + // + // Use \ to escape special characters if you want to match them verbatim. + // + // https://redis.io/commands/psubscribe/ + PSubscribe(ctx context.Context, pattern string, patterns ...string) ([]*Subscription, error) + + // ReceiveMessage receives a single message of subscription from the Redis server. + ReceiveMessage(ctx context.Context) (*Message, error) + + // Receive receives a single reply as gvar.Var from the Redis server. + Receive(ctx context.Context) (result *gvar.Var, err error) +} diff --git a/vendor/github.com/gogf/gf/v2/database/gredis/gredis_config.go b/vendor/github.com/gogf/gf/v2/database/gredis/gredis_config.go new file mode 100644 index 00000000..6a8f7d23 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/database/gredis/gredis_config.go @@ -0,0 +1,134 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gredis + +import ( + "context" + "crypto/tls" + "time" + + "github.com/gogf/gf/v2/container/gmap" + "github.com/gogf/gf/v2/errors/gcode" + "github.com/gogf/gf/v2/errors/gerror" + "github.com/gogf/gf/v2/internal/intlog" + "github.com/gogf/gf/v2/util/gconv" +) + +// Config is redis configuration. +type Config struct { + // Address It supports single and cluster redis server. Multiple addresses joined with char ','. Eg: 192.168.1.1:6379, 192.168.1.2:6379. + Address string `json:"address"` + Db int `json:"db"` // Redis db. 
+ Pass string `json:"pass"` // Password for AUTH. + MinIdle int `json:"minIdle"` // Minimum number of connections allowed to be idle (default is 0) + MaxIdle int `json:"maxIdle"` // Maximum number of connections allowed to be idle (default is 10) + MaxActive int `json:"maxActive"` // Maximum number of connections limit (default is 0 means no limit). + MaxConnLifetime time.Duration `json:"maxConnLifetime"` // Maximum lifetime of the connection (default is 30 seconds, not allowed to be set to 0) + IdleTimeout time.Duration `json:"idleTimeout"` // Maximum idle time for connection (default is 10 seconds, not allowed to be set to 0) + WaitTimeout time.Duration `json:"waitTimeout"` // Timed out duration waiting to get a connection from the connection pool. + DialTimeout time.Duration `json:"dialTimeout"` // Dial connection timeout for TCP. + ReadTimeout time.Duration `json:"readTimeout"` // Read timeout for TCP. DO NOT set it if not necessary. + WriteTimeout time.Duration `json:"writeTimeout"` // Write timeout for TCP. + MasterName string `json:"masterName"` // Used in Redis Sentinel mode. + TLS bool `json:"tls"` // Specifies whether TLS should be used when connecting to the server. + TLSSkipVerify bool `json:"tlsSkipVerify"` // Disables server name verification when connecting over TLS. + TLSConfig *tls.Config `json:"-"` // TLS Config to use. When set TLS will be negotiated. + SlaveOnly bool `json:"slaveOnly"` // Route all commands to slave read-only nodes. +} + +const ( + DefaultGroupName = "default" // Default configuration group name. +) + +var ( + // Configuration groups. + localConfigMap = gmap.NewStrAnyMap(true) +) + +// SetConfig sets the global configuration for specified group. +// If `name` is not passed, it sets configuration for the default group name. 
+func SetConfig(config *Config, name ...string) { + group := DefaultGroupName + if len(name) > 0 { + group = name[0] + } + localConfigMap.Set(group, config) + + intlog.Printf(context.TODO(), `SetConfig for group "%s": %+v`, group, config) +} + +// SetConfigByMap sets the global configuration for specified group with map. +// If `name` is not passed, it sets configuration for the default group name. +func SetConfigByMap(m map[string]interface{}, name ...string) error { + group := DefaultGroupName + if len(name) > 0 { + group = name[0] + } + config, err := ConfigFromMap(m) + if err != nil { + return err + } + localConfigMap.Set(group, config) + return nil +} + +// ConfigFromMap parses and returns config from given map. +func ConfigFromMap(m map[string]interface{}) (config *Config, err error) { + config = &Config{} + if err = gconv.Scan(m, config); err != nil { + err = gerror.NewCodef(gcode.CodeInvalidConfiguration, `invalid redis configuration: %#v`, m) + } + if config.DialTimeout < time.Second { + config.DialTimeout = config.DialTimeout * time.Second + } + if config.WaitTimeout < time.Second { + config.WaitTimeout = config.WaitTimeout * time.Second + } + if config.WriteTimeout < time.Second { + config.WriteTimeout = config.WriteTimeout * time.Second + } + if config.ReadTimeout < time.Second { + config.ReadTimeout = config.ReadTimeout * time.Second + } + if config.IdleTimeout < time.Second { + config.IdleTimeout = config.IdleTimeout * time.Second + } + if config.MaxConnLifetime < time.Second { + config.MaxConnLifetime = config.MaxConnLifetime * time.Second + } + return +} + +// GetConfig returns the global configuration with specified group name. +// If `name` is not passed, it returns configuration of the default group name. 
+func GetConfig(name ...string) (config *Config, ok bool) { + group := DefaultGroupName + if len(name) > 0 { + group = name[0] + } + if v := localConfigMap.Get(group); v != nil { + return v.(*Config), true + } + return &Config{}, false +} + +// RemoveConfig removes the global configuration with specified group. +// If `name` is not passed, it removes configuration of the default group name. +func RemoveConfig(name ...string) { + group := DefaultGroupName + if len(name) > 0 { + group = name[0] + } + localConfigMap.Remove(group) + + intlog.Printf(context.TODO(), `RemoveConfig: %s`, group) +} + +// ClearConfig removes all configurations of redis. +func ClearConfig() { + localConfigMap.Clear() +} diff --git a/vendor/github.com/gogf/gf/v2/database/gredis/gredis_instance.go b/vendor/github.com/gogf/gf/v2/database/gredis/gredis_instance.go new file mode 100644 index 00000000..2805d557 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/database/gredis/gredis_instance.go @@ -0,0 +1,44 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gredis + +import ( + "context" + + "github.com/gogf/gf/v2/container/gmap" + "github.com/gogf/gf/v2/internal/intlog" +) + +var ( + // localInstances for instance management of redis client. + localInstances = gmap.NewStrAnyMap(true) +) + +// Instance returns an instance of redis client with specified group. +// The `name` param is unnecessary, if `name` is not passed, +// it returns a redis instance with default configuration group. 
+func Instance(name ...string) *Redis { + group := DefaultGroupName + if len(name) > 0 && name[0] != "" { + group = name[0] + } + v := localInstances.GetOrSetFuncLock(group, func() interface{} { + if config, ok := GetConfig(group); ok { + r, err := New(config) + if err != nil { + intlog.Errorf(context.TODO(), `%+v`, err) + return nil + } + return r + } + return nil + }) + if v != nil { + return v.(*Redis) + } + return nil +} diff --git a/vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis.go b/vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis.go new file mode 100644 index 00000000..04d72b47 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis.go @@ -0,0 +1,137 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gredis + +import ( + "context" + + "github.com/gogf/gf/v2/container/gvar" + "github.com/gogf/gf/v2/errors/gcode" + "github.com/gogf/gf/v2/errors/gerror" + "github.com/gogf/gf/v2/text/gstr" +) + +// Redis client. +type Redis struct { + config *Config + localAdapter + localGroup +} + +type ( + localGroup struct { + localGroupGeneric + localGroupHash + localGroupList + localGroupPubSub + localGroupScript + localGroupSet + localGroupSortedSet + localGroupString + } + localAdapter = Adapter + localGroupGeneric = IGroupGeneric + localGroupHash = IGroupHash + localGroupList = IGroupList + localGroupPubSub = IGroupPubSub + localGroupScript = IGroupScript + localGroupSet = IGroupSet + localGroupSortedSet = IGroupSortedSet + localGroupString = IGroupString +) + +const ( + errorNilRedis = `the Redis object is nil` +) + +var ( + errorNilAdapter = gstr.Trim(gstr.Replace(` +redis adapter is not set, missing configuration or adapter register? 
+possible reference: https://github.com/gogf/gf/tree/master/contrib/nosql/redis +`, "\n", "")) +) + +// initGroup initializes the group object of redis. +func (r *Redis) initGroup() *Redis { + r.localGroup = localGroup{ + localGroupGeneric: r.localAdapter.GroupGeneric(), + localGroupHash: r.localAdapter.GroupHash(), + localGroupList: r.localAdapter.GroupList(), + localGroupPubSub: r.localAdapter.GroupPubSub(), + localGroupScript: r.localAdapter.GroupScript(), + localGroupSet: r.localAdapter.GroupSet(), + localGroupSortedSet: r.localAdapter.GroupSortedSet(), + localGroupString: r.localAdapter.GroupString(), + } + return r +} + +// SetAdapter changes the underlying adapter with custom adapter for current redis client. +func (r *Redis) SetAdapter(adapter Adapter) { + if r == nil { + panic(gerror.NewCode(gcode.CodeInvalidParameter, errorNilRedis)) + } + r.localAdapter = adapter +} + +// GetAdapter returns the adapter that is set in current redis client. +func (r *Redis) GetAdapter() Adapter { + if r == nil { + return nil + } + return r.localAdapter +} + +// Conn retrieves and returns a connection object for continuous operations. +// Note that you should call Close function manually if you do not use this connection any further. +func (r *Redis) Conn(ctx context.Context) (Conn, error) { + if r == nil { + return nil, gerror.NewCode(gcode.CodeInvalidParameter, errorNilRedis) + } + if r.localAdapter == nil { + return nil, gerror.NewCode(gcode.CodeNecessaryPackageNotImport, errorNilAdapter) + } + return r.localAdapter.Conn(ctx) +} + +// Do send a command to the server and returns the received reply. +// It uses json.Marshal for struct/slice/map type values before committing them to redis. 
+func (r *Redis) Do(ctx context.Context, command string, args ...interface{}) (*gvar.Var, error) { + if r == nil { + return nil, gerror.NewCode(gcode.CodeInvalidParameter, errorNilRedis) + } + if r.localAdapter == nil { + return nil, gerror.NewCodef(gcode.CodeMissingConfiguration, errorNilAdapter) + } + return r.localAdapter.Do(ctx, command, args...) +} + +// MustConn performs as function Conn, but it panics if any error occurs internally. +func (r *Redis) MustConn(ctx context.Context) Conn { + c, err := r.Conn(ctx) + if err != nil { + panic(err) + } + return c +} + +// MustDo performs as function Do, but it panics if any error occurs internally. +func (r *Redis) MustDo(ctx context.Context, command string, args ...interface{}) *gvar.Var { + v, err := r.Do(ctx, command, args...) + if err != nil { + panic(err) + } + return v +} + +// Close closes current redis client, closes its connection pool and releases all its related resources. +func (r *Redis) Close(ctx context.Context) error { + if r == nil || r.localAdapter == nil { + return nil + } + return r.localAdapter.Close(ctx) +} diff --git a/vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_generic.go b/vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_generic.go new file mode 100644 index 00000000..fc5ace89 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_generic.go @@ -0,0 +1,62 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gredis + +import ( + "context" + "time" + + "github.com/gogf/gf/v2/container/gvar" +) + +// IGroupGeneric manages generic redis operations. +// Implements see redis.GroupGeneric. 
+type IGroupGeneric interface { + Copy(ctx context.Context, source, destination string, option ...CopyOption) (int64, error) + Exists(ctx context.Context, keys ...string) (int64, error) + Type(ctx context.Context, key string) (string, error) + Unlink(ctx context.Context, keys ...string) (int64, error) + Rename(ctx context.Context, key, newKey string) error + RenameNX(ctx context.Context, key, newKey string) (int64, error) + Move(ctx context.Context, key string, db int) (int64, error) + Del(ctx context.Context, keys ...string) (int64, error) + RandomKey(ctx context.Context) (string, error) + DBSize(ctx context.Context) (int64, error) + Keys(ctx context.Context, pattern string) ([]string, error) + FlushDB(ctx context.Context, option ...FlushOp) error + FlushAll(ctx context.Context, option ...FlushOp) error + Expire(ctx context.Context, key string, seconds int64, option ...ExpireOption) (int64, error) + ExpireAt(ctx context.Context, key string, time time.Time, option ...ExpireOption) (int64, error) + ExpireTime(ctx context.Context, key string) (*gvar.Var, error) + TTL(ctx context.Context, key string) (int64, error) + Persist(ctx context.Context, key string) (int64, error) + PExpire(ctx context.Context, key string, milliseconds int64, option ...ExpireOption) (int64, error) + PExpireAt(ctx context.Context, key string, time time.Time, option ...ExpireOption) (int64, error) + PExpireTime(ctx context.Context, key string) (*gvar.Var, error) + PTTL(ctx context.Context, key string) (int64, error) +} + +// CopyOption provides options for function Copy. +type CopyOption struct { + DB int // DB option allows specifying an alternative logical database index for the destination key. + REPLACE bool // REPLACE option removes the destination key before copying the value to it. 
+} + +type FlushOp string + +const ( + FlushAsync FlushOp = "ASYNC" // ASYNC: flushes the databases asynchronously + FlushSync FlushOp = "SYNC" // SYNC: flushes the databases synchronously +) + +// ExpireOption provides options for function Expire. +type ExpireOption struct { + NX bool // NX -- Set expiry only when the key has no expiry + XX bool // XX -- Set expiry only when the key has an existing expiry + GT bool // GT -- Set expiry only when the new expiry is greater than current one + LT bool // LT -- Set expiry only when the new expiry is less than current one +} diff --git a/vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_hash.go b/vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_hash.go new file mode 100644 index 00000000..ab398447 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_hash.go @@ -0,0 +1,32 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gredis + +import ( + "context" + + "github.com/gogf/gf/v2/container/gvar" +) + +// IGroupHash manages redis hash operations. +// Implements see redis.GroupHash. 
+type IGroupHash interface { + HSet(ctx context.Context, key string, fields map[string]interface{}) (int64, error) + HSetNX(ctx context.Context, key, field string, value interface{}) (int64, error) + HGet(ctx context.Context, key, field string) (*gvar.Var, error) + HStrLen(ctx context.Context, key, field string) (int64, error) + HExists(ctx context.Context, key, field string) (int64, error) + HDel(ctx context.Context, key string, fields ...string) (int64, error) + HLen(ctx context.Context, key string) (int64, error) + HIncrBy(ctx context.Context, key, field string, increment int64) (int64, error) + HIncrByFloat(ctx context.Context, key, field string, increment float64) (float64, error) + HMSet(ctx context.Context, key string, fields map[string]interface{}) error + HMGet(ctx context.Context, key string, fields ...string) (gvar.Vars, error) + HKeys(ctx context.Context, key string) ([]string, error) + HVals(ctx context.Context, key string) (gvar.Vars, error) + HGetAll(ctx context.Context, key string) (*gvar.Var, error) +} diff --git a/vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_list.go b/vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_list.go new file mode 100644 index 00000000..fa989170 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_list.go @@ -0,0 +1,43 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gredis + +import ( + "context" + + "github.com/gogf/gf/v2/container/gvar" +) + +// IGroupList manages redis list operations. +// Implements see redis.GroupList. 
+type IGroupList interface { + LPush(ctx context.Context, key string, values ...interface{}) (int64, error) + LPushX(ctx context.Context, key string, element interface{}, elements ...interface{}) (int64, error) + RPush(ctx context.Context, key string, values ...interface{}) (int64, error) + RPushX(ctx context.Context, key string, value interface{}) (int64, error) + LPop(ctx context.Context, key string, count ...int) (*gvar.Var, error) + RPop(ctx context.Context, key string, count ...int) (*gvar.Var, error) + LRem(ctx context.Context, key string, count int64, value interface{}) (int64, error) + LLen(ctx context.Context, key string) (int64, error) + LIndex(ctx context.Context, key string, index int64) (*gvar.Var, error) + LInsert(ctx context.Context, key string, op LInsertOp, pivot, value interface{}) (int64, error) + LSet(ctx context.Context, key string, index int64, value interface{}) (*gvar.Var, error) + LRange(ctx context.Context, key string, start, stop int64) (gvar.Vars, error) + LTrim(ctx context.Context, key string, start, stop int64) error + BLPop(ctx context.Context, timeout int64, keys ...string) (gvar.Vars, error) + BRPop(ctx context.Context, timeout int64, keys ...string) (gvar.Vars, error) + RPopLPush(ctx context.Context, source, destination string) (*gvar.Var, error) + BRPopLPush(ctx context.Context, source, destination string, timeout int64) (*gvar.Var, error) +} + +// LInsertOp defines the operation name for function LInsert. +type LInsertOp string + +const ( + LInsertBefore LInsertOp = "BEFORE" + LInsertAfter LInsertOp = "AFTER" +) diff --git a/vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_pubsub.go b/vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_pubsub.go new file mode 100644 index 00000000..2e4bd553 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_pubsub.go @@ -0,0 +1,40 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. 
+// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gredis + +import ( + "context" + "fmt" +) + +// IGroupPubSub manages redis pub/sub operations. +// Implements see redis.GroupPubSub. +type IGroupPubSub interface { + Publish(ctx context.Context, channel string, message interface{}) (int64, error) + Subscribe(ctx context.Context, channel string, channels ...string) (Conn, []*Subscription, error) + PSubscribe(ctx context.Context, pattern string, patterns ...string) (Conn, []*Subscription, error) +} + +// Message received as result of a PUBLISH command issued by another client. +type Message struct { + Channel string + Pattern string + Payload string + PayloadSlice []string +} + +// Subscription received after a successful subscription to channel. +type Subscription struct { + Kind string // Can be "subscribe", "unsubscribe", "psubscribe" or "punsubscribe". + Channel string // Channel name we have subscribed to. + Count int // Number of channels we are currently subscribed to. +} + +// String converts current object to a readable string. +func (m *Subscription) String() string { + return fmt.Sprintf("%s: %s", m.Kind, m.Channel) +} diff --git a/vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_script.go b/vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_script.go new file mode 100644 index 00000000..e2ef1fc4 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_script.go @@ -0,0 +1,30 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. 
+ +package gredis + +import ( + "context" + + "github.com/gogf/gf/v2/container/gvar" +) + +// IGroupScript manages redis script operations. +// Implements see redis.GroupScript. +type IGroupScript interface { + Eval(ctx context.Context, script string, numKeys int64, keys []string, args []interface{}) (*gvar.Var, error) + EvalSha(ctx context.Context, sha1 string, numKeys int64, keys []string, args []interface{}) (*gvar.Var, error) + ScriptLoad(ctx context.Context, script string) (string, error) + ScriptExists(ctx context.Context, sha1 string, sha1s ...string) (map[string]bool, error) + ScriptFlush(ctx context.Context, option ...ScriptFlushOption) error + ScriptKill(ctx context.Context) error +} + +// ScriptFlushOption provides options for function ScriptFlush. +type ScriptFlushOption struct { + SYNC bool // SYNC flushes the cache synchronously. + ASYNC bool // ASYNC flushes the cache asynchronously. +} diff --git a/vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_set.go b/vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_set.go new file mode 100644 index 00000000..27e04fbf --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_set.go @@ -0,0 +1,33 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gredis + +import ( + "context" + + "github.com/gogf/gf/v2/container/gvar" +) + +// IGroupSet manages redis set operations. +// Implements see redis.GroupSet. 
+type IGroupSet interface { + SAdd(ctx context.Context, key string, member interface{}, members ...interface{}) (int64, error) + SIsMember(ctx context.Context, key string, member interface{}) (int64, error) + SPop(ctx context.Context, key string, count ...int) (*gvar.Var, error) + SRandMember(ctx context.Context, key string, count ...int) (*gvar.Var, error) + SRem(ctx context.Context, key string, member interface{}, members ...interface{}) (int64, error) + SMove(ctx context.Context, source, destination string, member interface{}) (int64, error) + SCard(ctx context.Context, key string) (int64, error) + SMembers(ctx context.Context, key string) (gvar.Vars, error) + SMIsMember(ctx context.Context, key, member interface{}, members ...interface{}) ([]int, error) + SInter(ctx context.Context, key string, keys ...string) (gvar.Vars, error) + SInterStore(ctx context.Context, destination string, key string, keys ...string) (int64, error) + SUnion(ctx context.Context, key string, keys ...string) (gvar.Vars, error) + SUnionStore(ctx context.Context, destination, key string, keys ...string) (int64, error) + SDiff(ctx context.Context, key string, keys ...string) (gvar.Vars, error) + SDiffStore(ctx context.Context, destination string, key string, keys ...string) (int64, error) +} diff --git a/vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_sorted_set.go b/vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_sorted_set.go new file mode 100644 index 00000000..83367136 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_sorted_set.go @@ -0,0 +1,85 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. 
+ +package gredis + +import ( + "context" + + "github.com/gogf/gf/v2/container/gvar" +) + +// IGroupSortedSet manages redis sorted set operations. +// Implements see redis.GroupSortedSet. +type IGroupSortedSet interface { + ZAdd(ctx context.Context, key string, option *ZAddOption, member ZAddMember, members ...ZAddMember) (*gvar.Var, error) + ZScore(ctx context.Context, key string, member interface{}) (float64, error) + ZIncrBy(ctx context.Context, key string, increment float64, member interface{}) (float64, error) + ZCard(ctx context.Context, key string) (int64, error) + ZCount(ctx context.Context, key string, min, max string) (int64, error) + ZRange(ctx context.Context, key string, start, stop int64, option ...ZRangeOption) (gvar.Vars, error) + ZRevRange(ctx context.Context, key string, start, stop int64, option ...ZRevRangeOption) (*gvar.Var, error) + ZRank(ctx context.Context, key string, member interface{}) (int64, error) + ZRevRank(ctx context.Context, key string, member interface{}) (int64, error) + ZRem(ctx context.Context, key string, member interface{}, members ...interface{}) (int64, error) + ZRemRangeByRank(ctx context.Context, key string, start, stop int64) (int64, error) + ZRemRangeByScore(ctx context.Context, key string, min, max string) (int64, error) + ZRemRangeByLex(ctx context.Context, key string, min, max string) (int64, error) + ZLexCount(ctx context.Context, key, min, max string) (int64, error) +} + +// ZAddOption provides options for function ZAdd. +type ZAddOption struct { + XX bool // Only update elements that already exist. Don't add new elements. + NX bool // Only add new elements. Don't update already existing elements. + // Only update existing elements if the new score is less than the current score. + // This flag doesn't prevent adding new elements. + LT bool + + // Only update existing elements if the new score is greater than the current score. + // This flag doesn't prevent adding new elements. 
+ GT bool + + // Modify the return value from the number of new elements added, to the total number of elements changed (CH is an abbreviation of changed). + // Changed elements are new elements added and elements already existing for which the score was updated. + // So elements specified in the command line having the same score as they had in the past are not counted. + // Note: normally the return value of ZAdd only counts the number of new elements added. + CH bool + + // When this option is specified ZAdd acts like ZIncrBy. Only one score-element pair can be specified in this mode. + INCR bool +} + +// ZAddMember is element struct for set. +type ZAddMember struct { + Score float64 + Member interface{} +} + +// ZRangeOption provides extra option for ZRange function. +type ZRangeOption struct { + ByScore bool + ByLex bool + // The optional REV argument reverses the ordering, so elements are ordered from highest to lowest score, + // and score ties are resolved by reverse lexicographical ordering. + Rev bool + Limit *ZRangeOptionLimit + // The optional WithScores argument supplements the command's reply with the scores of elements returned. + WithScores bool +} + +// ZRangeOptionLimit provides LIMIT argument for ZRange function. +// The optional LIMIT argument can be used to obtain a sub-range from the matching elements +// (similar to SELECT LIMIT offset, count in SQL). A negative `Count` returns all elements from the `Offset`. +type ZRangeOptionLimit struct { + Offset *int + Count *int +} + +// ZRevRangeOption provides options for function ZRevRange. 
+type ZRevRangeOption struct { + WithScores bool +} diff --git a/vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_string.go b/vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_string.go new file mode 100644 index 00000000..88e82704 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/database/gredis/gredis_redis_group_string.go @@ -0,0 +1,63 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gredis + +import ( + "context" + + "github.com/gogf/gf/v2/container/gvar" +) + +// IGroupString manages redis string operations. +// Implements see redis.GroupString. +type IGroupString interface { + Set(ctx context.Context, key string, value interface{}, option ...SetOption) (*gvar.Var, error) + SetNX(ctx context.Context, key string, value interface{}) (bool, error) + SetEX(ctx context.Context, key string, value interface{}, ttlInSeconds int64) error + Get(ctx context.Context, key string) (*gvar.Var, error) + GetDel(ctx context.Context, key string) (*gvar.Var, error) + GetEX(ctx context.Context, key string, option ...GetEXOption) (*gvar.Var, error) + GetSet(ctx context.Context, key string, value interface{}) (*gvar.Var, error) + StrLen(ctx context.Context, key string) (int64, error) + Append(ctx context.Context, key string, value string) (int64, error) + SetRange(ctx context.Context, key string, offset int64, value string) (int64, error) + GetRange(ctx context.Context, key string, start, end int64) (string, error) + Incr(ctx context.Context, key string) (int64, error) + IncrBy(ctx context.Context, key string, increment int64) (int64, error) + IncrByFloat(ctx context.Context, key string, increment float64) (float64, error) + Decr(ctx context.Context, key string) (int64, error) + DecrBy(ctx context.Context, key string, decrement 
int64) (int64, error) + MSet(ctx context.Context, keyValueMap map[string]interface{}) error + MSetNX(ctx context.Context, keyValueMap map[string]interface{}) (bool, error) + MGet(ctx context.Context, keys ...string) (map[string]*gvar.Var, error) +} + +// TTLOption provides extra option for TTL related functions. +type TTLOption struct { + EX *int64 // EX seconds -- Set the specified expire time, in seconds. + PX *int64 // PX milliseconds -- Set the specified expire time, in milliseconds. + EXAT *int64 // EXAT timestamp-seconds -- Set the specified Unix time at which the key will expire, in seconds. + PXAT *int64 // PXAT timestamp-milliseconds -- Set the specified Unix time at which the key will expire, in milliseconds. + KeepTTL bool // Retain the time to live associated with the key. +} + +// SetOption provides extra option for Set function. +type SetOption struct { + TTLOption + NX bool // Only set the key if it does not already exist. + XX bool // Only set the key if it already exists. + + // Return the old string stored at key, or nil if key did not exist. + // An error is returned and SET aborted if the value stored at key is not a string. + Get bool +} + +// GetEXOption provides extra option for GetEx function. +type GetEXOption struct { + TTLOption + Persist bool // Persist -- Remove the time to live associated with the key. +} diff --git a/vendor/github.com/gogf/gf/v2/debug/gdebug/gdebug.go b/vendor/github.com/gogf/gf/v2/debug/gdebug/gdebug.go new file mode 100644 index 00000000..7ff8e3db --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/debug/gdebug/gdebug.go @@ -0,0 +1,8 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +// Package gdebug contains facilities for programs to debug themselves while they are running. 
+package gdebug diff --git a/vendor/github.com/gogf/gf/v2/debug/gdebug/gdebug_caller.go b/vendor/github.com/gogf/gf/v2/debug/gdebug/gdebug_caller.go new file mode 100644 index 00000000..d02ff4ab --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/debug/gdebug/gdebug_caller.go @@ -0,0 +1,196 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gdebug + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + "reflect" + "runtime" + "strings" +) + +const ( + maxCallerDepth = 1000 + stackFilterKey = "/debug/gdebug/gdebug" +) + +var ( + goRootForFilter = runtime.GOROOT() // goRootForFilter is used for stack filtering purpose. + binaryVersion = "" // The version of current running binary(uint64 hex). + binaryVersionMd5 = "" // The version of current running binary(MD5). + selfPath = "" // Current running binary absolute path. +) + +func init() { + if goRootForFilter != "" { + goRootForFilter = strings.ReplaceAll(goRootForFilter, "\\", "/") + } + // Initialize internal package variable: selfPath. + selfPath, _ = exec.LookPath(os.Args[0]) + if selfPath != "" { + selfPath, _ = filepath.Abs(selfPath) + } + if selfPath == "" { + selfPath, _ = filepath.Abs(os.Args[0]) + } +} + +// Caller returns the function name and the absolute file path along with its line +// number of the caller. +func Caller(skip ...int) (function string, path string, line int) { + return CallerWithFilter(nil, skip...) +} + +// CallerWithFilter returns the function name and the absolute file path along with +// its line number of the caller. +// +// The parameter `filters` is used to filter the path of the caller. 
+func CallerWithFilter(filters []string, skip ...int) (function string, path string, line int) { + var ( + number = 0 + ok = true + ) + if len(skip) > 0 { + number = skip[0] + } + pc, file, line, start := callerFromIndex(filters) + if start != -1 { + for i := start + number; i < maxCallerDepth; i++ { + if i != start { + pc, file, line, ok = runtime.Caller(i) + } + if ok { + if filterFileByFilters(file, filters) { + continue + } + function = "" + if fn := runtime.FuncForPC(pc); fn == nil { + function = "unknown" + } else { + function = fn.Name() + } + return function, file, line + } else { + break + } + } + } + return "", "", -1 +} + +// callerFromIndex returns the caller position and according information exclusive of the +// debug package. +// +// VERY NOTE THAT, the returned index value should be `index - 1` as the caller's start point. +func callerFromIndex(filters []string) (pc uintptr, file string, line int, index int) { + var ok bool + for index = 0; index < maxCallerDepth; index++ { + if pc, file, line, ok = runtime.Caller(index); ok { + if filterFileByFilters(file, filters) { + continue + } + if index > 0 { + index-- + } + return + } + } + return 0, "", -1, -1 +} + +func filterFileByFilters(file string, filters []string) (filtered bool) { + // Filter empty file. + if file == "" { + return true + } + // Filter gdebug package callings. + if strings.Contains(file, stackFilterKey) { + return true + } + for _, filter := range filters { + if filter != "" && strings.Contains(file, filter) { + return true + } + } + // GOROOT filter. + if goRootForFilter != "" && len(file) >= len(goRootForFilter) && file[0:len(goRootForFilter)] == goRootForFilter { + // https://github.com/gogf/gf/issues/2047 + fileSeparator := file[len(goRootForFilter)] + if fileSeparator == filepath.Separator || fileSeparator == '\\' || fileSeparator == '/' { + return true + } + } + return false +} + +// CallerPackage returns the package name of the caller. 
+func CallerPackage() string { + function, _, _ := Caller() + indexSplit := strings.LastIndexByte(function, '/') + if indexSplit == -1 { + return function[:strings.IndexByte(function, '.')] + } else { + leftPart := function[:indexSplit+1] + rightPart := function[indexSplit+1:] + indexDot := strings.IndexByte(function, '.') + rightPart = rightPart[:indexDot-1] + return leftPart + rightPart + } +} + +// CallerFunction returns the function name of the caller. +func CallerFunction() string { + function, _, _ := Caller() + function = function[strings.LastIndexByte(function, '/')+1:] + function = function[strings.IndexByte(function, '.')+1:] + return function +} + +// CallerFilePath returns the file path of the caller. +func CallerFilePath() string { + _, path, _ := Caller() + return path +} + +// CallerDirectory returns the directory of the caller. +func CallerDirectory() string { + _, path, _ := Caller() + return filepath.Dir(path) +} + +// CallerFileLine returns the file path along with the line number of the caller. +func CallerFileLine() string { + _, path, line := Caller() + return fmt.Sprintf(`%s:%d`, path, line) +} + +// CallerFileLineShort returns the file name along with the line number of the caller. +func CallerFileLineShort() string { + _, path, line := Caller() + return fmt.Sprintf(`%s:%d`, filepath.Base(path), line) +} + +// FuncPath returns the complete function path of given `f`. +func FuncPath(f interface{}) string { + return runtime.FuncForPC(reflect.ValueOf(f).Pointer()).Name() +} + +// FuncName returns the function name of given `f`. 
+func FuncName(f interface{}) string { + path := FuncPath(f) + if path == "" { + return "" + } + index := strings.LastIndexByte(path, '/') + if index < 0 { + index = strings.LastIndexByte(path, '\\') + } + return path[index+1:] +} diff --git a/vendor/github.com/gogf/gf/v2/debug/gdebug/gdebug_grid.go b/vendor/github.com/gogf/gf/v2/debug/gdebug/gdebug_grid.go new file mode 100644 index 00000000..f43023d8 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/debug/gdebug/gdebug_grid.go @@ -0,0 +1,29 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gdebug + +import ( + "regexp" + "runtime" + "strconv" +) + +var ( + // gridRegex is the regular expression object for parsing goroutine id from stack information. + gridRegex = regexp.MustCompile(`^\w+\s+(\d+)\s+`) +) + +// GoroutineId retrieves and returns the current goroutine id from stack information. +// Be very aware that, it is with low performance as it uses runtime.Stack function. +// It is commonly used for debugging purpose. +func GoroutineId() int { + buf := make([]byte, 26) + runtime.Stack(buf, false) + match := gridRegex.FindSubmatch(buf) + id, _ := strconv.Atoi(string(match[1])) + return id +} diff --git a/vendor/github.com/gogf/gf/v2/debug/gdebug/gdebug_stack.go b/vendor/github.com/gogf/gf/v2/debug/gdebug/gdebug_stack.go new file mode 100644 index 00000000..20db7d80 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/debug/gdebug/gdebug_stack.go @@ -0,0 +1,77 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. 
+ +package gdebug + +import ( + "bytes" + "fmt" + "runtime" +) + +// PrintStack prints to standard error the stack trace returned by runtime.Stack. +func PrintStack(skip ...int) { + fmt.Print(Stack(skip...)) +} + +// Stack returns a formatted stack trace of the goroutine that calls it. +// It calls runtime.Stack with a large enough buffer to capture the entire trace. +func Stack(skip ...int) string { + return StackWithFilter(nil, skip...) +} + +// StackWithFilter returns a formatted stack trace of the goroutine that calls it. +// It calls runtime.Stack with a large enough buffer to capture the entire trace. +// +// The parameter `filter` is used to filter the path of the caller. +func StackWithFilter(filters []string, skip ...int) string { + return StackWithFilters(filters, skip...) +} + +// StackWithFilters returns a formatted stack trace of the goroutine that calls it. +// It calls runtime.Stack with a large enough buffer to capture the entire trace. +// +// The parameter `filters` is a slice of strings, which are used to filter the path of the +// caller. +// +// TODO Improve the performance using debug.Stack. 
+func StackWithFilters(filters []string, skip ...int) string { + number := 0 + if len(skip) > 0 { + number = skip[0] + } + var ( + name string + space = " " + index = 1 + buffer = bytes.NewBuffer(nil) + ok = true + pc, file, line, start = callerFromIndex(filters) + ) + for i := start + number; i < maxCallerDepth; i++ { + if i != start { + pc, file, line, ok = runtime.Caller(i) + } + if ok { + if filterFileByFilters(file, filters) { + continue + } + if fn := runtime.FuncForPC(pc); fn == nil { + name = "unknown" + } else { + name = fn.Name() + } + if index > 9 { + space = " " + } + buffer.WriteString(fmt.Sprintf("%d.%s%s\n %s:%d\n", index, space, name, file, line)) + index++ + } else { + break + } + } + return buffer.String() +} diff --git a/vendor/github.com/gogf/gf/v2/debug/gdebug/gdebug_version.go b/vendor/github.com/gogf/gf/v2/debug/gdebug/gdebug_version.go new file mode 100644 index 00000000..5c883f68 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/debug/gdebug/gdebug_version.go @@ -0,0 +1,58 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gdebug + +import ( + "crypto/md5" + "fmt" + "io" + "io/ioutil" + "os" + "strconv" + + "github.com/gogf/gf/v2/encoding/ghash" + "github.com/gogf/gf/v2/errors/gerror" +) + +// BinVersion returns the version of current running binary. +// It uses ghash.BKDRHash+BASE36 algorithm to calculate the unique version of the binary. +func BinVersion() string { + if binaryVersion == "" { + binaryContent, _ := ioutil.ReadFile(selfPath) + binaryVersion = strconv.FormatInt( + int64(ghash.BKDR(binaryContent)), + 36, + ) + } + return binaryVersion +} + +// BinVersionMd5 returns the version of current running binary. +// It uses MD5 algorithm to calculate the unique version of the binary. 
+func BinVersionMd5() string { + if binaryVersionMd5 == "" { + binaryVersionMd5, _ = md5File(selfPath) + } + return binaryVersionMd5 +} + +// md5File encrypts file content of `path` using MD5 algorithms. +func md5File(path string) (encrypt string, err error) { + f, err := os.Open(path) + if err != nil { + err = gerror.Wrapf(err, `os.Open failed for name "%s"`, path) + return "", err + } + defer f.Close() + h := md5.New() + _, err = io.Copy(h, f) + if err != nil { + err = gerror.Wrap(err, `io.Copy failed`) + return "", err + } + return fmt.Sprintf("%x", h.Sum(nil)), nil +} diff --git a/vendor/github.com/gogf/gf/v2/encoding/gbinary/gbinary.go b/vendor/github.com/gogf/gf/v2/encoding/gbinary/gbinary.go new file mode 100644 index 00000000..5fbc4847 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/encoding/gbinary/gbinary.go @@ -0,0 +1,134 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +// Package gbinary provides useful API for handling binary/bytes data. +// +// Note that package gbinary encodes the data using LittleEndian in default. +package gbinary + +func Encode(values ...interface{}) []byte { + return LeEncode(values...) +} + +func EncodeByLength(length int, values ...interface{}) []byte { + return LeEncodeByLength(length, values...) +} + +func Decode(b []byte, values ...interface{}) error { + return LeDecode(b, values...) 
+} + +func EncodeString(s string) []byte { + return LeEncodeString(s) +} + +func DecodeToString(b []byte) string { + return LeDecodeToString(b) +} + +func EncodeBool(b bool) []byte { + return LeEncodeBool(b) +} + +func EncodeInt(i int) []byte { + return LeEncodeInt(i) +} + +func EncodeUint(i uint) []byte { + return LeEncodeUint(i) +} + +func EncodeInt8(i int8) []byte { + return LeEncodeInt8(i) +} + +func EncodeUint8(i uint8) []byte { + return LeEncodeUint8(i) +} + +func EncodeInt16(i int16) []byte { + return LeEncodeInt16(i) +} + +func EncodeUint16(i uint16) []byte { + return LeEncodeUint16(i) +} + +func EncodeInt32(i int32) []byte { + return LeEncodeInt32(i) +} + +func EncodeUint32(i uint32) []byte { + return LeEncodeUint32(i) +} + +func EncodeInt64(i int64) []byte { + return LeEncodeInt64(i) +} + +func EncodeUint64(i uint64) []byte { + return LeEncodeUint64(i) +} + +func EncodeFloat32(f float32) []byte { + return LeEncodeFloat32(f) +} + +func EncodeFloat64(f float64) []byte { + return LeEncodeFloat64(f) +} + +func DecodeToInt(b []byte) int { + return LeDecodeToInt(b) +} + +func DecodeToUint(b []byte) uint { + return LeDecodeToUint(b) +} + +func DecodeToBool(b []byte) bool { + return LeDecodeToBool(b) +} + +func DecodeToInt8(b []byte) int8 { + return LeDecodeToInt8(b) +} + +func DecodeToUint8(b []byte) uint8 { + return LeDecodeToUint8(b) +} + +func DecodeToInt16(b []byte) int16 { + return LeDecodeToInt16(b) +} + +func DecodeToUint16(b []byte) uint16 { + return LeDecodeToUint16(b) +} + +func DecodeToInt32(b []byte) int32 { + return LeDecodeToInt32(b) +} + +func DecodeToUint32(b []byte) uint32 { + return LeDecodeToUint32(b) +} + +func DecodeToInt64(b []byte) int64 { + return LeDecodeToInt64(b) +} + +func DecodeToUint64(b []byte) uint64 { + return LeDecodeToUint64(b) +} + +func DecodeToFloat32(b []byte) float32 { + return LeDecodeToFloat32(b) +} + +func DecodeToFloat64(b []byte) float64 { + return LeDecodeToFloat64(b) +} diff --git 
a/vendor/github.com/gogf/gf/v2/encoding/gbinary/gbinary_be.go b/vendor/github.com/gogf/gf/v2/encoding/gbinary/gbinary_be.go new file mode 100644 index 00000000..6f608548 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/encoding/gbinary/gbinary_be.go @@ -0,0 +1,287 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gbinary + +import ( + "bytes" + "context" + "encoding/binary" + "fmt" + "math" + + "github.com/gogf/gf/v2/errors/gerror" + "github.com/gogf/gf/v2/internal/intlog" +) + +// BeEncode encodes one or multiple `values` into bytes using BigEndian. +// It uses type asserting checking the type of each value of `values` and internally +// calls corresponding converting function do the bytes converting. +// +// It supports common variable type asserting, and finally it uses fmt.Sprintf converting +// value to string and then to bytes. 
+func BeEncode(values ...interface{}) []byte { + buf := new(bytes.Buffer) + for i := 0; i < len(values); i++ { + if values[i] == nil { + return buf.Bytes() + } + + switch value := values[i].(type) { + case int: + buf.Write(BeEncodeInt(value)) + case int8: + buf.Write(BeEncodeInt8(value)) + case int16: + buf.Write(BeEncodeInt16(value)) + case int32: + buf.Write(BeEncodeInt32(value)) + case int64: + buf.Write(BeEncodeInt64(value)) + case uint: + buf.Write(BeEncodeUint(value)) + case uint8: + buf.Write(BeEncodeUint8(value)) + case uint16: + buf.Write(BeEncodeUint16(value)) + case uint32: + buf.Write(BeEncodeUint32(value)) + case uint64: + buf.Write(BeEncodeUint64(value)) + case bool: + buf.Write(BeEncodeBool(value)) + case string: + buf.Write(BeEncodeString(value)) + case []byte: + buf.Write(value) + case float32: + buf.Write(BeEncodeFloat32(value)) + case float64: + buf.Write(BeEncodeFloat64(value)) + default: + if err := binary.Write(buf, binary.BigEndian, value); err != nil { + intlog.Errorf(context.TODO(), `%+v`, err) + buf.Write(BeEncodeString(fmt.Sprintf("%v", value))) + } + } + } + return buf.Bytes() +} + +func BeEncodeByLength(length int, values ...interface{}) []byte { + b := BeEncode(values...) + if len(b) < length { + b = append(b, make([]byte, length-len(b))...) 
+ } else if len(b) > length { + b = b[0:length] + } + return b +} + +func BeDecode(b []byte, values ...interface{}) error { + var ( + err error + buf = bytes.NewBuffer(b) + ) + for i := 0; i < len(values); i++ { + if err = binary.Read(buf, binary.BigEndian, values[i]); err != nil { + err = gerror.Wrap(err, `binary.Read failed`) + return err + } + } + return nil +} + +func BeEncodeString(s string) []byte { + return []byte(s) +} + +func BeDecodeToString(b []byte) string { + return string(b) +} + +func BeEncodeBool(b bool) []byte { + if b { + return []byte{1} + } else { + return []byte{0} + } +} + +func BeEncodeInt(i int) []byte { + if i <= math.MaxInt8 { + return BeEncodeInt8(int8(i)) + } else if i <= math.MaxInt16 { + return BeEncodeInt16(int16(i)) + } else if i <= math.MaxInt32 { + return BeEncodeInt32(int32(i)) + } else { + return BeEncodeInt64(int64(i)) + } +} + +func BeEncodeUint(i uint) []byte { + if i <= math.MaxUint8 { + return BeEncodeUint8(uint8(i)) + } else if i <= math.MaxUint16 { + return BeEncodeUint16(uint16(i)) + } else if i <= math.MaxUint32 { + return BeEncodeUint32(uint32(i)) + } else { + return BeEncodeUint64(uint64(i)) + } +} + +func BeEncodeInt8(i int8) []byte { + return []byte{byte(i)} +} + +func BeEncodeUint8(i uint8) []byte { + return []byte{i} +} + +func BeEncodeInt16(i int16) []byte { + b := make([]byte, 2) + binary.BigEndian.PutUint16(b, uint16(i)) + return b +} + +func BeEncodeUint16(i uint16) []byte { + b := make([]byte, 2) + binary.BigEndian.PutUint16(b, i) + return b +} + +func BeEncodeInt32(i int32) []byte { + b := make([]byte, 4) + binary.BigEndian.PutUint32(b, uint32(i)) + return b +} + +func BeEncodeUint32(i uint32) []byte { + b := make([]byte, 4) + binary.BigEndian.PutUint32(b, i) + return b +} + +func BeEncodeInt64(i int64) []byte { + b := make([]byte, 8) + binary.BigEndian.PutUint64(b, uint64(i)) + return b +} + +func BeEncodeUint64(i uint64) []byte { + b := make([]byte, 8) + binary.BigEndian.PutUint64(b, i) + return b +} + 
+func BeEncodeFloat32(f float32) []byte { + bits := math.Float32bits(f) + b := make([]byte, 4) + binary.BigEndian.PutUint32(b, bits) + return b +} + +func BeEncodeFloat64(f float64) []byte { + bits := math.Float64bits(f) + b := make([]byte, 8) + binary.BigEndian.PutUint64(b, bits) + return b +} + +func BeDecodeToInt(b []byte) int { + if len(b) < 2 { + return int(BeDecodeToUint8(b)) + } else if len(b) < 3 { + return int(BeDecodeToUint16(b)) + } else if len(b) < 5 { + return int(BeDecodeToUint32(b)) + } else { + return int(BeDecodeToUint64(b)) + } +} + +func BeDecodeToUint(b []byte) uint { + if len(b) < 2 { + return uint(BeDecodeToUint8(b)) + } else if len(b) < 3 { + return uint(BeDecodeToUint16(b)) + } else if len(b) < 5 { + return uint(BeDecodeToUint32(b)) + } else { + return uint(BeDecodeToUint64(b)) + } +} + +func BeDecodeToBool(b []byte) bool { + if len(b) == 0 { + return false + } + if bytes.Equal(b, make([]byte, len(b))) { + return false + } + return true +} + +func BeDecodeToInt8(b []byte) int8 { + if len(b) == 0 { + panic(`empty slice given`) + } + return int8(b[0]) +} + +func BeDecodeToUint8(b []byte) uint8 { + if len(b) == 0 { + panic(`empty slice given`) + } + return b[0] +} + +func BeDecodeToInt16(b []byte) int16 { + return int16(binary.BigEndian.Uint16(BeFillUpSize(b, 2))) +} + +func BeDecodeToUint16(b []byte) uint16 { + return binary.BigEndian.Uint16(BeFillUpSize(b, 2)) +} + +func BeDecodeToInt32(b []byte) int32 { + return int32(binary.BigEndian.Uint32(BeFillUpSize(b, 4))) +} + +func BeDecodeToUint32(b []byte) uint32 { + return binary.BigEndian.Uint32(BeFillUpSize(b, 4)) +} + +func BeDecodeToInt64(b []byte) int64 { + return int64(binary.BigEndian.Uint64(BeFillUpSize(b, 8))) +} + +func BeDecodeToUint64(b []byte) uint64 { + return binary.BigEndian.Uint64(BeFillUpSize(b, 8)) +} + +func BeDecodeToFloat32(b []byte) float32 { + return math.Float32frombits(binary.BigEndian.Uint32(BeFillUpSize(b, 4))) +} + +func BeDecodeToFloat64(b []byte) float64 { + return 
math.Float64frombits(binary.BigEndian.Uint64(BeFillUpSize(b, 8))) +} + +// BeFillUpSize fills up the bytes `b` to given length `l` using big BigEndian. +// +// Note that it creates a new bytes slice by copying the original one to avoid changing +// the original parameter bytes. +func BeFillUpSize(b []byte, l int) []byte { + if len(b) >= l { + return b[:l] + } + c := make([]byte, l) + copy(c[l-len(b):], b) + return c +} diff --git a/vendor/github.com/gogf/gf/v2/encoding/gbinary/gbinary_bit.go b/vendor/github.com/gogf/gf/v2/encoding/gbinary/gbinary_bit.go new file mode 100644 index 00000000..3e93dcab --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/encoding/gbinary/gbinary_bit.go @@ -0,0 +1,74 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gbinary + +// NOTE: THIS IS AN EXPERIMENTAL FEATURE! + +// Bit Binary bit (0 | 1) +type Bit int8 + +// EncodeBits does encode bits return bits Default coding +func EncodeBits(bits []Bit, i int, l int) []Bit { + return EncodeBitsWithUint(bits, uint(i), l) +} + +// EncodeBitsWithUint . Merge ui bitwise into the bits array and occupy the length bits +// (Note: binary 0 | 1 digits are stored in the uis array) +func EncodeBitsWithUint(bits []Bit, ui uint, l int) []Bit { + a := make([]Bit, l) + for i := l - 1; i >= 0; i-- { + a[i] = Bit(ui & 1) + ui >>= 1 + } + if bits != nil { + return append(bits, a...) + } + return a +} + +// EncodeBitsToBytes . does encode bits to bytes +// Convert bits to [] byte, encode from left to right, and add less than 1 byte from 0 to the end. 
+func EncodeBitsToBytes(bits []Bit) []byte { + if len(bits)%8 != 0 { + for i := 0; i < len(bits)%8; i++ { + bits = append(bits, 0) + } + } + b := make([]byte, 0) + for i := 0; i < len(bits); i += 8 { + b = append(b, byte(DecodeBitsToUint(bits[i:i+8]))) + } + return b +} + +// DecodeBits .does decode bits to int +// Resolve to int +func DecodeBits(bits []Bit) int { + v := 0 + for _, i := range bits { + v = v<<1 | int(i) + } + return v +} + +// DecodeBitsToUint .Resolve to uint +func DecodeBitsToUint(bits []Bit) uint { + v := uint(0) + for _, i := range bits { + v = v<<1 | uint(i) + } + return v +} + +// DecodeBytesToBits .Parsing [] byte into character array [] uint8 +func DecodeBytesToBits(bs []byte) []Bit { + bits := make([]Bit, 0) + for _, b := range bs { + bits = EncodeBitsWithUint(bits, uint(b), 8) + } + return bits +} diff --git a/vendor/github.com/gogf/gf/v2/encoding/gbinary/gbinary_func.go b/vendor/github.com/gogf/gf/v2/encoding/gbinary/gbinary_func.go new file mode 100644 index 00000000..6e1fba24 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/encoding/gbinary/gbinary_func.go @@ -0,0 +1,7 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gbinary diff --git a/vendor/github.com/gogf/gf/v2/encoding/gbinary/gbinary_le.go b/vendor/github.com/gogf/gf/v2/encoding/gbinary/gbinary_le.go new file mode 100644 index 00000000..b648c09d --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/encoding/gbinary/gbinary_le.go @@ -0,0 +1,287 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. 
+ +package gbinary + +import ( + "bytes" + "context" + "encoding/binary" + "fmt" + "math" + + "github.com/gogf/gf/v2/errors/gerror" + "github.com/gogf/gf/v2/internal/intlog" +) + +// LeEncode encodes one or multiple `values` into bytes using LittleEndian. +// It uses type asserting checking the type of each value of `values` and internally +// calls corresponding converting function do the bytes converting. +// +// It supports common variable type asserting, and finally it uses fmt.Sprintf converting +// value to string and then to bytes. +func LeEncode(values ...interface{}) []byte { + buf := new(bytes.Buffer) + for i := 0; i < len(values); i++ { + if values[i] == nil { + return buf.Bytes() + } + switch value := values[i].(type) { + case int: + buf.Write(LeEncodeInt(value)) + case int8: + buf.Write(LeEncodeInt8(value)) + case int16: + buf.Write(LeEncodeInt16(value)) + case int32: + buf.Write(LeEncodeInt32(value)) + case int64: + buf.Write(LeEncodeInt64(value)) + case uint: + buf.Write(LeEncodeUint(value)) + case uint8: + buf.Write(LeEncodeUint8(value)) + case uint16: + buf.Write(LeEncodeUint16(value)) + case uint32: + buf.Write(LeEncodeUint32(value)) + case uint64: + buf.Write(LeEncodeUint64(value)) + case bool: + buf.Write(LeEncodeBool(value)) + case string: + buf.Write(LeEncodeString(value)) + case []byte: + buf.Write(value) + case float32: + buf.Write(LeEncodeFloat32(value)) + case float64: + buf.Write(LeEncodeFloat64(value)) + + default: + if err := binary.Write(buf, binary.LittleEndian, value); err != nil { + intlog.Errorf(context.TODO(), `%+v`, err) + buf.Write(LeEncodeString(fmt.Sprintf("%v", value))) + } + } + } + return buf.Bytes() +} + +func LeEncodeByLength(length int, values ...interface{}) []byte { + b := LeEncode(values...) + if len(b) < length { + b = append(b, make([]byte, length-len(b))...) 
+ } else if len(b) > length { + b = b[0:length] + } + return b +} + +func LeDecode(b []byte, values ...interface{}) error { + var ( + err error + buf = bytes.NewBuffer(b) + ) + for i := 0; i < len(values); i++ { + if err = binary.Read(buf, binary.LittleEndian, values[i]); err != nil { + err = gerror.Wrap(err, `binary.Read failed`) + return err + } + } + return nil +} + +func LeEncodeString(s string) []byte { + return []byte(s) +} + +func LeDecodeToString(b []byte) string { + return string(b) +} + +func LeEncodeBool(b bool) []byte { + if b { + return []byte{1} + } else { + return []byte{0} + } +} + +func LeEncodeInt(i int) []byte { + if i <= math.MaxInt8 { + return EncodeInt8(int8(i)) + } else if i <= math.MaxInt16 { + return EncodeInt16(int16(i)) + } else if i <= math.MaxInt32 { + return EncodeInt32(int32(i)) + } else { + return EncodeInt64(int64(i)) + } +} + +func LeEncodeUint(i uint) []byte { + if i <= math.MaxUint8 { + return EncodeUint8(uint8(i)) + } else if i <= math.MaxUint16 { + return EncodeUint16(uint16(i)) + } else if i <= math.MaxUint32 { + return EncodeUint32(uint32(i)) + } else { + return EncodeUint64(uint64(i)) + } +} + +func LeEncodeInt8(i int8) []byte { + return []byte{byte(i)} +} + +func LeEncodeUint8(i uint8) []byte { + return []byte{i} +} + +func LeEncodeInt16(i int16) []byte { + b := make([]byte, 2) + binary.LittleEndian.PutUint16(b, uint16(i)) + return b +} + +func LeEncodeUint16(i uint16) []byte { + b := make([]byte, 2) + binary.LittleEndian.PutUint16(b, i) + return b +} + +func LeEncodeInt32(i int32) []byte { + b := make([]byte, 4) + binary.LittleEndian.PutUint32(b, uint32(i)) + return b +} + +func LeEncodeUint32(i uint32) []byte { + b := make([]byte, 4) + binary.LittleEndian.PutUint32(b, i) + return b +} + +func LeEncodeInt64(i int64) []byte { + b := make([]byte, 8) + binary.LittleEndian.PutUint64(b, uint64(i)) + return b +} + +func LeEncodeUint64(i uint64) []byte { + b := make([]byte, 8) + binary.LittleEndian.PutUint64(b, i) + return b +} + 
+func LeEncodeFloat32(f float32) []byte { + bits := math.Float32bits(f) + b := make([]byte, 4) + binary.LittleEndian.PutUint32(b, bits) + return b +} + +func LeEncodeFloat64(f float64) []byte { + bits := math.Float64bits(f) + b := make([]byte, 8) + binary.LittleEndian.PutUint64(b, bits) + return b +} + +func LeDecodeToInt(b []byte) int { + if len(b) < 2 { + return int(LeDecodeToUint8(b)) + } else if len(b) < 3 { + return int(LeDecodeToUint16(b)) + } else if len(b) < 5 { + return int(LeDecodeToUint32(b)) + } else { + return int(LeDecodeToUint64(b)) + } +} + +func LeDecodeToUint(b []byte) uint { + if len(b) < 2 { + return uint(LeDecodeToUint8(b)) + } else if len(b) < 3 { + return uint(LeDecodeToUint16(b)) + } else if len(b) < 5 { + return uint(LeDecodeToUint32(b)) + } else { + return uint(LeDecodeToUint64(b)) + } +} + +func LeDecodeToBool(b []byte) bool { + if len(b) == 0 { + return false + } + if bytes.Equal(b, make([]byte, len(b))) { + return false + } + return true +} + +func LeDecodeToInt8(b []byte) int8 { + if len(b) == 0 { + panic(`empty slice given`) + } + return int8(b[0]) +} + +func LeDecodeToUint8(b []byte) uint8 { + if len(b) == 0 { + panic(`empty slice given`) + } + return b[0] +} + +func LeDecodeToInt16(b []byte) int16 { + return int16(binary.LittleEndian.Uint16(LeFillUpSize(b, 2))) +} + +func LeDecodeToUint16(b []byte) uint16 { + return binary.LittleEndian.Uint16(LeFillUpSize(b, 2)) +} + +func LeDecodeToInt32(b []byte) int32 { + return int32(binary.LittleEndian.Uint32(LeFillUpSize(b, 4))) +} + +func LeDecodeToUint32(b []byte) uint32 { + return binary.LittleEndian.Uint32(LeFillUpSize(b, 4)) +} + +func LeDecodeToInt64(b []byte) int64 { + return int64(binary.LittleEndian.Uint64(LeFillUpSize(b, 8))) +} + +func LeDecodeToUint64(b []byte) uint64 { + return binary.LittleEndian.Uint64(LeFillUpSize(b, 8)) +} + +func LeDecodeToFloat32(b []byte) float32 { + return math.Float32frombits(binary.LittleEndian.Uint32(LeFillUpSize(b, 4))) +} + +func LeDecodeToFloat64(b 
[]byte) float64 { + return math.Float64frombits(binary.LittleEndian.Uint64(LeFillUpSize(b, 8))) +} + +// LeFillUpSize fills up the bytes `b` to given length `l` using LittleEndian. +// +// Note that it creates a new bytes slice by copying the original one to avoid changing +// the original parameter bytes. +func LeFillUpSize(b []byte, l int) []byte { + if len(b) >= l { + return b[:l] + } + c := make([]byte, l) + copy(c, b) + return c +} diff --git a/vendor/github.com/gogf/gf/v2/encoding/gcompress/gcompress.go b/vendor/github.com/gogf/gf/v2/encoding/gcompress/gcompress.go new file mode 100644 index 00000000..1b4ca942 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/encoding/gcompress/gcompress.go @@ -0,0 +1,8 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +// Package gcompress provides kinds of compression algorithms for binary/bytes data. +package gcompress diff --git a/vendor/github.com/gogf/gf/v2/encoding/gcompress/gcompress_gzip.go b/vendor/github.com/gogf/gf/v2/encoding/gcompress/gcompress_gzip.go new file mode 100644 index 00000000..40464f92 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/encoding/gcompress/gcompress_gzip.go @@ -0,0 +1,135 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gcompress + +import ( + "bytes" + "compress/gzip" + "io" + + "github.com/gogf/gf/v2/errors/gerror" + "github.com/gogf/gf/v2/os/gfile" +) + +// Gzip compresses `data` using gzip algorithm. +// The optional parameter `level` specifies the compression level from +// 1 to 9 which means from none to the best compression. 
+// +// Note that it returns error if given `level` is invalid. +func Gzip(data []byte, level ...int) ([]byte, error) { + var ( + writer *gzip.Writer + buf bytes.Buffer + err error + ) + if len(level) > 0 { + writer, err = gzip.NewWriterLevel(&buf, level[0]) + if err != nil { + err = gerror.Wrapf(err, `gzip.NewWriterLevel failed for level "%d"`, level[0]) + return nil, err + } + } else { + writer = gzip.NewWriter(&buf) + } + if _, err = writer.Write(data); err != nil { + err = gerror.Wrap(err, `writer.Write failed`) + return nil, err + } + if err = writer.Close(); err != nil { + err = gerror.Wrap(err, `writer.Close failed`) + return nil, err + } + return buf.Bytes(), nil +} + +// GzipFile compresses the file `src` to `dst` using gzip algorithm. +func GzipFile(srcFilePath, dstFilePath string, level ...int) (err error) { + dstFile, err := gfile.Create(dstFilePath) + if err != nil { + return err + } + defer dstFile.Close() + + return GzipPathWriter(srcFilePath, dstFile, level...) +} + +// GzipPathWriter compresses `filePath` to `writer` using gzip compressing algorithm. +// +// Note that the parameter `path` can be either a directory or a file. +func GzipPathWriter(filePath string, writer io.Writer, level ...int) error { + var ( + gzipWriter *gzip.Writer + err error + ) + srcFile, err := gfile.Open(filePath) + if err != nil { + return err + } + defer srcFile.Close() + + if len(level) > 0 { + gzipWriter, err = gzip.NewWriterLevel(writer, level[0]) + if err != nil { + return gerror.Wrap(err, `gzip.NewWriterLevel failed`) + } + } else { + gzipWriter = gzip.NewWriter(writer) + } + defer gzipWriter.Close() + + if _, err = io.Copy(gzipWriter, srcFile); err != nil { + err = gerror.Wrap(err, `io.Copy failed`) + return err + } + return nil +} + +// UnGzip decompresses `data` with gzip algorithm. 
+func UnGzip(data []byte) ([]byte, error) { + var buf bytes.Buffer + reader, err := gzip.NewReader(bytes.NewReader(data)) + if err != nil { + err = gerror.Wrap(err, `gzip.NewReader failed`) + return nil, err + } + if _, err = io.Copy(&buf, reader); err != nil { + err = gerror.Wrap(err, `io.Copy failed`) + return nil, err + } + if err = reader.Close(); err != nil { + err = gerror.Wrap(err, `reader.Close failed`) + return buf.Bytes(), err + } + return buf.Bytes(), nil +} + +// UnGzipFile decompresses srcFilePath `src` to `dst` using gzip algorithm. +func UnGzipFile(srcFilePath, dstFilePath string) error { + srcFile, err := gfile.Open(srcFilePath) + if err != nil { + return err + } + defer srcFile.Close() + dstFile, err := gfile.Create(dstFilePath) + if err != nil { + return err + } + defer dstFile.Close() + + reader, err := gzip.NewReader(srcFile) + if err != nil { + err = gerror.Wrap(err, `gzip.NewReader failed`) + return err + } + defer reader.Close() + + if _, err = io.Copy(dstFile, reader); err != nil { + err = gerror.Wrap(err, `io.Copy failed`) + return err + } + return nil +} diff --git a/vendor/github.com/gogf/gf/v2/encoding/gcompress/gcompress_zip.go b/vendor/github.com/gogf/gf/v2/encoding/gcompress/gcompress_zip.go new file mode 100644 index 00000000..05bf5c70 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/encoding/gcompress/gcompress_zip.go @@ -0,0 +1,280 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. 
+ +package gcompress + +import ( + "archive/zip" + "bytes" + "context" + "io" + "os" + "path/filepath" + "strings" + + "github.com/gogf/gf/v2/errors/gerror" + "github.com/gogf/gf/v2/internal/intlog" + "github.com/gogf/gf/v2/os/gfile" + "github.com/gogf/gf/v2/text/gstr" +) + +// ZipPath compresses `fileOrFolderPaths` to `dstFilePath` using zip compressing algorithm. +// +// The parameter `paths` can be either a directory or a file, which +// supports multiple paths join with ','. +// The unnecessary parameter `prefix` indicates the path prefix for zip file. +func ZipPath(fileOrFolderPaths, dstFilePath string, prefix ...string) error { + writer, err := os.Create(dstFilePath) + if err != nil { + err = gerror.Wrapf(err, `os.Create failed for name "%s"`, dstFilePath) + return err + } + defer writer.Close() + zipWriter := zip.NewWriter(writer) + defer zipWriter.Close() + for _, path := range strings.Split(fileOrFolderPaths, ",") { + path = strings.TrimSpace(path) + if err = doZipPathWriter(path, gfile.RealPath(dstFilePath), zipWriter, prefix...); err != nil { + return err + } + } + return nil +} + +// ZipPathWriter compresses `fileOrFolderPaths` to `writer` using zip compressing algorithm. +// +// Note that the parameter `fileOrFolderPaths` can be either a directory or a file, which +// supports multiple paths join with ','. +// The unnecessary parameter `prefix` indicates the path prefix for zip file. +func ZipPathWriter(fileOrFolderPaths string, writer io.Writer, prefix ...string) error { + zipWriter := zip.NewWriter(writer) + defer zipWriter.Close() + for _, path := range strings.Split(fileOrFolderPaths, ",") { + path = strings.TrimSpace(path) + if err := doZipPathWriter(path, "", zipWriter, prefix...); err != nil { + return err + } + } + return nil +} + +// ZipPathContent compresses `fileOrFolderPaths` to []byte using zip compressing algorithm. 
+// +// Note that the parameter `fileOrFolderPaths` can be either a directory or a file, which +// supports multiple paths join with ','. +// The unnecessary parameter `prefix` indicates the path prefix for zip file. +func ZipPathContent(fileOrFolderPaths string, prefix ...string) ([]byte, error) { + var ( + err error + buffer = bytes.NewBuffer(nil) + ) + if err = ZipPathWriter(fileOrFolderPaths, buffer, prefix...); err != nil { + return nil, err + } + return buffer.Bytes(), nil +} + +// doZipPathWriter compresses given `fileOrFolderPaths` and writes the content to `zipWriter`. +// +// The parameter `fileOrFolderPath` can be either a single file or folder path. +// The parameter `exclude` specifies the exclusive file path that is not compressed to `zipWriter`, +// commonly the destination zip file path. +// The unnecessary parameter `prefix` indicates the path prefix for zip file. +func doZipPathWriter(fileOrFolderPath string, exclude string, zipWriter *zip.Writer, prefix ...string) error { + var ( + err error + files []string + ) + fileOrFolderPath, err = gfile.Search(fileOrFolderPath) + if err != nil { + return err + } + if gfile.IsDir(fileOrFolderPath) { + files, err = gfile.ScanDir(fileOrFolderPath, "*", true) + if err != nil { + return err + } + } else { + files = []string{fileOrFolderPath} + } + headerPrefix := "" + if len(prefix) > 0 && prefix[0] != "" { + headerPrefix = prefix[0] + } + headerPrefix = strings.TrimRight(headerPrefix, "\\/") + if gfile.IsDir(fileOrFolderPath) { + if len(headerPrefix) > 0 { + headerPrefix += "/" + } else { + headerPrefix = gfile.Basename(fileOrFolderPath) + } + } + headerPrefix = strings.ReplaceAll(headerPrefix, "//", "/") + for _, file := range files { + if exclude == file { + intlog.Printf(context.TODO(), `exclude file path: %s`, file) + continue + } + dir := gfile.Dir(file[len(fileOrFolderPath):]) + if dir == "." 
{ + dir = "" + } + if err = zipFile(file, headerPrefix+dir, zipWriter); err != nil { + return err + } + } + return nil +} + +// UnZipFile decompresses `archive` to `dstFolderPath` using zip compressing algorithm. +// +// The parameter `dstFolderPath` should be a directory. +// The optional parameter `zippedPrefix` specifies the unzipped path of `zippedFilePath`, +// which can be used to specify part of the archive file to unzip. +func UnZipFile(zippedFilePath, dstFolderPath string, zippedPrefix ...string) error { + readerCloser, err := zip.OpenReader(zippedFilePath) + if err != nil { + err = gerror.Wrapf(err, `zip.OpenReader failed for name "%s"`, dstFolderPath) + return err + } + defer readerCloser.Close() + return unZipFileWithReader(&readerCloser.Reader, dstFolderPath, zippedPrefix...) +} + +// UnZipContent decompresses `zippedContent` to `dstFolderPath` using zip compressing algorithm. +// +// The parameter `dstFolderPath` should be a directory. +// The parameter `zippedPrefix` specifies the unzipped path of `zippedContent`, +// which can be used to specify part of the archive file to unzip. +func UnZipContent(zippedContent []byte, dstFolderPath string, zippedPrefix ...string) error { + reader, err := zip.NewReader(bytes.NewReader(zippedContent), int64(len(zippedContent))) + if err != nil { + err = gerror.Wrapf(err, `zip.NewReader failed`) + return err + } + return unZipFileWithReader(reader, dstFolderPath, zippedPrefix...) 
+} + +func unZipFileWithReader(reader *zip.Reader, dstFolderPath string, zippedPrefix ...string) error { + prefix := "" + if len(zippedPrefix) > 0 { + prefix = gstr.Replace(zippedPrefix[0], `\`, `/`) + } + if err := os.MkdirAll(dstFolderPath, 0755); err != nil { + return err + } + var ( + name string + dstPath string + dstDir string + ) + for _, file := range reader.File { + name = gstr.Replace(file.Name, `\`, `/`) + name = gstr.Trim(name, "/") + if prefix != "" { + if name[0:len(prefix)] != prefix { + continue + } + name = name[len(prefix):] + } + dstPath = filepath.Join(dstFolderPath, name) + if file.FileInfo().IsDir() { + _ = os.MkdirAll(dstPath, file.Mode()) + continue + } + dstDir = filepath.Dir(dstPath) + if len(dstDir) > 0 { + if _, err := os.Stat(dstDir); os.IsNotExist(err) { + if err = os.MkdirAll(dstDir, 0755); err != nil { + err = gerror.Wrapf(err, `os.MkdirAll failed for path "%s"`, dstDir) + return err + } + } + } + fileReader, err := file.Open() + if err != nil { + err = gerror.Wrapf(err, `file.Open failed`) + return err + } + // The fileReader is closed in function doCopyForUnZipFileWithReader. + if err = doCopyForUnZipFileWithReader(file, fileReader, dstPath); err != nil { + return err + } + } + return nil +} + +func doCopyForUnZipFileWithReader(file *zip.File, fileReader io.ReadCloser, dstPath string) error { + defer fileReader.Close() + targetFile, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, file.Mode()) + if err != nil { + err = gerror.Wrapf(err, `os.OpenFile failed for name "%s"`, dstPath) + return err + } + defer targetFile.Close() + + if _, err = io.Copy(targetFile, fileReader); err != nil { + err = gerror.Wrapf(err, `io.Copy failed from "%s" to "%s"`, file.Name, dstPath) + return err + } + return nil +} + +// zipFile compresses the file of given `filePath` and writes the content to `zw`. +// The parameter `prefix` indicates the path prefix for zip file. 
+func zipFile(filePath string, prefix string, zw *zip.Writer) error { + file, err := os.Open(filePath) + if err != nil { + err = gerror.Wrapf(err, `os.Open failed for name "%s"`, filePath) + return err + } + defer file.Close() + + info, err := file.Stat() + if err != nil { + err = gerror.Wrapf(err, `file.Stat failed for name "%s"`, filePath) + return err + } + + header, err := createFileHeader(info, prefix) + if err != nil { + return err + } + + if info.IsDir() { + header.Name += "/" + } else { + header.Method = zip.Deflate + } + + writer, err := zw.CreateHeader(header) + if err != nil { + err = gerror.Wrapf(err, `zip.Writer.CreateHeader failed for header "%#v"`, header) + return err + } + if !info.IsDir() { + if _, err = io.Copy(writer, file); err != nil { + err = gerror.Wrapf(err, `io.Copy failed from "%s" to "%s"`, filePath, header.Name) + return err + } + } + return nil +} + +func createFileHeader(info os.FileInfo, prefix string) (*zip.FileHeader, error) { + header, err := zip.FileInfoHeader(info) + if err != nil { + err = gerror.Wrapf(err, `zip.FileInfoHeader failed for info "%#v"`, info) + return nil, err + } + + if len(prefix) > 0 { + prefix = strings.ReplaceAll(prefix, `\`, `/`) + prefix = strings.TrimRight(prefix, `/`) + header.Name = prefix + `/` + header.Name + } + return header, nil +} diff --git a/vendor/github.com/gogf/gf/v2/encoding/gcompress/gcompress_zlib.go b/vendor/github.com/gogf/gf/v2/encoding/gcompress/gcompress_zlib.go new file mode 100644 index 00000000..c45b3d2b --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/encoding/gcompress/gcompress_zlib.go @@ -0,0 +1,59 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +// Package gcompress provides kinds of compression algorithms for binary/bytes data. 
+package gcompress + +import ( + "bytes" + "compress/zlib" + "io" + + "github.com/gogf/gf/v2/errors/gerror" +) + +// Zlib compresses `data` with zlib algorithm. +func Zlib(data []byte) ([]byte, error) { + if data == nil || len(data) < 13 { + return data, nil + } + var ( + err error + in bytes.Buffer + writer = zlib.NewWriter(&in) + ) + + if _, err = writer.Write(data); err != nil { + err = gerror.Wrapf(err, `zlib.Writer.Write failed`) + return nil, err + } + if err = writer.Close(); err != nil { + err = gerror.Wrapf(err, `zlib.Writer.Close failed`) + return in.Bytes(), err + } + return in.Bytes(), nil +} + +// UnZlib decompresses `data` with zlib algorithm. +func UnZlib(data []byte) ([]byte, error) { + if data == nil || len(data) < 13 { + return data, nil + } + var ( + out bytes.Buffer + bytesReader = bytes.NewReader(data) + zlibReader, err = zlib.NewReader(bytesReader) + ) + if err != nil { + err = gerror.Wrapf(err, `zlib.NewReader failed`) + return nil, err + } + if _, err = io.Copy(&out, zlibReader); err != nil { + err = gerror.Wrapf(err, `io.Copy failed`) + return nil, err + } + return out.Bytes(), nil +} diff --git a/vendor/github.com/gogf/gf/v2/encoding/ghash/ghash.go b/vendor/github.com/gogf/gf/v2/encoding/ghash/ghash.go new file mode 100644 index 00000000..de7d1573 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/encoding/ghash/ghash.go @@ -0,0 +1,8 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +// Package ghash provides some classic hash functions(uint32/uint64) in go. 
+package ghash diff --git a/vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_ap.go b/vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_ap.go new file mode 100644 index 00000000..9ce369e8 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_ap.go @@ -0,0 +1,33 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package ghash + +// AP implements the classic AP hash algorithm for 32 bits. +func AP(str []byte) uint32 { + var hash uint32 + for i := 0; i < len(str); i++ { + if (i & 1) == 0 { + hash ^= (hash << 7) ^ uint32(str[i]) ^ (hash >> 3) + } else { + hash ^= ^((hash << 11) ^ uint32(str[i]) ^ (hash >> 5)) + 1 + } + } + return hash +} + +// AP64 implements the classic AP hash algorithm for 64 bits. +func AP64(str []byte) uint64 { + var hash uint64 + for i := 0; i < len(str); i++ { + if (i & 1) == 0 { + hash ^= (hash << 7) ^ uint64(str[i]) ^ (hash >> 3) + } else { + hash ^= ^((hash << 11) ^ uint64(str[i]) ^ (hash >> 5)) + 1 + } + } + return hash +} diff --git a/vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_bkdr.go b/vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_bkdr.go new file mode 100644 index 00000000..2f4cc969 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_bkdr.go @@ -0,0 +1,31 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package ghash + +// BKDR implements the classic BKDR hash algorithm for 32 bits. +func BKDR(str []byte) uint32 { + var ( + seed uint32 = 131 // 31 131 1313 13131 131313 etc.. 
+ hash uint32 = 0 + ) + for i := 0; i < len(str); i++ { + hash = hash*seed + uint32(str[i]) + } + return hash +} + +// BKDR64 implements the classic BKDR hash algorithm for 64 bits. +func BKDR64(str []byte) uint64 { + var ( + seed uint64 = 131 // 31 131 1313 13131 131313 etc.. + hash uint64 = 0 + ) + for i := 0; i < len(str); i++ { + hash = hash*seed + uint64(str[i]) + } + return hash +} diff --git a/vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_djb.go b/vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_djb.go new file mode 100644 index 00000000..da5f0646 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_djb.go @@ -0,0 +1,25 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package ghash + +// DJB implements the classic DJB hash algorithm for 32 bits. +func DJB(str []byte) uint32 { + var hash uint32 = 5381 + for i := 0; i < len(str); i++ { + hash += (hash << 5) + uint32(str[i]) + } + return hash +} + +// DJB64 implements the classic DJB hash algorithm for 64 bits. +func DJB64(str []byte) uint64 { + var hash uint64 = 5381 + for i := 0; i < len(str); i++ { + hash += (hash << 5) + uint64(str[i]) + } + return hash +} diff --git a/vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_elf.go b/vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_elf.go new file mode 100644 index 00000000..3562fc45 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_elf.go @@ -0,0 +1,39 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package ghash + +// ELF implements the classic ELF hash algorithm for 32 bits. 
+func ELF(str []byte) uint32 { + var ( + hash uint32 + x uint32 + ) + for i := 0; i < len(str); i++ { + hash = (hash << 4) + uint32(str[i]) + if x = hash & 0xF0000000; x != 0 { + hash ^= x >> 24 + hash &= ^x + 1 + } + } + return hash +} + +// ELF64 implements the classic ELF hash algorithm for 64 bits. +func ELF64(str []byte) uint64 { + var ( + hash uint64 + x uint64 + ) + for i := 0; i < len(str); i++ { + hash = (hash << 4) + uint64(str[i]) + if x = hash & 0xF000000000000000; x != 0 { + hash ^= x >> 24 + hash &= ^x + 1 + } + } + return hash +} diff --git a/vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_jshash.go b/vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_jshash.go new file mode 100644 index 00000000..91220c71 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_jshash.go @@ -0,0 +1,25 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package ghash + +// JS implements the classic JS hash algorithm for 32 bits. +func JS(str []byte) uint32 { + var hash uint32 = 1315423911 + for i := 0; i < len(str); i++ { + hash ^= (hash << 5) + uint32(str[i]) + (hash >> 2) + } + return hash +} + +// JS64 implements the classic JS hash algorithm for 64 bits. +func JS64(str []byte) uint64 { + var hash uint64 = 1315423911 + for i := 0; i < len(str); i++ { + hash ^= (hash << 5) + uint64(str[i]) + (hash >> 2) + } + return hash +} diff --git a/vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_pjw.go b/vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_pjw.go new file mode 100644 index 00000000..5b82ca1e --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_pjw.go @@ -0,0 +1,45 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. 
+// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package ghash + +// PJW implements the classic PJW hash algorithm for 32 bits. +func PJW(str []byte) uint32 { + var ( + BitsInUnsignedInt uint32 = 32 // 4 * 8 + ThreeQuarters = (BitsInUnsignedInt * 3) / 4 + OneEighth = BitsInUnsignedInt / 8 + HighBits uint32 = (0xFFFFFFFF) << (BitsInUnsignedInt - OneEighth) + hash uint32 + test uint32 + ) + for i := 0; i < len(str); i++ { + hash = (hash << OneEighth) + uint32(str[i]) + if test = hash & HighBits; test != 0 { + hash = (hash ^ (test >> ThreeQuarters)) & (^HighBits + 1) + } + } + return hash +} + +// PJW64 implements the classic PJW hash algorithm for 64 bits. +func PJW64(str []byte) uint64 { + var ( + BitsInUnsignedInt uint64 = 32 // 4 * 8 + ThreeQuarters = (BitsInUnsignedInt * 3) / 4 + OneEighth = BitsInUnsignedInt / 8 + HighBits uint64 = (0xFFFFFFFFFFFFFFFF) << (BitsInUnsignedInt - OneEighth) + hash uint64 + test uint64 + ) + for i := 0; i < len(str); i++ { + hash = (hash << OneEighth) + uint64(str[i]) + if test = hash & HighBits; test != 0 { + hash = (hash ^ (test >> ThreeQuarters)) & (^HighBits + 1) + } + } + return hash +} diff --git a/vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_rs.go b/vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_rs.go new file mode 100644 index 00000000..e9e95563 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_rs.go @@ -0,0 +1,35 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package ghash + +// RS implements the classic RS hash algorithm for 32 bits. 
+func RS(str []byte) uint32 { + var ( + b uint32 = 378551 + a uint32 = 63689 + hash uint32 = 0 + ) + for i := 0; i < len(str); i++ { + hash = hash*a + uint32(str[i]) + a *= b + } + return hash +} + +// RS64 implements the classic RS hash algorithm for 64 bits. +func RS64(str []byte) uint64 { + var ( + b uint64 = 378551 + a uint64 = 63689 + hash uint64 = 0 + ) + for i := 0; i < len(str); i++ { + hash = hash*a + uint64(str[i]) + a *= b + } + return hash +} diff --git a/vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_sdbm.go b/vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_sdbm.go new file mode 100644 index 00000000..bbda9437 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/encoding/ghash/ghash_sdbm.go @@ -0,0 +1,27 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package ghash + +// SDBM implements the classic SDBM hash algorithm for 32 bits. +func SDBM(str []byte) uint32 { + var hash uint32 + for i := 0; i < len(str); i++ { + // equivalent to: hash = 65599*hash + uint32(str[i]); + hash = uint32(str[i]) + (hash << 6) + (hash << 16) - hash + } + return hash +} + +// SDBM64 implements the classic SDBM hash algorithm for 64 bits. +func SDBM64(str []byte) uint64 { + var hash uint64 + for i := 0; i < len(str); i++ { + // equivalent to: hash = 65599*hash + uint32(str[i]) + hash = uint64(str[i]) + (hash << 6) + (hash << 16) - hash + } + return hash +} diff --git a/vendor/github.com/gogf/gf/v2/errors/gcode/gcode.go b/vendor/github.com/gogf/gf/v2/errors/gcode/gcode.go new file mode 100644 index 00000000..7e307af9 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/errors/gcode/gcode.go @@ -0,0 +1,70 @@ +// Copyright GoFrame gf Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. 
+// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +// Package gcode provides universal error code definition and common error codes implements. +package gcode + +// Code is universal error code interface definition. +type Code interface { + // Code returns the integer number of current error code. + Code() int + + // Message returns the brief message for current error code. + Message() string + + // Detail returns the detailed information of current error code, + // which is mainly designed as an extension field for error code. + Detail() interface{} +} + +// ================================================================================================================ +// Common error code definition. +// There are reserved internal error code by framework: code < 1000. +// ================================================================================================================ + +var ( + CodeNil = localCode{-1, "", nil} // No error code specified. + CodeOK = localCode{0, "OK", nil} // It is OK. + CodeInternalError = localCode{50, "Internal Error", nil} // An error occurred internally. + CodeValidationFailed = localCode{51, "Validation Failed", nil} // Data validation failed. + CodeDbOperationError = localCode{52, "Database Operation Error", nil} // Database operation error. + CodeInvalidParameter = localCode{53, "Invalid Parameter", nil} // The given parameter for current operation is invalid. + CodeMissingParameter = localCode{54, "Missing Parameter", nil} // Parameter for current operation is missing. + CodeInvalidOperation = localCode{55, "Invalid Operation", nil} // The function cannot be used like this. + CodeInvalidConfiguration = localCode{56, "Invalid Configuration", nil} // The configuration is invalid for current operation. + CodeMissingConfiguration = localCode{57, "Missing Configuration", nil} // The configuration is missing for current operation. 
+ CodeNotImplemented = localCode{58, "Not Implemented", nil} // The operation is not implemented yet. + CodeNotSupported = localCode{59, "Not Supported", nil} // The operation is not supported yet. + CodeOperationFailed = localCode{60, "Operation Failed", nil} // I tried, but I cannot give you what you want. + CodeNotAuthorized = localCode{61, "Not Authorized", nil} // Not Authorized. + CodeSecurityReason = localCode{62, "Security Reason", nil} // Security Reason. + CodeServerBusy = localCode{63, "Server Is Busy", nil} // Server is busy, please try again later. + CodeUnknown = localCode{64, "Unknown Error", nil} // Unknown error. + CodeNotFound = localCode{65, "Not Found", nil} // Resource does not exist. + CodeInvalidRequest = localCode{66, "Invalid Request", nil} // Invalid request. + CodeNecessaryPackageNotImport = localCode{67, "Necessary Package Not Import", nil} // It needs necessary package import. + CodeBusinessValidationFailed = localCode{300, "Business Validation Failed", nil} // Business validation failed. +) + +// New creates and returns an error code. +// Note that it returns an interface object of Code. +func New(code int, message string, detail interface{}) Code { + return localCode{ + code: code, + message: message, + detail: detail, + } +} + +// WithCode creates and returns a new error code based on given Code. +// The code and message is from given `code`, but the detail if from given `detail`. +func WithCode(code Code, detail interface{}) Code { + return localCode{ + code: code.Code(), + message: code.Message(), + detail: detail, + } +} diff --git a/vendor/github.com/gogf/gf/v2/errors/gcode/gcode_local.go b/vendor/github.com/gogf/gf/v2/errors/gcode/gcode_local.go new file mode 100644 index 00000000..1ec1d1ea --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/errors/gcode/gcode_local.go @@ -0,0 +1,43 @@ +// Copyright GoFrame gf Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. 
+// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gcode + +import "fmt" + +// localCode is an implementer for interface Code for internal usage only. +type localCode struct { + code int // Error code, usually an integer. + message string // Brief message for this error code. + detail interface{} // As type of interface, it is mainly designed as an extension field for error code. +} + +// Code returns the integer number of current error code. +func (c localCode) Code() int { + return c.code +} + +// Message returns the brief message for current error code. +func (c localCode) Message() string { + return c.message +} + +// Detail returns the detailed information of current error code, +// which is mainly designed as an extension field for error code. +func (c localCode) Detail() interface{} { + return c.detail +} + +// String returns current error code as a string. +func (c localCode) String() string { + if c.detail != nil { + return fmt.Sprintf(`%d:%s %v`, c.code, c.message, c.detail) + } + if c.message != "" { + return fmt.Sprintf(`%d:%s`, c.code, c.message) + } + return fmt.Sprintf(`%d`, c.code) +} diff --git a/vendor/github.com/gogf/gf/v2/errors/gerror/gerror.go b/vendor/github.com/gogf/gf/v2/errors/gerror/gerror.go new file mode 100644 index 00000000..6bb4614c --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/errors/gerror/gerror.go @@ -0,0 +1,79 @@ +// Copyright GoFrame gf Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +// Package gerror provides rich functionalities to manipulate errors. +// +// For maintainers, please very note that, +// this package is quite a basic package, which SHOULD NOT import extra packages +// except standard packages and internal packages, to avoid cycle imports. 
+package gerror + +import ( + "github.com/gogf/gf/v2/errors/gcode" + "github.com/gogf/gf/v2/internal/command" +) + +// IIs is the interface for Is feature. +type IIs interface { + Error() string + Is(target error) bool +} + +// IEqual is the interface for Equal feature. +type IEqual interface { + Error() string + Equal(target error) bool +} + +// ICode is the interface for Code feature. +type ICode interface { + Error() string + Code() gcode.Code +} + +// IStack is the interface for Stack feature. +type IStack interface { + Error() string + Stack() string +} + +// ICause is the interface for Cause feature. +type ICause interface { + Error() string + Cause() error +} + +// ICurrent is the interface for Current feature. +type ICurrent interface { + Error() string + Current() error +} + +// IUnwrap is the interface for Unwrap feature. +type IUnwrap interface { + Error() string + Unwrap() error +} + +const ( + // commandEnvKeyForBrief is the command environment name for switch key for brief error stack. + commandEnvKeyForBrief = "gf.gerror.brief" + + // commaSeparatorSpace is the comma separator with space. + commaSeparatorSpace = ", " +) + +var ( + // isUsingBriefStack is the switch key for brief error stack. + isUsingBriefStack bool +) + +func init() { + value := command.GetOptWithEnv(commandEnvKeyForBrief) + if value == "1" || value == "true" { + isUsingBriefStack = true + } +} diff --git a/vendor/github.com/gogf/gf/v2/errors/gerror/gerror_api.go b/vendor/github.com/gogf/gf/v2/errors/gerror/gerror_api.go new file mode 100644 index 00000000..9f6a8c9e --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/errors/gerror/gerror_api.go @@ -0,0 +1,110 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. 
+ +package gerror + +import ( + "fmt" + + "github.com/gogf/gf/v2/errors/gcode" +) + +// New creates and returns an error which is formatted from given text. +func New(text string) error { + return &Error{ + stack: callers(), + text: text, + code: gcode.CodeNil, + } +} + +// Newf returns an error that formats as the given format and args. +func Newf(format string, args ...interface{}) error { + return &Error{ + stack: callers(), + text: fmt.Sprintf(format, args...), + code: gcode.CodeNil, + } +} + +// NewSkip creates and returns an error which is formatted from given text. +// The parameter `skip` specifies the stack callers skipped amount. +func NewSkip(skip int, text string) error { + return &Error{ + stack: callers(skip), + text: text, + code: gcode.CodeNil, + } +} + +// NewSkipf returns an error that formats as the given format and args. +// The parameter `skip` specifies the stack callers skipped amount. +func NewSkipf(skip int, format string, args ...interface{}) error { + return &Error{ + stack: callers(skip), + text: fmt.Sprintf(format, args...), + code: gcode.CodeNil, + } +} + +// Wrap wraps error with text. It returns nil if given err is nil. +// Note that it does not lose the error code of wrapped error, as it inherits the error code from it. +func Wrap(err error, text string) error { + if err == nil { + return nil + } + return &Error{ + error: err, + stack: callers(), + text: text, + code: Code(err), + } +} + +// Wrapf returns an error annotating err with a stack trace at the point Wrapf is called, and the format specifier. +// It returns nil if given `err` is nil. +// Note that it does not lose the error code of wrapped error, as it inherits the error code from it. +func Wrapf(err error, format string, args ...interface{}) error { + if err == nil { + return nil + } + return &Error{ + error: err, + stack: callers(), + text: fmt.Sprintf(format, args...), + code: Code(err), + } +} + +// WrapSkip wraps error with text. It returns nil if given err is nil. 
+// The parameter `skip` specifies the stack callers skipped amount. +// Note that it does not lose the error code of wrapped error, as it inherits the error code from it. +func WrapSkip(skip int, err error, text string) error { + if err == nil { + return nil + } + return &Error{ + error: err, + stack: callers(skip), + text: text, + code: Code(err), + } +} + +// WrapSkipf wraps error with text that is formatted with given format and args. It returns nil if given err is nil. +// The parameter `skip` specifies the stack callers skipped amount. +// Note that it does not lose the error code of wrapped error, as it inherits the error code from it. +func WrapSkipf(skip int, err error, format string, args ...interface{}) error { + if err == nil { + return nil + } + return &Error{ + error: err, + stack: callers(skip), + text: fmt.Sprintf(format, args...), + code: Code(err), + } +} diff --git a/vendor/github.com/gogf/gf/v2/errors/gerror/gerror_api_code.go b/vendor/github.com/gogf/gf/v2/errors/gerror/gerror_api_code.go new file mode 100644 index 00000000..e4e4a2b6 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/errors/gerror/gerror_api_code.go @@ -0,0 +1,139 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gerror + +import ( + "fmt" + "strings" + + "github.com/gogf/gf/v2/errors/gcode" +) + +// NewCode creates and returns an error that has error code and given text. +func NewCode(code gcode.Code, text ...string) error { + return &Error{ + stack: callers(), + text: strings.Join(text, commaSeparatorSpace), + code: code, + } +} + +// NewCodef returns an error that has error code and formats as the given format and args. 
+func NewCodef(code gcode.Code, format string, args ...interface{}) error { + return &Error{ + stack: callers(), + text: fmt.Sprintf(format, args...), + code: code, + } +} + +// NewCodeSkip creates and returns an error which has error code and is formatted from given text. +// The parameter `skip` specifies the stack callers skipped amount. +func NewCodeSkip(code gcode.Code, skip int, text ...string) error { + return &Error{ + stack: callers(skip), + text: strings.Join(text, commaSeparatorSpace), + code: code, + } +} + +// NewCodeSkipf returns an error that has error code and formats as the given format and args. +// The parameter `skip` specifies the stack callers skipped amount. +func NewCodeSkipf(code gcode.Code, skip int, format string, args ...interface{}) error { + return &Error{ + stack: callers(skip), + text: fmt.Sprintf(format, args...), + code: code, + } +} + +// WrapCode wraps error with code and text. +// It returns nil if given err is nil. +func WrapCode(code gcode.Code, err error, text ...string) error { + if err == nil { + return nil + } + return &Error{ + error: err, + stack: callers(), + text: strings.Join(text, commaSeparatorSpace), + code: code, + } +} + +// WrapCodef wraps error with code and format specifier. +// It returns nil if given `err` is nil. +func WrapCodef(code gcode.Code, err error, format string, args ...interface{}) error { + if err == nil { + return nil + } + return &Error{ + error: err, + stack: callers(), + text: fmt.Sprintf(format, args...), + code: code, + } +} + +// WrapCodeSkip wraps error with code and text. +// It returns nil if given err is nil. +// The parameter `skip` specifies the stack callers skipped amount. 
+func WrapCodeSkip(code gcode.Code, skip int, err error, text ...string) error { + if err == nil { + return nil + } + return &Error{ + error: err, + stack: callers(skip), + text: strings.Join(text, commaSeparatorSpace), + code: code, + } +} + +// WrapCodeSkipf wraps error with code and text that is formatted with given format and args. +// It returns nil if given err is nil. +// The parameter `skip` specifies the stack callers skipped amount. +func WrapCodeSkipf(code gcode.Code, skip int, err error, format string, args ...interface{}) error { + if err == nil { + return nil + } + return &Error{ + error: err, + stack: callers(skip), + text: fmt.Sprintf(format, args...), + code: code, + } +} + +// Code returns the error code of current error. +// It returns `CodeNil` if it has no error code neither it does not implement interface Code. +func Code(err error) gcode.Code { + if err == nil { + return gcode.CodeNil + } + if e, ok := err.(ICode); ok { + return e.Code() + } + if e, ok := err.(IUnwrap); ok { + return Code(e.Unwrap()) + } + return gcode.CodeNil +} + +// HasCode checks and reports whether `err` has `code` in its chaining errors. +func HasCode(err error, code gcode.Code) bool { + if err == nil { + return false + } + if e, ok := err.(ICode); ok { + return code == e.Code() + } + if e, ok := err.(IUnwrap); ok { + return HasCode(e.Unwrap(), code) + } + return false +} diff --git a/vendor/github.com/gogf/gf/v2/errors/gerror/gerror_api_option.go b/vendor/github.com/gogf/gf/v2/errors/gerror/gerror_api_option.go new file mode 100644 index 00000000..33ed881f --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/errors/gerror/gerror_api_option.go @@ -0,0 +1,31 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. 
+ +package gerror + +import "github.com/gogf/gf/v2/errors/gcode" + +// Option is option for creating error. +type Option struct { + Error error // Wrapped error if any. + Stack bool // Whether recording stack information into error. + Text string // Error text, which is created by New* functions. + Code gcode.Code // Error code if necessary. +} + +// NewOption creates and returns a custom error with Option. +// It is the senior usage for creating error, which is often used internally in framework. +func NewOption(option Option) error { + err := &Error{ + error: option.Error, + text: option.Text, + code: option.Code, + } + if option.Stack { + err.stack = callers() + } + return err +} diff --git a/vendor/github.com/gogf/gf/v2/errors/gerror/gerror_api_stack.go b/vendor/github.com/gogf/gf/v2/errors/gerror/gerror_api_stack.go new file mode 100644 index 00000000..79b4d6b0 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/errors/gerror/gerror_api_stack.go @@ -0,0 +1,118 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gerror + +import ( + "runtime" +) + +// stack represents a stack of program counters. +type stack []uintptr + +const ( + // maxStackDepth marks the max stack depth for error back traces. + maxStackDepth = 64 +) + +// Cause returns the root cause error of `err`. +func Cause(err error) error { + if err == nil { + return nil + } + if e, ok := err.(ICause); ok { + return e.Cause() + } + if e, ok := err.(IUnwrap); ok { + return Cause(e.Unwrap()) + } + return err +} + +// Stack returns the stack callers as string. +// It returns the error string directly if the `err` does not support stacks. 
+func Stack(err error) string { + if err == nil { + return "" + } + if e, ok := err.(IStack); ok { + return e.Stack() + } + return err.Error() +} + +// Current creates and returns the current level error. +// It returns nil if current level error is nil. +func Current(err error) error { + if err == nil { + return nil + } + if e, ok := err.(ICurrent); ok { + return e.Current() + } + return err +} + +// Unwrap returns the next level error. +// It returns nil if current level error or the next level error is nil. +func Unwrap(err error) error { + if err == nil { + return nil + } + if e, ok := err.(IUnwrap); ok { + return e.Unwrap() + } + return nil +} + +// HasStack checks and reports whether `err` implemented interface `gerror.IStack`. +func HasStack(err error) bool { + _, ok := err.(IStack) + return ok +} + +// Equal reports whether current error `err` equals to error `target`. +// Please note that, in default comparison logic for `Error`, +// the errors are considered the same if both the `code` and `text` of them are the same. +func Equal(err, target error) bool { + if err == target { + return true + } + if e, ok := err.(IEqual); ok { + return e.Equal(target) + } + if e, ok := target.(IEqual); ok { + return e.Equal(err) + } + return false +} + +// Is reports whether current error `err` has error `target` in its chaining errors. +// It is just for implements for stdlib errors.Is from Go version 1.17. +func Is(err, target error) bool { + if e, ok := err.(IIs); ok { + return e.Is(target) + } + return false +} + +// HasError is alias of Is, which more easily understanding semantics. +func HasError(err, target error) bool { + return Is(err, target) +} + +// callers returns the stack callers. +// Note that it here just retrieves the caller memory address array not the caller information. 
+func callers(skip ...int) stack { + var ( + pcs [maxStackDepth]uintptr + n = 3 + ) + if len(skip) > 0 { + n += skip[0] + } + return pcs[:runtime.Callers(n, pcs[:])] +} diff --git a/vendor/github.com/gogf/gf/v2/errors/gerror/gerror_error.go b/vendor/github.com/gogf/gf/v2/errors/gerror/gerror_error.go new file mode 100644 index 00000000..b05bfd1f --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/errors/gerror/gerror_error.go @@ -0,0 +1,146 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gerror + +import ( + "errors" + "fmt" + "runtime" + "strings" + + "github.com/gogf/gf/v2/errors/gcode" +) + +// Error is custom error for additional features. +type Error struct { + error error // Wrapped error. + stack stack // Stack array, which records the stack information when this error is created or wrapped. + text string // Custom Error text when Error is created, might be empty when its code is not nil. + code gcode.Code // Error code if necessary. +} + +const ( + // Filtering key for current error module paths. + stackFilterKeyLocal = "/errors/gerror/gerror" +) + +var ( + // goRootForFilter is used for stack filtering in development environment purpose. + goRootForFilter = runtime.GOROOT() +) + +func init() { + if goRootForFilter != "" { + goRootForFilter = strings.ReplaceAll(goRootForFilter, "\\", "/") + } +} + +// Error implements the interface of Error, it returns all the error as string. +func (err *Error) Error() string { + if err == nil { + return "" + } + errStr := err.text + if errStr == "" && err.code != nil { + errStr = err.code.Message() + } + if err.error != nil { + if errStr != "" { + errStr += ": " + } + errStr += err.error.Error() + } + return errStr +} + +// Cause returns the root cause error. 
+func (err *Error) Cause() error { + if err == nil { + return nil + } + loop := err + for loop != nil { + if loop.error != nil { + if e, ok := loop.error.(*Error); ok { + // Internal Error struct. + loop = e + } else if e, ok := loop.error.(ICause); ok { + // Other Error that implements ApiCause interface. + return e.Cause() + } else { + return loop.error + } + } else { + // return loop + // + // To be compatible with Case of https://github.com/pkg/errors. + return errors.New(loop.text) + } + } + return nil +} + +// Current creates and returns the current level error. +// It returns nil if current level error is nil. +func (err *Error) Current() error { + if err == nil { + return nil + } + return &Error{ + error: nil, + stack: err.stack, + text: err.text, + code: err.code, + } +} + +// Unwrap is alias of function `Next`. +// It is just for implements for stdlib errors.Unwrap from Go version 1.17. +func (err *Error) Unwrap() error { + if err == nil { + return nil + } + return err.error +} + +// Equal reports whether current error `err` equals to error `target`. +// Please note that, in default comparison for `Error`, +// the errors are considered the same if both the `code` and `text` of them are the same. +func (err *Error) Equal(target error) bool { + if err == target { + return true + } + // Code should be the same. + // Note that if both errors have `nil` code, they are also considered equal. + if err.code != Code(target) { + return false + } + // Text should be the same. + if err.text != fmt.Sprintf(`%-s`, target) { + return false + } + return true +} + +// Is reports whether current error `err` has error `target` in its chaining errors. +// It is just for implements for stdlib errors.Is from Go version 1.17. 
+func (err *Error) Is(target error) bool { + if Equal(err, target) { + return true + } + nextErr := err.Unwrap() + if nextErr == nil { + return false + } + if Equal(nextErr, target) { + return true + } + if e, ok := nextErr.(IIs); ok { + return e.Is(target) + } + return false +} diff --git a/vendor/github.com/gogf/gf/v2/errors/gerror/gerror_error_code.go b/vendor/github.com/gogf/gf/v2/errors/gerror/gerror_error_code.go new file mode 100644 index 00000000..1000e9f9 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/errors/gerror/gerror_error_code.go @@ -0,0 +1,31 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gerror + +import ( + "github.com/gogf/gf/v2/errors/gcode" +) + +// Code returns the error code. +// It returns CodeNil if it has no error code. +func (err *Error) Code() gcode.Code { + if err == nil { + return gcode.CodeNil + } + if err.code == gcode.CodeNil { + return Code(err.Unwrap()) + } + return err.code +} + +// SetCode updates the internal code with given code. +func (err *Error) SetCode(code gcode.Code) { + if err == nil { + return + } + err.code = code +} diff --git a/vendor/github.com/gogf/gf/v2/errors/gerror/gerror_error_format.go b/vendor/github.com/gogf/gf/v2/errors/gerror/gerror_error_format.go new file mode 100644 index 00000000..16be393e --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/errors/gerror/gerror_error_format.go @@ -0,0 +1,40 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gerror + +import ( + "fmt" + "io" +) + +// Format formats the frame according to the fmt.Formatter interface. 
+// +// %v, %s : Print all the error string; +// %-v, %-s : Print current level error string; +// %+s : Print full stack error list; +// %+v : Print the error string and full stack error list +func (err *Error) Format(s fmt.State, verb rune) { + switch verb { + case 's', 'v': + switch { + case s.Flag('-'): + if err.text != "" { + _, _ = io.WriteString(s, err.text) + } else { + _, _ = io.WriteString(s, err.Error()) + } + case s.Flag('+'): + if verb == 's' { + _, _ = io.WriteString(s, err.Stack()) + } else { + _, _ = io.WriteString(s, err.Error()+"\n"+err.Stack()) + } + default: + _, _ = io.WriteString(s, err.Error()) + } + } +} diff --git a/vendor/github.com/gogf/gf/v2/errors/gerror/gerror_error_json.go b/vendor/github.com/gogf/gf/v2/errors/gerror/gerror_error_json.go new file mode 100644 index 00000000..5c290d7a --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/errors/gerror/gerror_error_json.go @@ -0,0 +1,13 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gerror + +// MarshalJSON implements the interface MarshalJSON for json.Marshal. +// Note that do not use pointer as its receiver here. +func (err Error) MarshalJSON() ([]byte, error) { + return []byte(`"` + err.Error() + `"`), nil +} diff --git a/vendor/github.com/gogf/gf/v2/errors/gerror/gerror_error_stack.go b/vendor/github.com/gogf/gf/v2/errors/gerror/gerror_error_stack.go new file mode 100644 index 00000000..598d8cac --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/errors/gerror/gerror_error_stack.go @@ -0,0 +1,171 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. 
+ +package gerror + +import ( + "bytes" + "container/list" + "fmt" + "runtime" + "strings" + + "github.com/gogf/gf/v2/internal/consts" +) + +// stackInfo manages stack info of certain error. +type stackInfo struct { + Index int // Index is the index of current error in whole error stacks. + Message string // Error information string. + Lines *list.List // Lines contains all error stack lines of current error stack in sequence. +} + +// stackLine manages each line info of stack. +type stackLine struct { + Function string // Function name, which contains its full package path. + FileLine string // FileLine is the source file name and its line number of Function. +} + +// Stack returns the error stack information as string. +func (err *Error) Stack() string { + if err == nil { + return "" + } + var ( + loop = err + index = 1 + infos []*stackInfo + ) + for loop != nil { + info := &stackInfo{ + Index: index, + Message: fmt.Sprintf("%-v", loop), + } + index++ + infos = append(infos, info) + loopLinesOfStackInfo(loop.stack, info) + if loop.error != nil { + if e, ok := loop.error.(*Error); ok { + loop = e + } else { + infos = append(infos, &stackInfo{ + Index: index, + Message: loop.error.Error(), + }) + index++ + break + } + } else { + break + } + } + filterLinesOfStackInfos(infos) + return formatStackInfos(infos) +} + +// filterLinesOfStackInfos removes repeated lines, which exist in subsequent stacks, from top errors. 
+func filterLinesOfStackInfos(infos []*stackInfo) { + var ( + ok bool + set = make(map[string]struct{}) + info *stackInfo + line *stackLine + removes []*list.Element + ) + for i := len(infos) - 1; i >= 0; i-- { + info = infos[i] + if info.Lines == nil { + continue + } + for n, e := 0, info.Lines.Front(); n < info.Lines.Len(); n, e = n+1, e.Next() { + line = e.Value.(*stackLine) + if _, ok = set[line.FileLine]; ok { + removes = append(removes, e) + } else { + set[line.FileLine] = struct{}{} + } + } + if len(removes) > 0 { + for _, e := range removes { + info.Lines.Remove(e) + } + } + removes = removes[:0] + } +} + +// formatStackInfos formats and returns error stack information as string. +func formatStackInfos(infos []*stackInfo) string { + var buffer = bytes.NewBuffer(nil) + for i, info := range infos { + buffer.WriteString(fmt.Sprintf("%d. %s\n", i+1, info.Message)) + if info.Lines != nil && info.Lines.Len() > 0 { + formatStackLines(buffer, info.Lines) + } + } + return buffer.String() +} + +// formatStackLines formats and returns error stack lines as string. +func formatStackLines(buffer *bytes.Buffer, lines *list.List) string { + var ( + line *stackLine + space = " " + length = lines.Len() + ) + for i, e := 0, lines.Front(); i < length; i, e = i+1, e.Next() { + line = e.Value.(*stackLine) + // Graceful indent. + if i >= 9 { + space = " " + } + buffer.WriteString(fmt.Sprintf( + " %d).%s%s\n %s\n", + i+1, space, line.Function, line.FileLine, + )) + } + return buffer.String() +} + +// loopLinesOfStackInfo iterates the stack info lines and produces the stack line info. +func loopLinesOfStackInfo(st stack, info *stackInfo) { + if st == nil { + return + } + for _, p := range st { + if fn := runtime.FuncForPC(p - 1); fn != nil { + file, line := fn.FileLine(p - 1) + if isUsingBriefStack { + // filter whole GoFrame packages stack paths. + if strings.Contains(file, consts.StackFilterKeyForGoFrame) { + continue + } + } else { + // package path stack filtering. 
+ if strings.Contains(file, stackFilterKeyLocal) { + continue + } + } + // Avoid stack string like "`autogenerated`" + if strings.Contains(file, "<") { + continue + } + // Ignore GO ROOT paths. + if goRootForFilter != "" && + len(file) >= len(goRootForFilter) && + file[0:len(goRootForFilter)] == goRootForFilter { + continue + } + if info.Lines == nil { + info.Lines = list.New() + } + info.Lines.PushBack(&stackLine{ + Function: fn.Name(), + FileLine: fmt.Sprintf(`%s:%d`, file, line), + }) + } + } +} diff --git a/vendor/github.com/gogf/gf/v2/internal/command/command.go b/vendor/github.com/gogf/gf/v2/internal/command/command.go new file mode 100644 index 00000000..688201eb --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/internal/command/command.go @@ -0,0 +1,135 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. +// + +// Package command provides console operations, like options/arguments reading. +package command + +import ( + "os" + "regexp" + "strings" +) + +var ( + defaultParsedArgs = make([]string, 0) + defaultParsedOptions = make(map[string]string) + argumentRegex = regexp.MustCompile(`^\-{1,2}([\w\?\.\-]+)(=){0,1}(.*)$`) +) + +// Init does custom initialization. +func Init(args ...string) { + if len(args) == 0 { + if len(defaultParsedArgs) == 0 && len(defaultParsedOptions) == 0 { + args = os.Args + } else { + return + } + } else { + defaultParsedArgs = make([]string, 0) + defaultParsedOptions = make(map[string]string) + } + // Parsing os.Args with default algorithm. + defaultParsedArgs, defaultParsedOptions = ParseUsingDefaultAlgorithm(args...) +} + +// ParseUsingDefaultAlgorithm parses arguments using default algorithm. 
+func ParseUsingDefaultAlgorithm(args ...string) (parsedArgs []string, parsedOptions map[string]string) { + parsedArgs = make([]string, 0) + parsedOptions = make(map[string]string) + for i := 0; i < len(args); { + array := argumentRegex.FindStringSubmatch(args[i]) + if len(array) > 2 { + if array[2] == "=" { + parsedOptions[array[1]] = array[3] + } else if i < len(args)-1 { + if len(args[i+1]) > 0 && args[i+1][0] == '-' { + // Eg: gf gen -d -n 1 + parsedOptions[array[1]] = array[3] + } else { + // Eg: gf gen -n 2 + parsedOptions[array[1]] = args[i+1] + i += 2 + continue + } + } else { + // Eg: gf gen -h + parsedOptions[array[1]] = array[3] + } + } else { + parsedArgs = append(parsedArgs, args[i]) + } + i++ + } + return +} + +// GetOpt returns the option value named `name`. +func GetOpt(name string, def ...string) string { + Init() + if v, ok := defaultParsedOptions[name]; ok { + return v + } + if len(def) > 0 { + return def[0] + } + return "" +} + +// GetOptAll returns all parsed options. +func GetOptAll() map[string]string { + Init() + return defaultParsedOptions +} + +// ContainsOpt checks whether option named `name` exist in the arguments. +func ContainsOpt(name string) bool { + Init() + _, ok := defaultParsedOptions[name] + return ok +} + +// GetArg returns the argument at `index`. +func GetArg(index int, def ...string) string { + Init() + if index < len(defaultParsedArgs) { + return defaultParsedArgs[index] + } + if len(def) > 0 { + return def[0] + } + return "" +} + +// GetArgAll returns all parsed arguments. +func GetArgAll() []string { + Init() + return defaultParsedArgs +} + +// GetOptWithEnv returns the command line argument of the specified `key`. +// If the argument does not exist, then it returns the environment variable with specified `key`. +// It returns the default value `def` if none of them exists. +// +// Fetching Rules: +// 1. Command line arguments are in lowercase format, eg: gf.package.variable; +// 2. 
Environment arguments are in uppercase format, eg: GF_PACKAGE_VARIABLE; +func GetOptWithEnv(key string, def ...string) string { + cmdKey := strings.ToLower(strings.ReplaceAll(key, "_", ".")) + if ContainsOpt(cmdKey) { + return GetOpt(cmdKey) + } else { + envKey := strings.ToUpper(strings.ReplaceAll(key, ".", "_")) + if r, ok := os.LookupEnv(envKey); ok { + return r + } else { + if len(def) > 0 { + return def[0] + } + } + } + return "" +} diff --git a/vendor/github.com/gogf/gf/v2/internal/consts/consts.go b/vendor/github.com/gogf/gf/v2/internal/consts/consts.go new file mode 100644 index 00000000..8b1bd37f --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/internal/consts/consts.go @@ -0,0 +1,21 @@ +// Copyright GoFrame gf Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +// Package consts defines constants that are shared all among packages of framework. +package consts + +const ( + ConfigNodeNameDatabase = "database" + ConfigNodeNameLogger = "logger" + ConfigNodeNameRedis = "redis" + ConfigNodeNameViewer = "viewer" + ConfigNodeNameServer = "server" // General version configuration item name. + ConfigNodeNameServerSecondary = "httpserver" // New version configuration item name support from v2. + + // StackFilterKeyForGoFrame is the stack filtering key for all GoFrame module paths. + // Eg: .../pkg/mod/github.com/gogf/gf/v2@v2.0.0-20211011134327-54dd11f51122/debug/gdebug/gdebug_caller.go + StackFilterKeyForGoFrame = "github.com/gogf/gf/" +) diff --git a/vendor/github.com/gogf/gf/v2/internal/deepcopy/deepcopy.go b/vendor/github.com/gogf/gf/v2/internal/deepcopy/deepcopy.go new file mode 100644 index 00000000..e379f5fd --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/internal/deepcopy/deepcopy.go @@ -0,0 +1,136 @@ +// Copyright GoFrame gf Author(https://goframe.org). 
All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +// Package deepcopy makes deep copies of things using reflection. +// +// This package is maintained from: https://github.com/mohae/deepcopy +package deepcopy + +import ( + "reflect" + "time" +) + +// Interface for delegating copy process to type +type Interface interface { + DeepCopy() interface{} +} + +// Copy creates a deep copy of whatever is passed to it and returns the copy +// in an interface{}. The returned value will need to be asserted to the +// correct type. +func Copy(src interface{}) interface{} { + if src == nil { + return nil + } + + // Copy by type assertion. + switch r := src.(type) { + case + int, int8, int16, int32, int64, + uint, uint8, uint16, uint32, uint64, + float32, float64, + complex64, complex128, + string, + bool: + return r + + default: + if v, ok := src.(Interface); ok { + return v.DeepCopy() + } + var ( + original = reflect.ValueOf(src) // Make the interface a reflect.Value + dst = reflect.New(original.Type()).Elem() // Make a copy of the same type as the original. + ) + // Recursively copy the original. + copyRecursive(original, dst) + // Return the copy as an interface. + return dst.Interface() + } +} + +// copyRecursive does the actual copying of the interface. It currently has +// limited support for what it can handle. Add as needed. +func copyRecursive(original, cpy reflect.Value) { + // check for implement deepcopy.Interface + if original.CanInterface() && original.IsValid() && !original.IsZero() { + if copier, ok := original.Interface().(Interface); ok { + cpy.Set(reflect.ValueOf(copier.DeepCopy())) + return + } + } + + // handle according to original's Kind + switch original.Kind() { + case reflect.Ptr: + // Get the actual value being pointed to. + originalValue := original.Elem() + + // if it isn't valid, return. 
+ if !originalValue.IsValid() { + return + } + cpy.Set(reflect.New(originalValue.Type())) + copyRecursive(originalValue, cpy.Elem()) + + case reflect.Interface: + // If this is a nil, don't do anything + if original.IsNil() { + return + } + // Get the value for the interface, not the pointer. + originalValue := original.Elem() + + // Get the value by calling Elem(). + copyValue := reflect.New(originalValue.Type()).Elem() + copyRecursive(originalValue, copyValue) + cpy.Set(copyValue) + + case reflect.Struct: + t, ok := original.Interface().(time.Time) + if ok { + cpy.Set(reflect.ValueOf(t)) + return + } + // Go through each field of the struct and copy it. + for i := 0; i < original.NumField(); i++ { + // The Type's StructField for a given field is checked to see if StructField.PkgPath + // is set to determine if the field is exported or not because CanSet() returns false + // for settable fields. I'm not sure why. -mohae + if original.Type().Field(i).PkgPath != "" { + continue + } + copyRecursive(original.Field(i), cpy.Field(i)) + } + + case reflect.Slice: + if original.IsNil() { + return + } + // Make a new slice and copy each element. 
+ cpy.Set(reflect.MakeSlice(original.Type(), original.Len(), original.Cap())) + for i := 0; i < original.Len(); i++ { + copyRecursive(original.Index(i), cpy.Index(i)) + } + + case reflect.Map: + if original.IsNil() { + return + } + cpy.Set(reflect.MakeMap(original.Type())) + for _, key := range original.MapKeys() { + originalValue := original.MapIndex(key) + copyValue := reflect.New(originalValue.Type()).Elem() + copyRecursive(originalValue, copyValue) + copyKey := Copy(key.Interface()) + cpy.SetMapIndex(reflect.ValueOf(copyKey), copyValue) + } + + default: + cpy.Set(original) + } +} diff --git a/vendor/github.com/gogf/gf/v2/internal/empty/empty.go b/vendor/github.com/gogf/gf/v2/internal/empty/empty.go new file mode 100644 index 00000000..4e42d1c9 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/internal/empty/empty.go @@ -0,0 +1,224 @@ +// Copyright GoFrame gf Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +// Package empty provides functions for checking empty/nil variables. +package empty + +import ( + "reflect" + "time" + + "github.com/gogf/gf/v2/internal/reflection" +) + +// iString is used for type assert api for String(). +type iString interface { + String() string +} + +// iInterfaces is used for type assert api for Interfaces. +type iInterfaces interface { + Interfaces() []interface{} +} + +// iMapStrAny is the interface support for converting struct parameter to map. +type iMapStrAny interface { + MapStrAny() map[string]interface{} +} + +type iTime interface { + Date() (year int, month time.Month, day int) + IsZero() bool +} + +// IsEmpty checks whether given `value` empty. +// It returns true if `value` is in: 0, nil, false, "", len(slice/map/chan) == 0, +// or else it returns false. 
+func IsEmpty(value interface{}) bool { + if value == nil { + return true + } + // It firstly checks the variable as common types using assertion to enhance the performance, + // and then using reflection. + switch result := value.(type) { + case int: + return result == 0 + case int8: + return result == 0 + case int16: + return result == 0 + case int32: + return result == 0 + case int64: + return result == 0 + case uint: + return result == 0 + case uint8: + return result == 0 + case uint16: + return result == 0 + case uint32: + return result == 0 + case uint64: + return result == 0 + case float32: + return result == 0 + case float64: + return result == 0 + case bool: + return !result + case string: + return result == "" + case []byte: + return len(result) == 0 + case []rune: + return len(result) == 0 + case []int: + return len(result) == 0 + case []string: + return len(result) == 0 + case []float32: + return len(result) == 0 + case []float64: + return len(result) == 0 + case map[string]interface{}: + return len(result) == 0 + + default: + // ========================= + // Common interfaces checks. + // ========================= + if f, ok := value.(iTime); ok { + if f == (*time.Time)(nil) { + return true + } + return f.IsZero() + } + if f, ok := value.(iString); ok { + if f == nil { + return true + } + return f.String() == "" + } + if f, ok := value.(iInterfaces); ok { + if f == nil { + return true + } + return len(f.Interfaces()) == 0 + } + if f, ok := value.(iMapStrAny); ok { + if f == nil { + return true + } + return len(f.MapStrAny()) == 0 + } + // Finally, using reflect. 
+ var rv reflect.Value + if v, ok := value.(reflect.Value); ok { + rv = v + } else { + rv = reflect.ValueOf(value) + } + + switch rv.Kind() { + case reflect.Bool: + return !rv.Bool() + + case + reflect.Int, + reflect.Int8, + reflect.Int16, + reflect.Int32, + reflect.Int64: + return rv.Int() == 0 + + case + reflect.Uint, + reflect.Uint8, + reflect.Uint16, + reflect.Uint32, + reflect.Uint64, + reflect.Uintptr: + return rv.Uint() == 0 + + case + reflect.Float32, + reflect.Float64: + return rv.Float() == 0 + + case reflect.String: + return rv.Len() == 0 + + case reflect.Struct: + var fieldValueInterface interface{} + for i := 0; i < rv.NumField(); i++ { + fieldValueInterface, _ = reflection.ValueToInterface(rv.Field(i)) + if !IsEmpty(fieldValueInterface) { + return false + } + } + return true + + case + reflect.Chan, + reflect.Map, + reflect.Slice, + reflect.Array: + return rv.Len() == 0 + + case + reflect.Func, + reflect.Ptr, + reflect.Interface, + reflect.UnsafePointer: + if rv.IsNil() { + return true + } + } + } + return false +} + +// IsNil checks whether given `value` is nil, especially for interface{} type value. +// Parameter `traceSource` is used for tracing to the source variable if given `value` is type of pinter +// that also points to a pointer. It returns nil if the source is nil when `traceSource` is true. +// Note that it might use reflect feature which affects performance a little. 
+func IsNil(value interface{}, traceSource ...bool) bool { + if value == nil { + return true + } + var rv reflect.Value + if v, ok := value.(reflect.Value); ok { + rv = v + } else { + rv = reflect.ValueOf(value) + } + switch rv.Kind() { + case reflect.Chan, + reflect.Map, + reflect.Slice, + reflect.Func, + reflect.Interface, + reflect.UnsafePointer: + return !rv.IsValid() || rv.IsNil() + + case reflect.Ptr: + if len(traceSource) > 0 && traceSource[0] { + for rv.Kind() == reflect.Ptr { + rv = rv.Elem() + } + if !rv.IsValid() { + return true + } + if rv.Kind() == reflect.Ptr { + return rv.IsNil() + } + } else { + return !rv.IsValid() || rv.IsNil() + } + } + return false +} diff --git a/vendor/github.com/gogf/gf/v2/internal/intlog/intlog.go b/vendor/github.com/gogf/gf/v2/internal/intlog/intlog.go new file mode 100644 index 00000000..e86380dc --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/internal/intlog/intlog.go @@ -0,0 +1,125 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +// Package intlog provides internal logging for GoFrame development usage only. +package intlog + +import ( + "bytes" + "context" + "fmt" + "path/filepath" + "time" + + "go.opentelemetry.io/otel/trace" + + "github.com/gogf/gf/v2/debug/gdebug" + "github.com/gogf/gf/v2/internal/utils" +) + +const ( + stackFilterKey = "/internal/intlog" +) + +// Print prints `v` with newline using fmt.Println. +// The parameter `v` can be multiple variables. +func Print(ctx context.Context, v ...interface{}) { + if !utils.IsDebugEnabled() { + return + } + doPrint(ctx, fmt.Sprint(v...), false) +} + +// Printf prints `v` with format `format` using fmt.Printf. +// The parameter `v` can be multiple variables. 
+func Printf(ctx context.Context, format string, v ...interface{}) { + if !utils.IsDebugEnabled() { + return + } + doPrint(ctx, fmt.Sprintf(format, v...), false) +} + +// Error prints `v` with newline using fmt.Println. +// The parameter `v` can be multiple variables. +func Error(ctx context.Context, v ...interface{}) { + if !utils.IsDebugEnabled() { + return + } + doPrint(ctx, fmt.Sprint(v...), true) +} + +// Errorf prints `v` with format `format` using fmt.Printf. +func Errorf(ctx context.Context, format string, v ...interface{}) { + if !utils.IsDebugEnabled() { + return + } + doPrint(ctx, fmt.Sprintf(format, v...), true) +} + +// PrintFunc prints the output from function `f`. +// It only calls function `f` if debug mode is enabled. +func PrintFunc(ctx context.Context, f func() string) { + if !utils.IsDebugEnabled() { + return + } + s := f() + if s == "" { + return + } + doPrint(ctx, s, false) +} + +// ErrorFunc prints the output from function `f`. +// It only calls function `f` if debug mode is enabled. +func ErrorFunc(ctx context.Context, f func() string) { + if !utils.IsDebugEnabled() { + return + } + s := f() + if s == "" { + return + } + doPrint(ctx, s, true) +} + +func doPrint(ctx context.Context, content string, stack bool) { + if !utils.IsDebugEnabled() { + return + } + buffer := bytes.NewBuffer(nil) + buffer.WriteString(time.Now().Format("2006-01-02 15:04:05.000")) + buffer.WriteString(" [INTE] ") + buffer.WriteString(file()) + buffer.WriteString(" ") + if s := traceIdStr(ctx); s != "" { + buffer.WriteString(s + " ") + } + buffer.WriteString(content) + buffer.WriteString("\n") + if stack { + buffer.WriteString("Caller Stack:\n") + buffer.WriteString(gdebug.StackWithFilter([]string{stackFilterKey})) + } + fmt.Print(buffer.String()) +} + +// traceIdStr retrieves and returns the trace id string for logging output. 
+func traceIdStr(ctx context.Context) string { + if ctx == nil { + return "" + } + spanCtx := trace.SpanContextFromContext(ctx) + if traceId := spanCtx.TraceID(); traceId.IsValid() { + return "{" + traceId.String() + "}" + } + return "" +} + +// file returns caller file name along with its line number. +func file() string { + _, p, l := gdebug.CallerWithFilter([]string{stackFilterKey}) + return fmt.Sprintf(`%s:%d`, filepath.Base(p), l) +} diff --git a/vendor/github.com/gogf/gf/v2/internal/json/json.go b/vendor/github.com/gogf/gf/v2/internal/json/json.go new file mode 100644 index 00000000..374aec68 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/internal/json/json.go @@ -0,0 +1,85 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +// Package json provides json operations wrapping ignoring stdlib or third-party lib json. +package json + +import ( + "bytes" + "encoding/json" + "io" + + "github.com/gogf/gf/v2/errors/gerror" +) + +// RawMessage is a raw encoded JSON value. +// It implements Marshaler and Unmarshaler and can +// be used to delay JSON decoding or precompute a JSON encoding. +type RawMessage = json.RawMessage + +// Marshal adapts to json/encoding Marshal API. +// +// Marshal returns the JSON encoding of v, adapts to json/encoding Marshal API +// Refer to https://godoc.org/encoding/json#Marshal for more information. +func Marshal(v interface{}) (marshaledBytes []byte, err error) { + marshaledBytes, err = json.Marshal(v) + if err != nil { + err = gerror.Wrap(err, `json.Marshal failed`) + } + return +} + +// MarshalIndent same as json.MarshalIndent. 
+func MarshalIndent(v interface{}, prefix, indent string) (marshaledBytes []byte, err error) { + marshaledBytes, err = json.MarshalIndent(v, prefix, indent) + if err != nil { + err = gerror.Wrap(err, `json.MarshalIndent failed`) + } + return +} + +// Unmarshal adapts to json/encoding Unmarshal API +// +// Unmarshal parses the JSON-encoded data and stores the result in the value pointed to by v. +// Refer to https://godoc.org/encoding/json#Unmarshal for more information. +func Unmarshal(data []byte, v interface{}) (err error) { + err = json.Unmarshal(data, v) + if err != nil { + err = gerror.Wrap(err, `json.Unmarshal failed`) + } + return +} + +// UnmarshalUseNumber decodes the json data bytes to target interface using number option. +func UnmarshalUseNumber(data []byte, v interface{}) (err error) { + decoder := NewDecoder(bytes.NewReader(data)) + decoder.UseNumber() + err = decoder.Decode(v) + if err != nil { + err = gerror.Wrap(err, `json.UnmarshalUseNumber failed`) + } + return +} + +// NewEncoder same as json.NewEncoder +func NewEncoder(writer io.Writer) *json.Encoder { + return json.NewEncoder(writer) +} + +// NewDecoder adapts to json/stream NewDecoder API. +// +// NewDecoder returns a new decoder that reads from r. +// +// Instead of a json/encoding Decoder, a Decoder is returned +// Refer to https://godoc.org/encoding/json#NewDecoder for more information. +func NewDecoder(reader io.Reader) *json.Decoder { + return json.NewDecoder(reader) +} + +// Valid reports whether data is a valid JSON encoding. +func Valid(data []byte) bool { + return json.Valid(data) +} diff --git a/vendor/github.com/gogf/gf/v2/internal/reflection/reflection.go b/vendor/github.com/gogf/gf/v2/internal/reflection/reflection.go new file mode 100644 index 00000000..30a4cded --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/internal/reflection/reflection.go @@ -0,0 +1,94 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. 
+// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +// Package reflection provides some reflection functions for internal usage. +package reflection + +import ( + "reflect" +) + +type OriginValueAndKindOutput struct { + InputValue reflect.Value + InputKind reflect.Kind + OriginValue reflect.Value + OriginKind reflect.Kind +} + +// OriginValueAndKind retrieves and returns the original reflect value and kind. +func OriginValueAndKind(value interface{}) (out OriginValueAndKindOutput) { + if v, ok := value.(reflect.Value); ok { + out.InputValue = v + } else { + out.InputValue = reflect.ValueOf(value) + } + out.InputKind = out.InputValue.Kind() + out.OriginValue = out.InputValue + out.OriginKind = out.InputKind + for out.OriginKind == reflect.Ptr { + out.OriginValue = out.OriginValue.Elem() + out.OriginKind = out.OriginValue.Kind() + } + return +} + +type OriginTypeAndKindOutput struct { + InputType reflect.Type + InputKind reflect.Kind + OriginType reflect.Type + OriginKind reflect.Kind +} + +// OriginTypeAndKind retrieves and returns the original reflect type and kind. +func OriginTypeAndKind(value interface{}) (out OriginTypeAndKindOutput) { + if value == nil { + return + } + if reflectType, ok := value.(reflect.Type); ok { + out.InputType = reflectType + } else { + if reflectValue, ok := value.(reflect.Value); ok { + out.InputType = reflectValue.Type() + } else { + out.InputType = reflect.TypeOf(value) + } + } + out.InputKind = out.InputType.Kind() + out.OriginType = out.InputType + out.OriginKind = out.InputKind + for out.OriginKind == reflect.Ptr { + out.OriginType = out.OriginType.Elem() + out.OriginKind = out.OriginType.Kind() + } + return +} + +// ValueToInterface converts reflect value to its interface type. 
+func ValueToInterface(v reflect.Value) (value interface{}, ok bool) { + if v.IsValid() && v.CanInterface() { + return v.Interface(), true + } + switch v.Kind() { + case reflect.Bool: + return v.Bool(), true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int(), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint(), true + case reflect.Float32, reflect.Float64: + return v.Float(), true + case reflect.Complex64, reflect.Complex128: + return v.Complex(), true + case reflect.String: + return v.String(), true + case reflect.Ptr: + return ValueToInterface(v.Elem()) + case reflect.Interface: + return ValueToInterface(v.Elem()) + default: + return nil, false + } +} diff --git a/vendor/github.com/gogf/gf/v2/internal/rwmutex/rwmutex.go b/vendor/github.com/gogf/gf/v2/internal/rwmutex/rwmutex.go new file mode 100644 index 00000000..17a67fec --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/internal/rwmutex/rwmutex.go @@ -0,0 +1,77 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +// Package rwmutex provides switch of concurrent safety feature for sync.RWMutex. +package rwmutex + +import ( + "sync" +) + +// RWMutex is a sync.RWMutex with a switch for concurrent safe feature. +// If its attribute *sync.RWMutex is not nil, it means it's in concurrent safety usage. +// Its attribute *sync.RWMutex is nil in default, which makes this struct mush lightweight. +type RWMutex struct { + // Underlying mutex. + mutex *sync.RWMutex +} + +// New creates and returns a new *RWMutex. +// The parameter `safe` is used to specify whether using this mutex in concurrent safety, +// which is false in default. +func New(safe ...bool) *RWMutex { + mu := Create(safe...) 
+ return &mu +} + +// Create creates and returns a new RWMutex object. +// The parameter `safe` is used to specify whether using this mutex in concurrent safety, +// which is false in default. +func Create(safe ...bool) RWMutex { + if len(safe) > 0 && safe[0] { + return RWMutex{ + mutex: new(sync.RWMutex), + } + } + return RWMutex{} +} + +// IsSafe checks and returns whether current mutex is in concurrent-safe usage. +func (mu *RWMutex) IsSafe() bool { + return mu.mutex != nil +} + +// Lock locks mutex for writing. +// It does nothing if it is not in concurrent-safe usage. +func (mu *RWMutex) Lock() { + if mu.mutex != nil { + mu.mutex.Lock() + } +} + +// Unlock unlocks mutex for writing. +// It does nothing if it is not in concurrent-safe usage. +func (mu *RWMutex) Unlock() { + if mu.mutex != nil { + mu.mutex.Unlock() + } +} + +// RLock locks mutex for reading. +// It does nothing if it is not in concurrent-safe usage. +func (mu *RWMutex) RLock() { + if mu.mutex != nil { + mu.mutex.RLock() + } +} + +// RUnlock unlocks mutex for reading. +// It does nothing if it is not in concurrent-safe usage. +func (mu *RWMutex) RUnlock() { + if mu.mutex != nil { + mu.mutex.RUnlock() + } +} diff --git a/vendor/github.com/gogf/gf/v2/internal/tracing/tracing.go b/vendor/github.com/gogf/gf/v2/internal/tracing/tracing.go new file mode 100644 index 00000000..c9d7a6f4 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/internal/tracing/tracing.go @@ -0,0 +1,49 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +// Package tracing provides some utility functions for tracing functionality. 
+package tracing + +import ( + "math" + "time" + + "go.opentelemetry.io/otel/trace" + + "github.com/gogf/gf/v2/container/gtype" + "github.com/gogf/gf/v2/encoding/gbinary" + "github.com/gogf/gf/v2/util/grand" +) + +var ( + randomInitSequence = int32(grand.Intn(math.MaxInt32)) + sequence = gtype.NewInt32(randomInitSequence) +) + +// NewIDs creates and returns a new trace and span ID. +func NewIDs() (traceID trace.TraceID, spanID trace.SpanID) { + return NewTraceID(), NewSpanID() +} + +// NewTraceID creates and returns a trace ID. +func NewTraceID() (traceID trace.TraceID) { + var ( + timestampNanoBytes = gbinary.EncodeInt64(time.Now().UnixNano()) + sequenceBytes = gbinary.EncodeInt32(sequence.Add(1)) + randomBytes = grand.B(4) + ) + copy(traceID[:], timestampNanoBytes) + copy(traceID[8:], sequenceBytes) + copy(traceID[12:], randomBytes) + return +} + +// NewSpanID creates and returns a span ID. +func NewSpanID() (spanID trace.SpanID) { + copy(spanID[:], gbinary.EncodeInt64(time.Now().UnixNano()/1e3)) + copy(spanID[4:], grand.B(4)) + return +} diff --git a/vendor/github.com/gogf/gf/v2/internal/utils/utils.go b/vendor/github.com/gogf/gf/v2/internal/utils/utils.go new file mode 100644 index 00000000..414a90ca --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/internal/utils/utils.go @@ -0,0 +1,8 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +// Package utils provides some utility functions for internal usage. +package utils diff --git a/vendor/github.com/gogf/gf/v2/internal/utils/utils_array.go b/vendor/github.com/gogf/gf/v2/internal/utils/utils_array.go new file mode 100644 index 00000000..b96e039e --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/internal/utils/utils_array.go @@ -0,0 +1,26 @@ +// Copyright GoFrame Author(https://goframe.org). 
All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package utils + +import "reflect" + +// IsArray checks whether given value is array/slice. +// Note that it uses reflect internally implementing this feature. +func IsArray(value interface{}) bool { + rv := reflect.ValueOf(value) + kind := rv.Kind() + if kind == reflect.Ptr { + rv = rv.Elem() + kind = rv.Kind() + } + switch kind { + case reflect.Array, reflect.Slice: + return true + default: + return false + } +} diff --git a/vendor/github.com/gogf/gf/v2/internal/utils/utils_debug.go b/vendor/github.com/gogf/gf/v2/internal/utils/utils_debug.go new file mode 100644 index 00000000..5584341b --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/internal/utils/utils_debug.go @@ -0,0 +1,42 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package utils + +import ( + "github.com/gogf/gf/v2/internal/command" +) + +const ( + // Debug key for checking if in debug mode. + commandEnvKeyForDebugKey = "gf.debug" +) + +var ( + // isDebugEnabled marks whether GoFrame debug mode is enabled. + isDebugEnabled = false +) + +func init() { + // Debugging configured. + value := command.GetOptWithEnv(commandEnvKeyForDebugKey) + if value == "" || value == "0" || value == "false" { + isDebugEnabled = false + } else { + isDebugEnabled = true + } +} + +// IsDebugEnabled checks and returns whether debug mode is enabled. +// The debug mode is enabled when command argument "gf.debug" or environment "GF_DEBUG" is passed. +func IsDebugEnabled() bool { + return isDebugEnabled +} + +// SetDebugEnabled enables/disables the internal debug info. 
+func SetDebugEnabled(enabled bool) { + isDebugEnabled = enabled +} diff --git a/vendor/github.com/gogf/gf/v2/internal/utils/utils_io.go b/vendor/github.com/gogf/gf/v2/internal/utils/utils_io.go new file mode 100644 index 00000000..f51bbe3b --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/internal/utils/utils_io.go @@ -0,0 +1,66 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package utils + +import ( + "io" + "io/ioutil" + + "github.com/gogf/gf/v2/errors/gerror" +) + +// ReadCloser implements the io.ReadCloser interface +// which is used for reading request body content multiple times. +// +// Note that it cannot be closed. +type ReadCloser struct { + index int // Current read position. + content []byte // Content. + repeatable bool +} + +// NewReadCloser creates and returns a RepeatReadCloser object. +func NewReadCloser(content []byte, repeatable bool) io.ReadCloser { + return &ReadCloser{ + content: content, + repeatable: repeatable, + } +} + +// NewReadCloserWithReadCloser creates and returns a RepeatReadCloser object +// with given io.ReadCloser. +func NewReadCloserWithReadCloser(r io.ReadCloser, repeatable bool) (io.ReadCloser, error) { + content, err := ioutil.ReadAll(r) + if err != nil { + err = gerror.Wrapf(err, `ioutil.ReadAll failed`) + return nil, err + } + defer r.Close() + return &ReadCloser{ + content: content, + repeatable: repeatable, + }, nil +} + +// Read implements the io.ReadCloser interface. +func (b *ReadCloser) Read(p []byte) (n int, err error) { + n = copy(p, b.content[b.index:]) + b.index += n + if b.index >= len(b.content) { + // Make it repeatable reading. + if b.repeatable { + b.index = 0 + } + return n, io.EOF + } + return n, nil +} + +// Close implements the io.ReadCloser interface. 
+func (b *ReadCloser) Close() error { + return nil +} diff --git a/vendor/github.com/gogf/gf/v2/internal/utils/utils_is.go b/vendor/github.com/gogf/gf/v2/internal/utils/utils_is.go new file mode 100644 index 00000000..d90f9215 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/internal/utils/utils_is.go @@ -0,0 +1,100 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package utils + +import ( + "reflect" + + "github.com/gogf/gf/v2/internal/empty" +) + +// IsNil checks whether `value` is nil, especially for interface{} type value. +func IsNil(value interface{}) bool { + return empty.IsNil(value) +} + +// IsEmpty checks whether `value` is empty. +func IsEmpty(value interface{}) bool { + return empty.IsEmpty(value) +} + +// IsInt checks whether `value` is type of int. +func IsInt(value interface{}) bool { + switch value.(type) { + case int, *int, int8, *int8, int16, *int16, int32, *int32, int64, *int64: + return true + } + return false +} + +// IsUint checks whether `value` is type of uint. +func IsUint(value interface{}) bool { + switch value.(type) { + case uint, *uint, uint8, *uint8, uint16, *uint16, uint32, *uint32, uint64, *uint64: + return true + } + return false +} + +// IsFloat checks whether `value` is type of float. +func IsFloat(value interface{}) bool { + switch value.(type) { + case float32, *float32, float64, *float64: + return true + } + return false +} + +// IsSlice checks whether `value` is type of slice. 
+func IsSlice(value interface{}) bool { + var ( + reflectValue = reflect.ValueOf(value) + reflectKind = reflectValue.Kind() + ) + for reflectKind == reflect.Ptr { + reflectValue = reflectValue.Elem() + } + switch reflectKind { + case reflect.Slice, reflect.Array: + return true + } + return false +} + +// IsMap checks whether `value` is type of map. +func IsMap(value interface{}) bool { + var ( + reflectValue = reflect.ValueOf(value) + reflectKind = reflectValue.Kind() + ) + for reflectKind == reflect.Ptr { + reflectValue = reflectValue.Elem() + } + switch reflectKind { + case reflect.Map: + return true + } + return false +} + +// IsStruct checks whether `value` is type of struct. +func IsStruct(value interface{}) bool { + var reflectType = reflect.TypeOf(value) + if reflectType == nil { + return false + } + var reflectKind = reflectType.Kind() + for reflectKind == reflect.Ptr { + reflectType = reflectType.Elem() + reflectKind = reflectType.Kind() + } + switch reflectKind { + case reflect.Struct: + return true + } + return false +} diff --git a/vendor/github.com/gogf/gf/v2/internal/utils/utils_list.go b/vendor/github.com/gogf/gf/v2/internal/utils/utils_list.go new file mode 100644 index 00000000..355ad9f8 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/internal/utils/utils_list.go @@ -0,0 +1,37 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package utils + +import "fmt" + +// ListToMapByKey converts `list` to a map[string]interface{} of which key is specified by `key`. +// Note that the item value may be type of slice. 
+func ListToMapByKey(list []map[string]interface{}, key string) map[string]interface{} { + var ( + s = "" + m = make(map[string]interface{}) + tempMap = make(map[string][]interface{}) + hasMultiValues bool + ) + for _, item := range list { + if k, ok := item[key]; ok { + s = fmt.Sprintf(`%v`, k) + tempMap[s] = append(tempMap[s], item) + if len(tempMap[s]) > 1 { + hasMultiValues = true + } + } + } + for k, v := range tempMap { + if hasMultiValues { + m[k] = v + } else { + m[k] = v[0] + } + } + return m +} diff --git a/vendor/github.com/gogf/gf/v2/internal/utils/utils_map.go b/vendor/github.com/gogf/gf/v2/internal/utils/utils_map.go new file mode 100644 index 00000000..fba7da77 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/internal/utils/utils_map.go @@ -0,0 +1,37 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package utils + +// MapPossibleItemByKey tries to find the possible key-value pair for given key ignoring cases and symbols. +// +// Note that this function might be of low performance. +func MapPossibleItemByKey(data map[string]interface{}, key string) (foundKey string, foundValue interface{}) { + if len(data) == 0 { + return + } + if v, ok := data[key]; ok { + return key, v + } + // Loop checking. + for k, v := range data { + if EqualFoldWithoutChars(k, key) { + return k, v + } + } + return "", nil +} + +// MapContainsPossibleKey checks if the given `key` is contained in given map `data`. +// It checks the key ignoring cases and symbols. +// +// Note that this function might be of low performance. 
+func MapContainsPossibleKey(data map[string]interface{}, key string) bool { + if k, _ := MapPossibleItemByKey(data, key); k != "" { + return true + } + return false +} diff --git a/vendor/github.com/gogf/gf/v2/internal/utils/utils_str.go b/vendor/github.com/gogf/gf/v2/internal/utils/utils_str.go new file mode 100644 index 00000000..44c9f0db --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/internal/utils/utils_str.go @@ -0,0 +1,171 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package utils + +import ( + "bytes" + "strings" +) + +var ( + // DefaultTrimChars are the characters which are stripped by Trim* functions in default. + DefaultTrimChars = string([]byte{ + '\t', // Tab. + '\v', // Vertical tab. + '\n', // New line (line feed). + '\r', // Carriage return. + '\f', // New page. + ' ', // Ordinary space. + 0x00, // NUL-byte. + 0x85, // Delete. + 0xA0, // Non-breaking space. + }) +) + +// IsLetterUpper checks whether the given byte b is in upper case. +func IsLetterUpper(b byte) bool { + if b >= byte('A') && b <= byte('Z') { + return true + } + return false +} + +// IsLetterLower checks whether the given byte b is in lower case. +func IsLetterLower(b byte) bool { + if b >= byte('a') && b <= byte('z') { + return true + } + return false +} + +// IsLetter checks whether the given byte b is a letter. +func IsLetter(b byte) bool { + return IsLetterUpper(b) || IsLetterLower(b) +} + +// IsNumeric checks whether the given string s is numeric. +// Note that float string like "123.456" is also numeric. +func IsNumeric(s string) bool { + var ( + dotCount = 0 + length = len(s) + ) + if length == 0 { + return false + } + for i := 0; i < length; i++ { + if s[i] == '-' && i == 0 { + continue + } + if s[i] == '.' 
{ + dotCount++ + if i > 0 && i < length-1 { + continue + } else { + return false + } + } + if s[i] < '0' || s[i] > '9' { + return false + } + } + if dotCount > 1 { + return false + } + return true +} + +// UcFirst returns a copy of the string s with the first letter mapped to its upper case. +func UcFirst(s string) string { + if len(s) == 0 { + return s + } + if IsLetterLower(s[0]) { + return string(s[0]-32) + s[1:] + } + return s +} + +// ReplaceByMap returns a copy of `origin`, +// which is replaced by a map in unordered way, case-sensitively. +func ReplaceByMap(origin string, replaces map[string]string) string { + for k, v := range replaces { + origin = strings.ReplaceAll(origin, k, v) + } + return origin +} + +// RemoveSymbols removes all symbols from string and lefts only numbers and letters. +func RemoveSymbols(s string) string { + var b = make([]rune, 0, len(s)) + for _, c := range s { + if c > 127 { + b = append(b, c) + } else if (c >= '0' && c <= '9') || (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z') { + b = append(b, c) + } + } + return string(b) +} + +// EqualFoldWithoutChars checks string `s1` and `s2` equal case-insensitively, +// with/without chars '-'/'_'/'.'/' '. +func EqualFoldWithoutChars(s1, s2 string) bool { + return strings.EqualFold(RemoveSymbols(s1), RemoveSymbols(s2)) +} + +// SplitAndTrim splits string `str` by a string `delimiter` to an array, +// and calls Trim to every element of this array. It ignores the elements +// which are empty after Trim. +func SplitAndTrim(str, delimiter string, characterMask ...string) []string { + array := make([]string, 0) + for _, v := range strings.Split(str, delimiter) { + v = Trim(v, characterMask...) + if v != "" { + array = append(array, v) + } + } + return array +} + +// Trim strips whitespace (or other characters) from the beginning and end of a string. +// The optional parameter `characterMask` specifies the additional stripped characters. 
+func Trim(str string, characterMask ...string) string { + trimChars := DefaultTrimChars + if len(characterMask) > 0 { + trimChars += characterMask[0] + } + return strings.Trim(str, trimChars) +} + +// FormatCmdKey formats string `s` as command key using uniformed format. +func FormatCmdKey(s string) string { + return strings.ToLower(strings.ReplaceAll(s, "_", ".")) +} + +// FormatEnvKey formats string `s` as environment key using uniformed format. +func FormatEnvKey(s string) string { + return strings.ToUpper(strings.ReplaceAll(s, ".", "_")) +} + +// StripSlashes un-quotes a quoted string by AddSlashes. +func StripSlashes(str string) string { + var buf bytes.Buffer + l, skip := len(str), false + for i, char := range str { + if skip { + skip = false + } else if char == '\\' { + if i+1 < l && str[i+1] == '\\' { + skip = true + } + continue + } + buf.WriteRune(char) + } + return buf.String() +} diff --git a/vendor/github.com/gogf/gf/v2/net/gipv4/gipv4.go b/vendor/github.com/gogf/gf/v2/net/gipv4/gipv4.go new file mode 100644 index 00000000..ea52f913 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/net/gipv4/gipv4.go @@ -0,0 +1,60 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. +// + +// Package gipv4 provides useful API for IPv4 address handling. +package gipv4 + +import ( + "encoding/binary" + "fmt" + "net" + "strconv" + + "github.com/gogf/gf/v2/text/gregex" +) + +// Ip2long converts ip address to an uint32 integer. +func Ip2long(ip string) uint32 { + netIp := net.ParseIP(ip) + if netIp == nil { + return 0 + } + return binary.BigEndian.Uint32(netIp.To4()) +} + +// Long2ip converts an uint32 integer ip address to its string type address. 
+func Long2ip(long uint32) string { + ipByte := make([]byte, 4) + binary.BigEndian.PutUint32(ipByte, long) + return net.IP(ipByte).String() +} + +// Validate checks whether given `ip` a valid IPv4 address. +func Validate(ip string) bool { + return gregex.IsMatchString(`^((25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}(25[0-5]|2[0-4]\d|[01]?\d\d?)$`, ip) +} + +// ParseAddress parses `address` to its ip and port. +// Eg: 192.168.1.1:80 -> 192.168.1.1, 80 +func ParseAddress(address string) (string, int) { + match, err := gregex.MatchString(`^(.+):(\d+)$`, address) + if err == nil { + i, _ := strconv.Atoi(match[2]) + return match[1], i + } + return "", 0 +} + +// GetSegment returns the segment of given ip address. +// Eg: 192.168.2.102 -> 192.168.2 +func GetSegment(ip string) string { + match, err := gregex.MatchString(`^(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})$`, ip) + if err != nil || len(match) < 4 { + return "" + } + return fmt.Sprintf("%s.%s.%s", match[1], match[2], match[3]) +} diff --git a/vendor/github.com/gogf/gf/v2/net/gipv4/gipv4_ip.go b/vendor/github.com/gogf/gf/v2/net/gipv4/gipv4_ip.go new file mode 100644 index 00000000..95bdb848 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/net/gipv4/gipv4_ip.go @@ -0,0 +1,145 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. +// + +package gipv4 + +import ( + "net" + "strconv" + "strings" + + "github.com/gogf/gf/v2/errors/gerror" +) + +// GetIpArray retrieves and returns all the ip of current host. 
+func GetIpArray() (ips []string, err error) { + interfaceAddr, err := net.InterfaceAddrs() + if err != nil { + err = gerror.Wrap(err, `net.InterfaceAddrs failed`) + return nil, err + } + for _, address := range interfaceAddr { + ipNet, isValidIpNet := address.(*net.IPNet) + if isValidIpNet && !ipNet.IP.IsLoopback() { + if ipNet.IP.To4() != nil { + ips = append(ips, ipNet.IP.String()) + } + } + } + return ips, nil +} + +// MustGetIntranetIp performs as GetIntranetIp, but it panics if any error occurs. +func MustGetIntranetIp() string { + ip, err := GetIntranetIp() + if err != nil { + panic(err) + } + return ip +} + +// GetIntranetIp retrieves and returns the first intranet ip of current machine. +func GetIntranetIp() (ip string, err error) { + ips, err := GetIntranetIpArray() + if err != nil { + return "", err + } + if len(ips) == 0 { + return "", gerror.New("no intranet ip found") + } + return ips[0], nil +} + +// GetIntranetIpArray retrieves and returns the intranet ip list of current machine. 
+func GetIntranetIpArray() (ips []string, err error) { + var ( + addresses []net.Addr + interFaces []net.Interface + ) + interFaces, err = net.Interfaces() + if err != nil { + err = gerror.Wrap(err, `net.Interfaces failed`) + return ips, err + } + for _, interFace := range interFaces { + if interFace.Flags&net.FlagUp == 0 { + // interface down + continue + } + if interFace.Flags&net.FlagLoopback != 0 { + // loop back interface + continue + } + // ignore warden bridge + if strings.HasPrefix(interFace.Name, "w-") { + continue + } + addresses, err = interFace.Addrs() + if err != nil { + err = gerror.Wrap(err, `interFace.Addrs failed`) + return ips, err + } + for _, addr := range addresses { + var ip net.IP + switch v := addr.(type) { + case *net.IPNet: + ip = v.IP + case *net.IPAddr: + ip = v.IP + } + + if ip == nil || ip.IsLoopback() { + continue + } + ip = ip.To4() + if ip == nil { + // not an ipv4 address + continue + } + ipStr := ip.String() + if IsIntranet(ipStr) { + ips = append(ips, ipStr) + } + } + } + return ips, nil +} + +// IsIntranet checks and returns whether given ip an intranet ip. 
+// +// Local: 127.0.0.1 +// A: 10.0.0.0--10.255.255.255 +// B: 172.16.0.0--172.31.255.255 +// C: 192.168.0.0--192.168.255.255 +func IsIntranet(ip string) bool { + if ip == "127.0.0.1" { + return true + } + array := strings.Split(ip, ".") + if len(array) != 4 { + return false + } + // A + if array[0] == "10" || (array[0] == "192" && array[1] == "168") { + return true + } + // C + if array[0] == "192" && array[1] == "168" { + return true + } + // B + if array[0] == "172" { + second, err := strconv.ParseInt(array[1], 10, 64) + if err != nil { + return false + } + if second >= 16 && second <= 31 { + return true + } + } + return false +} diff --git a/vendor/github.com/gogf/gf/v2/net/gipv4/gipv4_lookup.go b/vendor/github.com/gogf/gf/v2/net/gipv4/gipv4_lookup.go new file mode 100644 index 00000000..f6aed4b0 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/net/gipv4/gipv4_lookup.go @@ -0,0 +1,52 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. +// + +package gipv4 + +import ( + "net" + "strings" +) + +// GetHostByName returns the IPv4 address corresponding to a given Internet host name. +func GetHostByName(hostname string) (string, error) { + ips, err := net.LookupIP(hostname) + if ips != nil { + for _, v := range ips { + if v.To4() != nil { + return v.String(), nil + } + } + return "", nil + } + return "", err +} + +// GetHostsByName returns a list of IPv4 addresses corresponding to a given Internet +// host name. 
+func GetHostsByName(hostname string) ([]string, error) { + ips, err := net.LookupIP(hostname) + if ips != nil { + var ipStrings []string + for _, v := range ips { + if v.To4() != nil { + ipStrings = append(ipStrings, v.String()) + } + } + return ipStrings, nil + } + return nil, err +} + +// GetNameByAddr returns the Internet host name corresponding to a given IP address. +func GetNameByAddr(ipAddress string) (string, error) { + names, err := net.LookupAddr(ipAddress) + if names != nil { + return strings.TrimRight(names[0], "."), nil + } + return "", err +} diff --git a/vendor/github.com/gogf/gf/v2/net/gipv4/gipv4_mac.go b/vendor/github.com/gogf/gf/v2/net/gipv4/gipv4_mac.go new file mode 100644 index 00000000..a0952055 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/net/gipv4/gipv4_mac.go @@ -0,0 +1,43 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. +// + +package gipv4 + +import ( + "net" + + "github.com/gogf/gf/v2/errors/gerror" +) + +// GetMac retrieves and returns the first mac address of current host. +func GetMac() (mac string, err error) { + macs, err := GetMacArray() + if err != nil { + return "", err + } + if len(macs) > 0 { + return macs[0], nil + } + return "", nil +} + +// GetMacArray retrieves and returns all the mac address of current host. 
+func GetMacArray() (macs []string, err error) { + netInterfaces, err := net.Interfaces() + if err != nil { + err = gerror.Wrap(err, `net.Interfaces failed`) + return nil, err + } + for _, netInterface := range netInterfaces { + macAddr := netInterface.HardwareAddr.String() + if len(macAddr) == 0 { + continue + } + macs = append(macs, macAddr) + } + return macs, nil +} diff --git a/vendor/github.com/gogf/gf/v2/net/gtrace/gtrace.go b/vendor/github.com/gogf/gf/v2/net/gtrace/gtrace.go new file mode 100644 index 00000000..0eb96eba --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/net/gtrace/gtrace.go @@ -0,0 +1,180 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +// Package gtrace provides convenience wrapping functionality for tracing feature using OpenTelemetry. +package gtrace + +import ( + "context" + "os" + "strings" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/propagation" + semconv "go.opentelemetry.io/otel/semconv/v1.4.0" + "go.opentelemetry.io/otel/trace" + + "github.com/gogf/gf/v2/container/gmap" + "github.com/gogf/gf/v2/container/gvar" + "github.com/gogf/gf/v2/errors/gcode" + "github.com/gogf/gf/v2/errors/gerror" + "github.com/gogf/gf/v2/internal/command" + "github.com/gogf/gf/v2/net/gipv4" + "github.com/gogf/gf/v2/net/gtrace/internal/provider" + "github.com/gogf/gf/v2/text/gstr" + "github.com/gogf/gf/v2/util/gconv" +) + +const ( + tracingCommonKeyIpIntranet = `ip.intranet` + tracingCommonKeyIpHostname = `hostname` + commandEnvKeyForMaxContentLogSize = "gf.gtrace.max.content.log.size" // To avoid too big tracing content. + commandEnvKeyForTracingInternal = "gf.gtrace.tracing.internal" // For detailed controlling for tracing content. 
+) + +var ( + intranetIps, _ = gipv4.GetIntranetIpArray() + intranetIpStr = strings.Join(intranetIps, ",") + hostname, _ = os.Hostname() + tracingInternal = true // tracingInternal enables tracing for internal type spans. + tracingMaxContentLogSize = 512 * 1024 // Max log size for request and response body, especially for HTTP/RPC request. + // defaultTextMapPropagator is the default propagator for context propagation between peers. + defaultTextMapPropagator = propagation.NewCompositeTextMapPropagator( + propagation.TraceContext{}, + propagation.Baggage{}, + ) +) + +func init() { + tracingInternal = gconv.Bool(command.GetOptWithEnv(commandEnvKeyForTracingInternal, "true")) + if maxContentLogSize := gconv.Int(command.GetOptWithEnv(commandEnvKeyForMaxContentLogSize)); maxContentLogSize > 0 { + tracingMaxContentLogSize = maxContentLogSize + } + // Default trace provider. + otel.SetTracerProvider(provider.New()) + CheckSetDefaultTextMapPropagator() +} + +// IsUsingDefaultProvider checks and return if currently using default trace provider. +func IsUsingDefaultProvider() bool { + _, ok := otel.GetTracerProvider().(*provider.TracerProvider) + return ok +} + +// IsTracingInternal returns whether tracing spans of internal components. +func IsTracingInternal() bool { + return tracingInternal +} + +// MaxContentLogSize returns the max log size for request and response body, especially for HTTP/RPC request. +func MaxContentLogSize() int { + return tracingMaxContentLogSize +} + +// CommonLabels returns common used attribute labels: +// ip.intranet, hostname. +func CommonLabels() []attribute.KeyValue { + return []attribute.KeyValue{ + attribute.String(tracingCommonKeyIpHostname, hostname), + attribute.String(tracingCommonKeyIpIntranet, intranetIpStr), + semconv.HostNameKey.String(hostname), + } +} + +// CheckSetDefaultTextMapPropagator sets the default TextMapPropagator if it is not set previously. 
+func CheckSetDefaultTextMapPropagator() { + p := otel.GetTextMapPropagator() + if len(p.Fields()) == 0 { + otel.SetTextMapPropagator(GetDefaultTextMapPropagator()) + } +} + +// GetDefaultTextMapPropagator returns the default propagator for context propagation between peers. +func GetDefaultTextMapPropagator() propagation.TextMapPropagator { + return defaultTextMapPropagator +} + +// GetTraceID retrieves and returns TraceId from context. +// It returns an empty string is tracing feature is not activated. +func GetTraceID(ctx context.Context) string { + if ctx == nil { + return "" + } + traceID := trace.SpanContextFromContext(ctx).TraceID() + if traceID.IsValid() { + return traceID.String() + } + return "" +} + +// GetSpanID retrieves and returns SpanId from context. +// It returns an empty string is tracing feature is not activated. +func GetSpanID(ctx context.Context) string { + if ctx == nil { + return "" + } + spanID := trace.SpanContextFromContext(ctx).SpanID() + if spanID.IsValid() { + return spanID.String() + } + return "" +} + +// SetBaggageValue is a convenient function for adding one key-value pair to baggage. +// Note that it uses attribute.Any to set the key-value pair. +func SetBaggageValue(ctx context.Context, key string, value interface{}) context.Context { + return NewBaggage(ctx).SetValue(key, value) +} + +// SetBaggageMap is a convenient function for adding map key-value pairs to baggage. +// Note that it uses attribute.Any to set the key-value pair. +func SetBaggageMap(ctx context.Context, data map[string]interface{}) context.Context { + return NewBaggage(ctx).SetMap(data) +} + +// GetBaggageMap retrieves and returns the baggage values as map. +func GetBaggageMap(ctx context.Context) *gmap.StrAnyMap { + return NewBaggage(ctx).GetMap() +} + +// GetBaggageVar retrieves value and returns a *gvar.Var for specified key from baggage. 
+func GetBaggageVar(ctx context.Context, key string) *gvar.Var { + return NewBaggage(ctx).GetVar(key) +} + +// WithUUID injects custom trace id with UUID into context to propagate. +func WithUUID(ctx context.Context, uuid string) (context.Context, error) { + return WithTraceID(ctx, gstr.Replace(uuid, "-", "")) +} + +// WithTraceID injects custom trace id into context to propagate. +func WithTraceID(ctx context.Context, traceID string) (context.Context, error) { + generatedTraceID, err := trace.TraceIDFromHex(traceID) + if err != nil { + return ctx, gerror.WrapCodef( + gcode.CodeInvalidParameter, + err, + `invalid custom traceID "%s", a traceID string should be composed with [0-f] and fixed length 32`, + traceID, + ) + } + sc := trace.SpanContextFromContext(ctx) + if !sc.HasTraceID() { + var span trace.Span + ctx, span = NewSpan(ctx, "gtrace.WithTraceID") + defer span.End() + sc = trace.SpanContextFromContext(ctx) + } + ctx = trace.ContextWithRemoteSpanContext(ctx, trace.NewSpanContext(trace.SpanContextConfig{ + TraceID: generatedTraceID, + SpanID: sc.SpanID(), + TraceFlags: sc.TraceFlags(), + TraceState: sc.TraceState(), + Remote: sc.IsRemote(), + })) + return ctx, nil +} diff --git a/vendor/github.com/gogf/gf/v2/net/gtrace/gtrace_baggage.go b/vendor/github.com/gogf/gf/v2/net/gtrace/gtrace_baggage.go new file mode 100644 index 00000000..26a9eb86 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/net/gtrace/gtrace_baggage.go @@ -0,0 +1,75 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gtrace + +import ( + "context" + + "go.opentelemetry.io/otel/baggage" + + "github.com/gogf/gf/v2/container/gmap" + "github.com/gogf/gf/v2/container/gvar" + "github.com/gogf/gf/v2/util/gconv" +) + +// Baggage holds the data through all tracing spans. 
+type Baggage struct { + ctx context.Context +} + +// NewBaggage creates and returns a new Baggage object from given tracing context. +func NewBaggage(ctx context.Context) *Baggage { + if ctx == nil { + ctx = context.Background() + } + return &Baggage{ + ctx: ctx, + } +} + +// Ctx returns the context that Baggage holds. +func (b *Baggage) Ctx() context.Context { + return b.ctx +} + +// SetValue is a convenient function for adding one key-value pair to baggage. +// Note that it uses attribute.Any to set the key-value pair. +func (b *Baggage) SetValue(key string, value interface{}) context.Context { + member, _ := baggage.NewMember(key, gconv.String(value)) + bag, _ := baggage.New(member) + b.ctx = baggage.ContextWithBaggage(b.ctx, bag) + return b.ctx +} + +// SetMap is a convenient function for adding map key-value pairs to baggage. +// Note that it uses attribute.Any to set the key-value pair. +func (b *Baggage) SetMap(data map[string]interface{}) context.Context { + members := make([]baggage.Member, 0) + for k, v := range data { + member, _ := baggage.NewMember(k, gconv.String(v)) + members = append(members, member) + } + bag, _ := baggage.New(members...) + b.ctx = baggage.ContextWithBaggage(b.ctx, bag) + return b.ctx +} + +// GetMap retrieves and returns the baggage values as map. +func (b *Baggage) GetMap() *gmap.StrAnyMap { + m := gmap.NewStrAnyMap() + members := baggage.FromContext(b.ctx).Members() + for i := range members { + m.Set(members[i].Key(), members[i].Value()) + } + return m +} + +// GetVar retrieves value and returns a *gvar.Var for specified key from baggage. 
+func (b *Baggage) GetVar(key string) *gvar.Var { + value := baggage.FromContext(b.ctx).Member(key).Value() + return gvar.New(value) +} diff --git a/vendor/github.com/gogf/gf/v2/net/gtrace/gtrace_carrier.go b/vendor/github.com/gogf/gf/v2/net/gtrace/gtrace_carrier.go new file mode 100644 index 00000000..e29d7007 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/net/gtrace/gtrace_carrier.go @@ -0,0 +1,62 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gtrace + +import ( + "github.com/gogf/gf/v2/internal/json" + "github.com/gogf/gf/v2/util/gconv" +) + +// Carrier is the storage medium used by a TextMapPropagator. +type Carrier map[string]interface{} + +// NewCarrier creates and returns a Carrier. +func NewCarrier(data ...map[string]interface{}) Carrier { + if len(data) > 0 && data[0] != nil { + return data[0] + } + return make(map[string]interface{}) +} + +// Get returns the value associated with the passed key. +func (c Carrier) Get(k string) string { + return gconv.String(c[k]) +} + +// Set stores the key-value pair. +func (c Carrier) Set(k, v string) { + c[k] = v +} + +// Keys lists the keys stored in this carrier. +func (c Carrier) Keys() []string { + keys := make([]string, 0, len(c)) + for k := range c { + keys = append(keys, k) + } + return keys +} + +// MustMarshal .returns the JSON encoding of c +func (c Carrier) MustMarshal() []byte { + b, err := json.Marshal(c) + if err != nil { + panic(err) + } + return b +} + +// String converts and returns current Carrier as string. +func (c Carrier) String() string { + return string(c.MustMarshal()) +} + +// UnmarshalJSON implements interface UnmarshalJSON for package json. 
+func (c Carrier) UnmarshalJSON(b []byte) error { + carrier := NewCarrier(nil) + return json.UnmarshalUseNumber(b, carrier) +} diff --git a/vendor/github.com/gogf/gf/v2/net/gtrace/gtrace_span.go b/vendor/github.com/gogf/gf/v2/net/gtrace/gtrace_span.go new file mode 100644 index 00000000..0d7fb240 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/net/gtrace/gtrace_span.go @@ -0,0 +1,26 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gtrace + +import ( + "context" + + "go.opentelemetry.io/otel/trace" +) + +// Span warps trace.Span for compatibility and extension. +type Span struct { + trace.Span +} + +// NewSpan creates a span using default tracer. +func NewSpan(ctx context.Context, spanName string, opts ...trace.SpanStartOption) (context.Context, *Span) { + ctx, span := NewTracer().Start(ctx, spanName, opts...) + return ctx, &Span{ + Span: span, + } +} diff --git a/vendor/github.com/gogf/gf/v2/net/gtrace/gtrace_tracer.go b/vendor/github.com/gogf/gf/v2/net/gtrace/gtrace_tracer.go new file mode 100644 index 00000000..47d2baa7 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/net/gtrace/gtrace_tracer.go @@ -0,0 +1,28 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gtrace + +import ( + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/trace" +) + +// Tracer warps trace.Tracer for compatibility and extension. +type Tracer struct { + trace.Tracer +} + +// NewTracer Tracer is a short function for retrieving Tracer. 
+func NewTracer(name ...string) *Tracer { + tracerName := "" + if len(name) > 0 { + tracerName = name[0] + } + return &Tracer{ + Tracer: otel.Tracer(tracerName), + } +} diff --git a/vendor/github.com/gogf/gf/v2/net/gtrace/internal/provider/provider.go b/vendor/github.com/gogf/gf/v2/net/gtrace/internal/provider/provider.go new file mode 100644 index 00000000..28159d5a --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/net/gtrace/internal/provider/provider.go @@ -0,0 +1,33 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package provider + +import ( + sdkTrace "go.opentelemetry.io/otel/sdk/trace" +) + +type TracerProvider struct { + *sdkTrace.TracerProvider +} + +// New returns a new and configured TracerProvider, which has no SpanProcessor. +// +// In default the returned TracerProvider is configured with: +// - a ParentBased(AlwaysSample) Sampler; +// - a unix nano timestamp and random umber based IDGenerator; +// - the resource.Default() Resource; +// - the default SpanLimits. +// +// The passed opts are used to override these default values and configure the +// returned TracerProvider appropriately. +func New() *TracerProvider { + return &TracerProvider{ + TracerProvider: sdkTrace.NewTracerProvider( + sdkTrace.WithIDGenerator(NewIDGenerator()), + ), + } +} diff --git a/vendor/github.com/gogf/gf/v2/net/gtrace/internal/provider/provider_idgenerator.go b/vendor/github.com/gogf/gf/v2/net/gtrace/internal/provider/provider_idgenerator.go new file mode 100644 index 00000000..6c5baec3 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/net/gtrace/internal/provider/provider_idgenerator.go @@ -0,0 +1,33 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. 
+// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package provider + +import ( + "context" + + "go.opentelemetry.io/otel/trace" + + "github.com/gogf/gf/v2/internal/tracing" +) + +// IDGenerator is a trace ID generator. +type IDGenerator struct{} + +// NewIDGenerator returns a new IDGenerator. +func NewIDGenerator() *IDGenerator { + return &IDGenerator{} +} + +// NewIDs creates and returns a new trace and span ID. +func (id *IDGenerator) NewIDs(ctx context.Context) (traceID trace.TraceID, spanID trace.SpanID) { + return tracing.NewIDs() +} + +// NewSpanID returns an ID for a new span in the trace with traceID. +func (id *IDGenerator) NewSpanID(ctx context.Context, traceID trace.TraceID) (spanID trace.SpanID) { + return tracing.NewSpanID() +} diff --git a/vendor/github.com/gogf/gf/v2/os/gcache/gcache.go b/vendor/github.com/gogf/gf/v2/os/gcache/gcache.go new file mode 100644 index 00000000..a0f8b1cc --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/gcache/gcache.go @@ -0,0 +1,240 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +// Package gcache provides kinds of cache management for process. +// +// It provides a concurrent-safe in-memory cache adapter for process in default. +package gcache + +import ( + "context" + "time" + + "github.com/gogf/gf/v2/container/gvar" +) + +// Func is the cache function that calculates and returns the value. +type Func func(ctx context.Context) (value interface{}, err error) + +// Default cache object. +var defaultCache = New() + +// Set sets cache with `key`-`value` pair, which is expired after `duration`. +// +// It does not expire if `duration` == 0. +// It deletes the keys of `data` if `duration` < 0 or given `value` is nil. 
+func Set(ctx context.Context, key interface{}, value interface{}, duration time.Duration) error { + return defaultCache.Set(ctx, key, value, duration) +} + +// SetMap batch sets cache with key-value pairs by `data` map, which is expired after `duration`. +// +// It does not expire if `duration` == 0. +// It deletes the keys of `data` if `duration` < 0 or given `value` is nil. +func SetMap(ctx context.Context, data map[interface{}]interface{}, duration time.Duration) error { + return defaultCache.SetMap(ctx, data, duration) +} + +// SetIfNotExist sets cache with `key`-`value` pair which is expired after `duration` +// if `key` does not exist in the cache. It returns true the `key` does not exist in the +// cache, and it sets `value` successfully to the cache, or else it returns false. +// +// It does not expire if `duration` == 0. +// It deletes the `key` if `duration` < 0 or given `value` is nil. +func SetIfNotExist(ctx context.Context, key interface{}, value interface{}, duration time.Duration) (bool, error) { + return defaultCache.SetIfNotExist(ctx, key, value, duration) +} + +// SetIfNotExistFunc sets `key` with result of function `f` and returns true +// if `key` does not exist in the cache, or else it does nothing and returns false if `key` already exists. +// +// The parameter `value` can be type of `func() interface{}`, but it does nothing if its +// result is nil. +// +// It does not expire if `duration` == 0. +// It deletes the `key` if `duration` < 0 or given `value` is nil. +func SetIfNotExistFunc(ctx context.Context, key interface{}, f Func, duration time.Duration) (bool, error) { + return defaultCache.SetIfNotExistFunc(ctx, key, f, duration) +} + +// SetIfNotExistFuncLock sets `key` with result of function `f` and returns true +// if `key` does not exist in the cache, or else it does nothing and returns false if `key` already exists. +// +// It does not expire if `duration` == 0. +// It deletes the `key` if `duration` < 0 or given `value` is nil. 
+// +// Note that it differs from function `SetIfNotExistFunc` is that the function `f` is executed within +// writing mutex lock for concurrent safety purpose. +func SetIfNotExistFuncLock(ctx context.Context, key interface{}, f Func, duration time.Duration) (bool, error) { + return defaultCache.SetIfNotExistFuncLock(ctx, key, f, duration) +} + +// Get retrieves and returns the associated value of given `key`. +// It returns nil if it does not exist, or its value is nil, or it's expired. +// If you would like to check if the `key` exists in the cache, it's better using function Contains. +func Get(ctx context.Context, key interface{}) (*gvar.Var, error) { + return defaultCache.Get(ctx, key) +} + +// GetOrSet retrieves and returns the value of `key`, or sets `key`-`value` pair and +// returns `value` if `key` does not exist in the cache. The key-value pair expires +// after `duration`. +// +// It does not expire if `duration` == 0. +// It deletes the `key` if `duration` < 0 or given `value` is nil, but it does nothing +// if `value` is a function and the function result is nil. +func GetOrSet(ctx context.Context, key interface{}, value interface{}, duration time.Duration) (*gvar.Var, error) { + return defaultCache.GetOrSet(ctx, key, value, duration) +} + +// GetOrSetFunc retrieves and returns the value of `key`, or sets `key` with result of +// function `f` and returns its result if `key` does not exist in the cache. The key-value +// pair expires after `duration`. +// +// It does not expire if `duration` == 0. +// It deletes the `key` if `duration` < 0 or given `value` is nil, but it does nothing +// if `value` is a function and the function result is nil. 
+func GetOrSetFunc(ctx context.Context, key interface{}, f Func, duration time.Duration) (*gvar.Var, error) { + return defaultCache.GetOrSetFunc(ctx, key, f, duration) +} + +// GetOrSetFuncLock retrieves and returns the value of `key`, or sets `key` with result of +// function `f` and returns its result if `key` does not exist in the cache. The key-value +// pair expires after `duration`. +// +// It does not expire if `duration` == 0. +// It deletes the `key` if `duration` < 0 or given `value` is nil, but it does nothing +// if `value` is a function and the function result is nil. +// +// Note that it differs from function `GetOrSetFunc` is that the function `f` is executed within +// writing mutex lock for concurrent safety purpose. +func GetOrSetFuncLock(ctx context.Context, key interface{}, f Func, duration time.Duration) (*gvar.Var, error) { + return defaultCache.GetOrSetFuncLock(ctx, key, f, duration) +} + +// Contains checks and returns true if `key` exists in the cache, or else returns false. +func Contains(ctx context.Context, key interface{}) (bool, error) { + return defaultCache.Contains(ctx, key) +} + +// GetExpire retrieves and returns the expiration of `key` in the cache. +// +// Note that, +// It returns 0 if the `key` does not expire. +// It returns -1 if the `key` does not exist in the cache. +func GetExpire(ctx context.Context, key interface{}) (time.Duration, error) { + return defaultCache.GetExpire(ctx, key) +} + +// Remove deletes one or more keys from cache, and returns its value. +// If multiple keys are given, it returns the value of the last deleted item. +func Remove(ctx context.Context, keys ...interface{}) (value *gvar.Var, err error) { + return defaultCache.Remove(ctx, keys...) +} + +// Removes deletes `keys` in the cache. +func Removes(ctx context.Context, keys []interface{}) error { + return defaultCache.Removes(ctx, keys) +} + +// Update updates the value of `key` without changing its expiration and returns the old value. 
+// The returned value `exist` is false if the `key` does not exist in the cache. +// +// It deletes the `key` if given `value` is nil. +// It does nothing if `key` does not exist in the cache. +func Update(ctx context.Context, key interface{}, value interface{}) (oldValue *gvar.Var, exist bool, err error) { + return defaultCache.Update(ctx, key, value) +} + +// UpdateExpire updates the expiration of `key` and returns the old expiration duration value. +// +// It returns -1 and does nothing if the `key` does not exist in the cache. +// It deletes the `key` if `duration` < 0. +func UpdateExpire(ctx context.Context, key interface{}, duration time.Duration) (oldDuration time.Duration, err error) { + return defaultCache.UpdateExpire(ctx, key, duration) +} + +// Size returns the number of items in the cache. +func Size(ctx context.Context) (int, error) { + return defaultCache.Size(ctx) +} + +// Data returns a copy of all key-value pairs in the cache as map type. +// Note that this function may lead lots of memory usage, you can implement this function +// if necessary. +func Data(ctx context.Context) (map[interface{}]interface{}, error) { + return defaultCache.Data(ctx) +} + +// Keys returns all keys in the cache as slice. +func Keys(ctx context.Context) ([]interface{}, error) { + return defaultCache.Keys(ctx) +} + +// KeyStrings returns all keys in the cache as string slice. +func KeyStrings(ctx context.Context) ([]string, error) { + return defaultCache.KeyStrings(ctx) +} + +// Values returns all values in the cache as slice. +func Values(ctx context.Context) ([]interface{}, error) { + return defaultCache.Values(ctx) +} + +// MustGet acts like Get, but it panics if any error occurs. +func MustGet(ctx context.Context, key interface{}) *gvar.Var { + return defaultCache.MustGet(ctx, key) +} + +// MustGetOrSet acts like GetOrSet, but it panics if any error occurs. 
+func MustGetOrSet(ctx context.Context, key interface{}, value interface{}, duration time.Duration) *gvar.Var { + return defaultCache.MustGetOrSet(ctx, key, value, duration) +} + +// MustGetOrSetFunc acts like GetOrSetFunc, but it panics if any error occurs. +func MustGetOrSetFunc(ctx context.Context, key interface{}, f Func, duration time.Duration) *gvar.Var { + return defaultCache.MustGetOrSetFunc(ctx, key, f, duration) +} + +// MustGetOrSetFuncLock acts like GetOrSetFuncLock, but it panics if any error occurs. +func MustGetOrSetFuncLock(ctx context.Context, key interface{}, f Func, duration time.Duration) *gvar.Var { + return defaultCache.MustGetOrSetFuncLock(ctx, key, f, duration) +} + +// MustContains acts like Contains, but it panics if any error occurs. +func MustContains(ctx context.Context, key interface{}) bool { + return defaultCache.MustContains(ctx, key) +} + +// MustGetExpire acts like GetExpire, but it panics if any error occurs. +func MustGetExpire(ctx context.Context, key interface{}) time.Duration { + return defaultCache.MustGetExpire(ctx, key) +} + +// MustSize acts like Size, but it panics if any error occurs. +func MustSize(ctx context.Context) int { + return defaultCache.MustSize(ctx) +} + +// MustData acts like Data, but it panics if any error occurs. +func MustData(ctx context.Context) map[interface{}]interface{} { + return defaultCache.MustData(ctx) +} + +// MustKeys acts like Keys, but it panics if any error occurs. +func MustKeys(ctx context.Context) []interface{} { + return defaultCache.MustKeys(ctx) +} + +// MustKeyStrings acts like KeyStrings, but it panics if any error occurs. +func MustKeyStrings(ctx context.Context) []string { + return defaultCache.MustKeyStrings(ctx) +} + +// MustValues acts like Values, but it panics if any error occurs. 
+func MustValues(ctx context.Context) []interface{} { + return defaultCache.MustValues(ctx) +} diff --git a/vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter.go b/vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter.go new file mode 100644 index 00000000..3c98011b --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter.go @@ -0,0 +1,142 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gcache + +import ( + "context" + "time" + + "github.com/gogf/gf/v2/container/gvar" +) + +// Adapter is the core adapter for cache features implements. +// +// Note that the implementer itself should guarantee the concurrent safety of these functions. +type Adapter interface { + // Set sets cache with `key`-`value` pair, which is expired after `duration`. + // + // It does not expire if `duration` == 0. + // It deletes the keys of `data` if `duration` < 0 or given `value` is nil. + Set(ctx context.Context, key interface{}, value interface{}, duration time.Duration) error + + // SetMap batch sets cache with key-value pairs by `data` map, which is expired after `duration`. + // + // It does not expire if `duration` == 0. + // It deletes the keys of `data` if `duration` < 0 or given `value` is nil. + SetMap(ctx context.Context, data map[interface{}]interface{}, duration time.Duration) error + + // SetIfNotExist sets cache with `key`-`value` pair which is expired after `duration` + // if `key` does not exist in the cache. It returns true the `key` does not exist in the + // cache, and it sets `value` successfully to the cache, or else it returns false. + // + // It does not expire if `duration` == 0. + // It deletes the `key` if `duration` < 0 or given `value` is nil. 
+ SetIfNotExist(ctx context.Context, key interface{}, value interface{}, duration time.Duration) (ok bool, err error) + + // SetIfNotExistFunc sets `key` with result of function `f` and returns true + // if `key` does not exist in the cache, or else it does nothing and returns false if `key` already exists. + // + // The parameter `value` can be type of `func() interface{}`, but it does nothing if its + // result is nil. + // + // It does not expire if `duration` == 0. + // It deletes the `key` if `duration` < 0 or given `value` is nil. + SetIfNotExistFunc(ctx context.Context, key interface{}, f Func, duration time.Duration) (ok bool, err error) + + // SetIfNotExistFuncLock sets `key` with result of function `f` and returns true + // if `key` does not exist in the cache, or else it does nothing and returns false if `key` already exists. + // + // It does not expire if `duration` == 0. + // It deletes the `key` if `duration` < 0 or given `value` is nil. + // + // Note that it differs from function `SetIfNotExistFunc` is that the function `f` is executed within + // writing mutex lock for concurrent safety purpose. + SetIfNotExistFuncLock(ctx context.Context, key interface{}, f Func, duration time.Duration) (ok bool, err error) + + // Get retrieves and returns the associated value of given `key`. + // It returns nil if it does not exist, or its value is nil, or it's expired. + // If you would like to check if the `key` exists in the cache, it's better using function Contains. + Get(ctx context.Context, key interface{}) (*gvar.Var, error) + + // GetOrSet retrieves and returns the value of `key`, or sets `key`-`value` pair and + // returns `value` if `key` does not exist in the cache. The key-value pair expires + // after `duration`. + // + // It does not expire if `duration` == 0. + // It deletes the `key` if `duration` < 0 or given `value` is nil, but it does nothing + // if `value` is a function and the function result is nil. 
+ GetOrSet(ctx context.Context, key interface{}, value interface{}, duration time.Duration) (result *gvar.Var, err error) + + // GetOrSetFunc retrieves and returns the value of `key`, or sets `key` with result of + // function `f` and returns its result if `key` does not exist in the cache. The key-value + // pair expires after `duration`. + // + // It does not expire if `duration` == 0. + // It deletes the `key` if `duration` < 0 or given `value` is nil, but it does nothing + // if `value` is a function and the function result is nil. + GetOrSetFunc(ctx context.Context, key interface{}, f Func, duration time.Duration) (result *gvar.Var, err error) + + // GetOrSetFuncLock retrieves and returns the value of `key`, or sets `key` with result of + // function `f` and returns its result if `key` does not exist in the cache. The key-value + // pair expires after `duration`. + // + // It does not expire if `duration` == 0. + // It deletes the `key` if `duration` < 0 or given `value` is nil, but it does nothing + // if `value` is a function and the function result is nil. + // + // Note that it differs from function `GetOrSetFunc` is that the function `f` is executed within + // writing mutex lock for concurrent safety purpose. + GetOrSetFuncLock(ctx context.Context, key interface{}, f Func, duration time.Duration) (result *gvar.Var, err error) + + // Contains checks and returns true if `key` exists in the cache, or else returns false. + Contains(ctx context.Context, key interface{}) (bool, error) + + // Size returns the number of items in the cache. + Size(ctx context.Context) (size int, err error) + + // Data returns a copy of all key-value pairs in the cache as map type. + // Note that this function may lead lots of memory usage, you can implement this function + // if necessary. + Data(ctx context.Context) (data map[interface{}]interface{}, err error) + + // Keys returns all keys in the cache as slice. 
+ Keys(ctx context.Context) (keys []interface{}, err error) + + // Values returns all values in the cache as slice. + Values(ctx context.Context) (values []interface{}, err error) + + // Update updates the value of `key` without changing its expiration and returns the old value. + // The returned value `exist` is false if the `key` does not exist in the cache. + // + // It deletes the `key` if given `value` is nil. + // It does nothing if `key` does not exist in the cache. + Update(ctx context.Context, key interface{}, value interface{}) (oldValue *gvar.Var, exist bool, err error) + + // UpdateExpire updates the expiration of `key` and returns the old expiration duration value. + // + // It returns -1 and does nothing if the `key` does not exist in the cache. + // It deletes the `key` if `duration` < 0. + UpdateExpire(ctx context.Context, key interface{}, duration time.Duration) (oldDuration time.Duration, err error) + + // GetExpire retrieves and returns the expiration of `key` in the cache. + // + // Note that, + // It returns 0 if the `key` does not expire. + // It returns -1 if the `key` does not exist in the cache. + GetExpire(ctx context.Context, key interface{}) (time.Duration, error) + + // Remove deletes one or more keys from cache, and returns its value. + // If multiple keys are given, it returns the value of the last deleted item. + Remove(ctx context.Context, keys ...interface{}) (lastValue *gvar.Var, err error) + + // Clear clears all data of the cache. + // Note that this function is sensitive and should be carefully used. + Clear(ctx context.Context) error + + // Close closes the cache if necessary. 
+ Close(ctx context.Context) error +} diff --git a/vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter_memory.go b/vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter_memory.go new file mode 100644 index 00000000..707b04bd --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter_memory.go @@ -0,0 +1,476 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gcache + +import ( + "context" + "math" + "time" + + "github.com/gogf/gf/v2/container/glist" + "github.com/gogf/gf/v2/container/gset" + "github.com/gogf/gf/v2/container/gtype" + "github.com/gogf/gf/v2/container/gvar" + "github.com/gogf/gf/v2/os/gtime" + "github.com/gogf/gf/v2/os/gtimer" +) + +// AdapterMemory is an adapter implements using memory. +type AdapterMemory struct { + // cap limits the size of the cache pool. + // If the size of the cache exceeds the cap, + // the cache expiration process performs according to the LRU algorithm. + // It is 0 in default which means no limits. + cap int + data *adapterMemoryData // data is the underlying cache data which is stored in a hash table. + expireTimes *adapterMemoryExpireTimes // expireTimes is the expiring key to its timestamp mapping, which is used for quick indexing and deleting. + expireSets *adapterMemoryExpireSets // expireSets is the expiring timestamp to its key set mapping, which is used for quick indexing and deleting. + lru *adapterMemoryLru // lru is the LRU manager, which is enabled when attribute cap > 0. + lruGetList *glist.List // lruGetList is the LRU history according to Get function. + eventList *glist.List // eventList is the asynchronous event list for internal data synchronization. + closed *gtype.Bool // closed controls the cache closed or not. +} + +// Internal cache item. 
+type adapterMemoryItem struct { + v interface{} // Value. + e int64 // Expire timestamp in milliseconds. +} + +// Internal event item. +type adapterMemoryEvent struct { + k interface{} // Key. + e int64 // Expire time in milliseconds. +} + +const ( + // defaultMaxExpire is the default expire time for no expiring items. + // It equals to math.MaxInt64/1000000. + defaultMaxExpire = 9223372036854 +) + +// NewAdapterMemory creates and returns a new memory cache object. +func NewAdapterMemory(lruCap ...int) Adapter { + c := &AdapterMemory{ + data: newAdapterMemoryData(), + lruGetList: glist.New(true), + expireTimes: newAdapterMemoryExpireTimes(), + expireSets: newAdapterMemoryExpireSets(), + eventList: glist.New(true), + closed: gtype.NewBool(), + } + if len(lruCap) > 0 { + c.cap = lruCap[0] + c.lru = newMemCacheLru(c) + } + return c +} + +// Set sets cache with `key`-`value` pair, which is expired after `duration`. +// +// It does not expire if `duration` == 0. +// It deletes the keys of `data` if `duration` < 0 or given `value` is nil. +func (c *AdapterMemory) Set(ctx context.Context, key interface{}, value interface{}, duration time.Duration) error { + expireTime := c.getInternalExpire(duration) + c.data.Set(key, adapterMemoryItem{ + v: value, + e: expireTime, + }) + c.eventList.PushBack(&adapterMemoryEvent{ + k: key, + e: expireTime, + }) + return nil +} + +// SetMap batch sets cache with key-value pairs by `data` map, which is expired after `duration`. +// +// It does not expire if `duration` == 0. +// It deletes the keys of `data` if `duration` < 0 or given `value` is nil. 
+func (c *AdapterMemory) SetMap(ctx context.Context, data map[interface{}]interface{}, duration time.Duration) error { + var ( + expireTime = c.getInternalExpire(duration) + err = c.data.SetMap(data, expireTime) + ) + if err != nil { + return err + } + for k := range data { + c.eventList.PushBack(&adapterMemoryEvent{ + k: k, + e: expireTime, + }) + } + return nil +} + +// SetIfNotExist sets cache with `key`-`value` pair which is expired after `duration` +// if `key` does not exist in the cache. It returns true the `key` does not exist in the +// cache, and it sets `value` successfully to the cache, or else it returns false. +// +// It does not expire if `duration` == 0. +// It deletes the `key` if `duration` < 0 or given `value` is nil. +func (c *AdapterMemory) SetIfNotExist(ctx context.Context, key interface{}, value interface{}, duration time.Duration) (bool, error) { + isContained, err := c.Contains(ctx, key) + if err != nil { + return false, err + } + if !isContained { + if _, err = c.doSetWithLockCheck(ctx, key, value, duration); err != nil { + return false, err + } + return true, nil + } + return false, nil +} + +// SetIfNotExistFunc sets `key` with result of function `f` and returns true +// if `key` does not exist in the cache, or else it does nothing and returns false if `key` already exists. +// +// The parameter `value` can be type of `func() interface{}`, but it does nothing if its +// result is nil. +// +// It does not expire if `duration` == 0. +// It deletes the `key` if `duration` < 0 or given `value` is nil. 
+func (c *AdapterMemory) SetIfNotExistFunc(ctx context.Context, key interface{}, f Func, duration time.Duration) (bool, error) { + isContained, err := c.Contains(ctx, key) + if err != nil { + return false, err + } + if !isContained { + value, err := f(ctx) + if err != nil { + return false, err + } + if _, err = c.doSetWithLockCheck(ctx, key, value, duration); err != nil { + return false, err + } + return true, nil + } + return false, nil +} + +// SetIfNotExistFuncLock sets `key` with result of function `f` and returns true +// if `key` does not exist in the cache, or else it does nothing and returns false if `key` already exists. +// +// It does not expire if `duration` == 0. +// It deletes the `key` if `duration` < 0 or given `value` is nil. +// +// Note that it differs from function `SetIfNotExistFunc` is that the function `f` is executed within +// writing mutex lock for concurrent safety purpose. +func (c *AdapterMemory) SetIfNotExistFuncLock(ctx context.Context, key interface{}, f Func, duration time.Duration) (bool, error) { + isContained, err := c.Contains(ctx, key) + if err != nil { + return false, err + } + if !isContained { + if _, err = c.doSetWithLockCheck(ctx, key, f, duration); err != nil { + return false, err + } + return true, nil + } + return false, nil +} + +// Get retrieves and returns the associated value of given `key`. +// It returns nil if it does not exist, or its value is nil, or it's expired. +// If you would like to check if the `key` exists in the cache, it's better using function Contains. +func (c *AdapterMemory) Get(ctx context.Context, key interface{}) (*gvar.Var, error) { + item, ok := c.data.Get(key) + if ok && !item.IsExpired() { + // Adding to LRU history if LRU feature is enabled. + if c.cap > 0 { + c.lruGetList.PushBack(key) + } + return gvar.New(item.v), nil + } + return nil, nil +} + +// GetOrSet retrieves and returns the value of `key`, or sets `key`-`value` pair and +// returns `value` if `key` does not exist in the cache. 
The key-value pair expires +// after `duration`. +// +// It does not expire if `duration` == 0. +// It deletes the `key` if `duration` < 0 or given `value` is nil, but it does nothing +// if `value` is a function and the function result is nil. +func (c *AdapterMemory) GetOrSet(ctx context.Context, key interface{}, value interface{}, duration time.Duration) (*gvar.Var, error) { + v, err := c.Get(ctx, key) + if err != nil { + return nil, err + } + if v == nil { + return c.doSetWithLockCheck(ctx, key, value, duration) + } + return v, nil +} + +// GetOrSetFunc retrieves and returns the value of `key`, or sets `key` with result of +// function `f` and returns its result if `key` does not exist in the cache. The key-value +// pair expires after `duration`. +// +// It does not expire if `duration` == 0. +// It deletes the `key` if `duration` < 0 or given `value` is nil, but it does nothing +// if `value` is a function and the function result is nil. +func (c *AdapterMemory) GetOrSetFunc(ctx context.Context, key interface{}, f Func, duration time.Duration) (*gvar.Var, error) { + v, err := c.Get(ctx, key) + if err != nil { + return nil, err + } + if v == nil { + value, err := f(ctx) + if err != nil { + return nil, err + } + if value == nil { + return nil, nil + } + return c.doSetWithLockCheck(ctx, key, value, duration) + } + return v, nil +} + +// GetOrSetFuncLock retrieves and returns the value of `key`, or sets `key` with result of +// function `f` and returns its result if `key` does not exist in the cache. The key-value +// pair expires after `duration`. +// +// It does not expire if `duration` == 0. +// It deletes the `key` if `duration` < 0 or given `value` is nil, but it does nothing +// if `value` is a function and the function result is nil. +// +// Note that it differs from function `GetOrSetFunc` is that the function `f` is executed within +// writing mutex lock for concurrent safety purpose. 
+func (c *AdapterMemory) GetOrSetFuncLock(ctx context.Context, key interface{}, f Func, duration time.Duration) (*gvar.Var, error) { + v, err := c.Get(ctx, key) + if err != nil { + return nil, err + } + if v == nil { + return c.doSetWithLockCheck(ctx, key, f, duration) + } + return v, nil +} + +// Contains checks and returns true if `key` exists in the cache, or else returns false. +func (c *AdapterMemory) Contains(ctx context.Context, key interface{}) (bool, error) { + v, err := c.Get(ctx, key) + if err != nil { + return false, err + } + return v != nil, nil +} + +// GetExpire retrieves and returns the expiration of `key` in the cache. +// +// Note that, +// It returns 0 if the `key` does not expire. +// It returns -1 if the `key` does not exist in the cache. +func (c *AdapterMemory) GetExpire(ctx context.Context, key interface{}) (time.Duration, error) { + if item, ok := c.data.Get(key); ok { + return time.Duration(item.e-gtime.TimestampMilli()) * time.Millisecond, nil + } + return -1, nil +} + +// Remove deletes one or more keys from cache, and returns its value. +// If multiple keys are given, it returns the value of the last deleted item. +func (c *AdapterMemory) Remove(ctx context.Context, keys ...interface{}) (*gvar.Var, error) { + var removedKeys []interface{} + removedKeys, value, err := c.data.Remove(keys...) + if err != nil { + return nil, err + } + for _, key := range removedKeys { + c.eventList.PushBack(&adapterMemoryEvent{ + k: key, + e: gtime.TimestampMilli() - 1000000, + }) + } + return gvar.New(value), nil +} + +// Update updates the value of `key` without changing its expiration and returns the old value. +// The returned value `exist` is false if the `key` does not exist in the cache. +// +// It deletes the `key` if given `value` is nil. +// It does nothing if `key` does not exist in the cache. 
+func (c *AdapterMemory) Update(ctx context.Context, key interface{}, value interface{}) (oldValue *gvar.Var, exist bool, err error) { + v, exist, err := c.data.Update(key, value) + return gvar.New(v), exist, err +} + +// UpdateExpire updates the expiration of `key` and returns the old expiration duration value. +// +// It returns -1 and does nothing if the `key` does not exist in the cache. +// It deletes the `key` if `duration` < 0. +func (c *AdapterMemory) UpdateExpire(ctx context.Context, key interface{}, duration time.Duration) (oldDuration time.Duration, err error) { + newExpireTime := c.getInternalExpire(duration) + oldDuration, err = c.data.UpdateExpire(key, newExpireTime) + if err != nil { + return + } + if oldDuration != -1 { + c.eventList.PushBack(&adapterMemoryEvent{ + k: key, + e: newExpireTime, + }) + } + return +} + +// Size returns the size of the cache. +func (c *AdapterMemory) Size(ctx context.Context) (size int, err error) { + return c.data.Size() +} + +// Data returns a copy of all key-value pairs in the cache as map type. +func (c *AdapterMemory) Data(ctx context.Context) (map[interface{}]interface{}, error) { + return c.data.Data() +} + +// Keys returns all keys in the cache as slice. +func (c *AdapterMemory) Keys(ctx context.Context) ([]interface{}, error) { + return c.data.Keys() +} + +// Values returns all values in the cache as slice. +func (c *AdapterMemory) Values(ctx context.Context) ([]interface{}, error) { + return c.data.Values() +} + +// Clear clears all data of the cache. +// Note that this function is sensitive and should be carefully used. +func (c *AdapterMemory) Clear(ctx context.Context) error { + return c.data.Clear() +} + +// Close closes the cache. +func (c *AdapterMemory) Close(ctx context.Context) error { + if c.cap > 0 { + c.lru.Close() + } + c.closed.Set(true) + return nil +} + +// doSetWithLockCheck sets cache with `key`-`value` pair if `key` does not exist in the +// cache, which is expired after `duration`. 
+// +// It does not expire if `duration` == 0. +// The parameter `value` can be type of , but it does nothing if the +// function result is nil. +// +// It doubly checks the `key` whether exists in the cache using mutex writing lock +// before setting it to the cache. +func (c *AdapterMemory) doSetWithLockCheck(ctx context.Context, key interface{}, value interface{}, duration time.Duration) (result *gvar.Var, err error) { + expireTimestamp := c.getInternalExpire(duration) + v, err := c.data.SetWithLock(ctx, key, value, expireTimestamp) + c.eventList.PushBack(&adapterMemoryEvent{k: key, e: expireTimestamp}) + return gvar.New(v), err +} + +// getInternalExpire converts and returns the expiration time with given expired duration in milliseconds. +func (c *AdapterMemory) getInternalExpire(duration time.Duration) int64 { + if duration == 0 { + return defaultMaxExpire + } + return gtime.TimestampMilli() + duration.Nanoseconds()/1000000 +} + +// makeExpireKey groups the `expire` in milliseconds to its according seconds. +func (c *AdapterMemory) makeExpireKey(expire int64) int64 { + return int64(math.Ceil(float64(expire/1000)+1) * 1000) +} + +// syncEventAndClearExpired does the asynchronous task loop: +// 1. Asynchronously process the data in the event list, +// and synchronize the results to the `expireTimes` and `expireSets` properties. +// 2. Clean up the expired key-value pair data. +func (c *AdapterMemory) syncEventAndClearExpired(ctx context.Context) { + if c.closed.Val() { + gtimer.Exit() + return + } + var ( + event *adapterMemoryEvent + oldExpireTime int64 + newExpireTime int64 + ) + // ======================== + // Data Synchronization. + // ======================== + for { + v := c.eventList.PopFront() + if v == nil { + break + } + event = v.(*adapterMemoryEvent) + // Fetching the old expire set. + oldExpireTime = c.expireTimes.Get(event.k) + // Calculating the new expiration time set. 
+ newExpireTime = c.makeExpireKey(event.e) + if newExpireTime != oldExpireTime { + c.expireSets.GetOrNew(newExpireTime).Add(event.k) + if oldExpireTime != 0 { + c.expireSets.GetOrNew(oldExpireTime).Remove(event.k) + } + // Updating the expired time for . + c.expireTimes.Set(event.k, newExpireTime) + } + // Adding the key the LRU history by writing operations. + if c.cap > 0 { + c.lru.Push(event.k) + } + } + // Processing expired keys from LRU. + if c.cap > 0 { + if c.lruGetList.Len() > 0 { + for { + if v := c.lruGetList.PopFront(); v != nil { + c.lru.Push(v) + } else { + break + } + } + } + c.lru.SyncAndClear(ctx) + } + // ======================== + // Data Cleaning up. + // ======================== + var ( + expireSet *gset.Set + ek = c.makeExpireKey(gtime.TimestampMilli()) + eks = []int64{ek - 1000, ek - 2000, ek - 3000, ek - 4000, ek - 5000} + ) + for _, expireTime := range eks { + if expireSet = c.expireSets.Get(expireTime); expireSet != nil { + // Iterating the set to delete all keys in it. + expireSet.Iterator(func(key interface{}) bool { + c.clearByKey(key) + return true + }) + // Deleting the set after all of its keys are deleted. + c.expireSets.Delete(expireTime) + } + } +} + +// clearByKey deletes the key-value pair with given `key`. +// The parameter `force` specifies whether doing this deleting forcibly. +func (c *AdapterMemory) clearByKey(key interface{}, force ...bool) { + // Doubly check before really deleting it from cache. + c.data.DeleteWithDoubleCheck(key, force...) + + // Deleting its expiration time from `expireTimes`. + c.expireTimes.Delete(key) + + // Deleting it from LRU. 
+ if c.cap > 0 { + c.lru.Remove(key) + } +} diff --git a/vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter_memory_data.go b/vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter_memory_data.go new file mode 100644 index 00000000..941339d0 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter_memory_data.go @@ -0,0 +1,206 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gcache + +import ( + "context" + "sync" + "time" + + "github.com/gogf/gf/v2/os/gtime" +) + +type adapterMemoryData struct { + mu sync.RWMutex // dataMu ensures the concurrent safety of underlying data map. + data map[interface{}]adapterMemoryItem // data is the underlying cache data which is stored in a hash table. +} + +func newAdapterMemoryData() *adapterMemoryData { + return &adapterMemoryData{ + data: make(map[interface{}]adapterMemoryItem), + } +} + +// Update updates the value of `key` without changing its expiration and returns the old value. +// The returned value `exist` is false if the `key` does not exist in the cache. +// +// It deletes the `key` if given `value` is nil. +// It does nothing if `key` does not exist in the cache. +func (d *adapterMemoryData) Update(key interface{}, value interface{}) (oldValue interface{}, exist bool, err error) { + d.mu.Lock() + defer d.mu.Unlock() + if item, ok := d.data[key]; ok { + d.data[key] = adapterMemoryItem{ + v: value, + e: item.e, + } + return item.v, true, nil + } + return nil, false, nil +} + +// UpdateExpire updates the expiration of `key` and returns the old expiration duration value. +// +// It returns -1 and does nothing if the `key` does not exist in the cache. +// It deletes the `key` if `duration` < 0. 
+func (d *adapterMemoryData) UpdateExpire(key interface{}, expireTime int64) (oldDuration time.Duration, err error) { + d.mu.Lock() + defer d.mu.Unlock() + if item, ok := d.data[key]; ok { + d.data[key] = adapterMemoryItem{ + v: item.v, + e: expireTime, + } + return time.Duration(item.e-gtime.TimestampMilli()) * time.Millisecond, nil + } + return -1, nil +} + +// Remove deletes the one or more keys from cache, and returns its value. +// If multiple keys are given, it returns the value of the deleted last item. +func (d *adapterMemoryData) Remove(keys ...interface{}) (removedKeys []interface{}, value interface{}, err error) { + d.mu.Lock() + defer d.mu.Unlock() + removedKeys = make([]interface{}, 0) + for _, key := range keys { + item, ok := d.data[key] + if ok { + value = item.v + delete(d.data, key) + removedKeys = append(removedKeys, key) + } + } + return removedKeys, value, nil +} + +// Data returns a copy of all key-value pairs in the cache as map type. +func (d *adapterMemoryData) Data() (map[interface{}]interface{}, error) { + d.mu.RLock() + m := make(map[interface{}]interface{}, len(d.data)) + for k, v := range d.data { + if !v.IsExpired() { + m[k] = v.v + } + } + d.mu.RUnlock() + return m, nil +} + +// Keys returns all keys in the cache as slice. +func (d *adapterMemoryData) Keys() ([]interface{}, error) { + d.mu.RLock() + var ( + index = 0 + keys = make([]interface{}, len(d.data)) + ) + for k, v := range d.data { + if !v.IsExpired() { + keys[index] = k + index++ + } + } + d.mu.RUnlock() + return keys, nil +} + +// Values returns all values in the cache as slice. +func (d *adapterMemoryData) Values() ([]interface{}, error) { + d.mu.RLock() + var ( + index = 0 + values = make([]interface{}, len(d.data)) + ) + for _, v := range d.data { + if !v.IsExpired() { + values[index] = v.v + index++ + } + } + d.mu.RUnlock() + return values, nil +} + +// Size returns the size of the cache. 
+func (d *adapterMemoryData) Size() (size int, err error) { + d.mu.RLock() + size = len(d.data) + d.mu.RUnlock() + return size, nil +} + +// Clear clears all data of the cache. +// Note that this function is sensitive and should be carefully used. +func (d *adapterMemoryData) Clear() error { + d.mu.Lock() + defer d.mu.Unlock() + d.data = make(map[interface{}]adapterMemoryItem) + return nil +} + +func (d *adapterMemoryData) Get(key interface{}) (item adapterMemoryItem, ok bool) { + d.mu.RLock() + item, ok = d.data[key] + d.mu.RUnlock() + return +} + +func (d *adapterMemoryData) Set(key interface{}, value adapterMemoryItem) { + d.mu.Lock() + d.data[key] = value + d.mu.Unlock() +} + +// SetMap batch sets cache with key-value pairs by `data`, which is expired after `duration`. +// +// It does not expire if `duration` == 0. +// It deletes the keys of `data` if `duration` < 0 or given `value` is nil. +func (d *adapterMemoryData) SetMap(data map[interface{}]interface{}, expireTime int64) error { + d.mu.Lock() + for k, v := range data { + d.data[k] = adapterMemoryItem{ + v: v, + e: expireTime, + } + } + d.mu.Unlock() + return nil +} + +func (d *adapterMemoryData) SetWithLock(ctx context.Context, key interface{}, value interface{}, expireTimestamp int64) (interface{}, error) { + d.mu.Lock() + defer d.mu.Unlock() + var ( + err error + ) + if v, ok := d.data[key]; ok && !v.IsExpired() { + return v.v, nil + } + f, ok := value.(Func) + if !ok { + // Compatible with raw function value. + f, ok = value.(func(ctx context.Context) (value interface{}, err error)) + } + if ok { + if value, err = f(ctx); err != nil { + return nil, err + } + if value == nil { + return nil, nil + } + } + d.data[key] = adapterMemoryItem{v: value, e: expireTimestamp} + return value, nil +} + +func (d *adapterMemoryData) DeleteWithDoubleCheck(key interface{}, force ...bool) { + d.mu.Lock() + // Doubly check before really deleting it from cache. 
+ if item, ok := d.data[key]; (ok && item.IsExpired()) || (len(force) > 0 && force[0]) { + delete(d.data, key) + } + d.mu.Unlock() +} diff --git a/vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter_memory_expire_sets.go b/vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter_memory_expire_sets.go new file mode 100644 index 00000000..b49678c7 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter_memory_expire_sets.go @@ -0,0 +1,52 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gcache + +import ( + "sync" + + "github.com/gogf/gf/v2/container/gset" +) + +type adapterMemoryExpireSets struct { + mu sync.RWMutex // expireSetMu ensures the concurrent safety of expireSets map. + expireSets map[int64]*gset.Set // expireSets is the expiring timestamp to its key set mapping, which is used for quick indexing and deleting. 
+} + +func newAdapterMemoryExpireSets() *adapterMemoryExpireSets { + return &adapterMemoryExpireSets{ + expireSets: make(map[int64]*gset.Set), + } +} + +func (d *adapterMemoryExpireSets) Get(key int64) (result *gset.Set) { + d.mu.RLock() + result = d.expireSets[key] + d.mu.RUnlock() + return +} + +func (d *adapterMemoryExpireSets) GetOrNew(key int64) (result *gset.Set) { + if result = d.Get(key); result != nil { + return + } + d.mu.Lock() + if es, ok := d.expireSets[key]; ok { + result = es + } else { + result = gset.New(true) + d.expireSets[key] = result + } + d.mu.Unlock() + return +} + +func (d *adapterMemoryExpireSets) Delete(key int64) { + d.mu.Lock() + delete(d.expireSets, key) + d.mu.Unlock() +} diff --git a/vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter_memory_expire_times.go b/vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter_memory_expire_times.go new file mode 100644 index 00000000..af3d4b41 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter_memory_expire_times.go @@ -0,0 +1,41 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gcache + +import ( + "sync" +) + +type adapterMemoryExpireTimes struct { + mu sync.RWMutex // expireTimeMu ensures the concurrent safety of expireTimes map. + expireTimes map[interface{}]int64 // expireTimes is the expiring key to its timestamp mapping, which is used for quick indexing and deleting. 
+} + +func newAdapterMemoryExpireTimes() *adapterMemoryExpireTimes { + return &adapterMemoryExpireTimes{ + expireTimes: make(map[interface{}]int64), + } +} + +func (d *adapterMemoryExpireTimes) Get(key interface{}) (value int64) { + d.mu.RLock() + value = d.expireTimes[key] + d.mu.RUnlock() + return +} + +func (d *adapterMemoryExpireTimes) Set(key interface{}, value int64) { + d.mu.Lock() + d.expireTimes[key] = value + d.mu.Unlock() +} + +func (d *adapterMemoryExpireTimes) Delete(key interface{}) { + d.mu.Lock() + delete(d.expireTimes, key) + d.mu.Unlock() +} diff --git a/vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter_memory_item.go b/vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter_memory_item.go new file mode 100644 index 00000000..5a7862ca --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter_memory_item.go @@ -0,0 +1,19 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gcache + +import ( + "github.com/gogf/gf/v2/os/gtime" +) + +// IsExpired checks whether `item` is expired. +func (item *adapterMemoryItem) IsExpired() bool { + // Note that it should use greater than or equal judgement here + // imagining that the cache time is only 1 millisecond. + + return item.e < gtime.TimestampMilli() +} diff --git a/vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter_memory_lru.go b/vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter_memory_lru.go new file mode 100644 index 00000000..6583ec96 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter_memory_lru.go @@ -0,0 +1,100 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. 
+// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gcache + +import ( + "context" + + "github.com/gogf/gf/v2/container/glist" + "github.com/gogf/gf/v2/container/gmap" + "github.com/gogf/gf/v2/container/gtype" + "github.com/gogf/gf/v2/os/gtimer" +) + +// LRU cache object. +// It uses list.List from stdlib for its underlying doubly linked list. +type adapterMemoryLru struct { + cache *AdapterMemory // Parent cache object. + data *gmap.Map // Key mapping to the item of the list. + list *glist.List // Key list. + rawList *glist.List // History for key adding. + closed *gtype.Bool // Closed or not. +} + +// newMemCacheLru creates and returns a new LRU object. +func newMemCacheLru(cache *AdapterMemory) *adapterMemoryLru { + lru := &adapterMemoryLru{ + cache: cache, + data: gmap.New(true), + list: glist.New(true), + rawList: glist.New(true), + closed: gtype.NewBool(), + } + return lru +} + +// Close closes the LRU object. +func (lru *adapterMemoryLru) Close() { + lru.closed.Set(true) +} + +// Remove deletes the `key` FROM `lru`. +func (lru *adapterMemoryLru) Remove(key interface{}) { + if v := lru.data.Get(key); v != nil { + lru.data.Remove(key) + lru.list.Remove(v.(*glist.Element)) + } +} + +// Size returns the size of `lru`. +func (lru *adapterMemoryLru) Size() int { + return lru.data.Size() +} + +// Push pushes `key` to the tail of `lru`. +func (lru *adapterMemoryLru) Push(key interface{}) { + lru.rawList.PushBack(key) +} + +// Pop deletes and returns the key from tail of `lru`. +func (lru *adapterMemoryLru) Pop() interface{} { + if v := lru.list.PopBack(); v != nil { + lru.data.Remove(v) + return v + } + return nil +} + +// SyncAndClear synchronizes the keys from `rawList` to `list` and `data` +// using Least Recently Used algorithm. +func (lru *adapterMemoryLru) SyncAndClear(ctx context.Context) { + if lru.closed.Val() { + gtimer.Exit() + return + } + // Data synchronization. 
+ var alreadyExistItem interface{} + for { + if rawListItem := lru.rawList.PopFront(); rawListItem != nil { + // Deleting the key from list. + if alreadyExistItem = lru.data.Get(rawListItem); alreadyExistItem != nil { + lru.list.Remove(alreadyExistItem.(*glist.Element)) + } + // Pushing key to the head of the list + // and setting its list item to hash table for quick indexing. + lru.data.Set(rawListItem, lru.list.PushFront(rawListItem)) + } else { + break + } + } + // Data cleaning up. + for clearLength := lru.Size() - lru.cache.cap; clearLength > 0; clearLength-- { + if topKey := lru.Pop(); topKey != nil { + lru.cache.clearByKey(topKey, true) + } + } +} diff --git a/vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter_redis.go b/vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter_redis.go new file mode 100644 index 00000000..b9cb45b3 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/gcache/gcache_adapter_redis.go @@ -0,0 +1,438 @@ +// Copyright 2020 gf Author(https://github.com/gogf/gf). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gcache + +import ( + "context" + "time" + + "github.com/gogf/gf/v2/container/gvar" + "github.com/gogf/gf/v2/database/gredis" + "github.com/gogf/gf/v2/util/gconv" +) + +// AdapterRedis is the gcache adapter implements using Redis server. +type AdapterRedis struct { + redis *gredis.Redis +} + +// NewAdapterRedis creates and returns a new memory cache object. +func NewAdapterRedis(redis *gredis.Redis) Adapter { + return &AdapterRedis{ + redis: redis, + } +} + +// Set sets cache with `key`-`value` pair, which is expired after `duration`. +// +// It does not expire if `duration` == 0. +// It deletes the keys of `data` if `duration` < 0 or given `value` is nil. 
+func (c *AdapterRedis) Set(ctx context.Context, key interface{}, value interface{}, duration time.Duration) (err error) { + redisKey := gconv.String(key) + if value == nil || duration < 0 { + _, err = c.redis.Del(ctx, redisKey) + } else { + if duration == 0 { + _, err = c.redis.Set(ctx, redisKey, value) + } else { + err = c.redis.SetEX(ctx, redisKey, value, int64(duration.Seconds())) + } + } + return err +} + +// SetMap batch sets cache with key-value pairs by `data` map, which is expired after `duration`. +// +// It does not expire if `duration` == 0. +// It deletes the keys of `data` if `duration` < 0 or given `value` is nil. +func (c *AdapterRedis) SetMap(ctx context.Context, data map[interface{}]interface{}, duration time.Duration) error { + if len(data) == 0 { + return nil + } + // DEL. + if duration < 0 { + var ( + index = 0 + keys = make([]string, len(data)) + ) + for k := range data { + keys[index] = gconv.String(k) + index += 1 + } + _, err := c.redis.Del(ctx, keys...) + if err != nil { + return err + } + } + if duration == 0 { + err := c.redis.MSet(ctx, gconv.Map(data)) + if err != nil { + return err + } + } + if duration > 0 { + var err error + for k, v := range data { + if err = c.Set(ctx, k, v, duration); err != nil { + return err + } + } + } + return nil +} + +// SetIfNotExist sets cache with `key`-`value` pair which is expired after `duration` +// if `key` does not exist in the cache. It returns true the `key` does not exist in the +// cache, and it sets `value` successfully to the cache, or else it returns false. +// +// It does not expire if `duration` == 0. +// It deletes the `key` if `duration` < 0 or given `value` is nil. +func (c *AdapterRedis) SetIfNotExist(ctx context.Context, key interface{}, value interface{}, duration time.Duration) (bool, error) { + var ( + err error + redisKey = gconv.String(key) + ) + // Execute the function and retrieve the result. + f, ok := value.(Func) + if !ok { + // Compatible with raw function value. 
+ f, ok = value.(func(ctx context.Context) (value interface{}, err error)) + } + if ok { + if value, err = f(ctx); err != nil { + return false, err + } + } + // DEL. + if duration < 0 || value == nil { + var delResult int64 + delResult, err = c.redis.Del(ctx, redisKey) + if err != nil { + return false, err + } + if delResult == 1 { + return true, err + } + return false, err + } + ok, err = c.redis.SetNX(ctx, redisKey, value) + if err != nil { + return ok, err + } + if ok && duration > 0 { + // Set the expiration. + _, err = c.redis.Expire(ctx, redisKey, int64(duration.Seconds())) + if err != nil { + return ok, err + } + return ok, err + } + return ok, err +} + +// SetIfNotExistFunc sets `key` with result of function `f` and returns true +// if `key` does not exist in the cache, or else it does nothing and returns false if `key` already exists. +// +// The parameter `value` can be type of `func() interface{}`, but it does nothing if its +// result is nil. +// +// It does not expire if `duration` == 0. +// It deletes the `key` if `duration` < 0 or given `value` is nil. +func (c *AdapterRedis) SetIfNotExistFunc(ctx context.Context, key interface{}, f Func, duration time.Duration) (ok bool, err error) { + value, err := f(ctx) + if err != nil { + return false, err + } + return c.SetIfNotExist(ctx, key, value, duration) +} + +// SetIfNotExistFuncLock sets `key` with result of function `f` and returns true +// if `key` does not exist in the cache, or else it does nothing and returns false if `key` already exists. +// +// It does not expire if `duration` == 0. +// It deletes the `key` if `duration` < 0 or given `value` is nil. +// +// Note that it differs from function `SetIfNotExistFunc` is that the function `f` is executed within +// writing mutex lock for concurrent safety purpose. 
+func (c *AdapterRedis) SetIfNotExistFuncLock(ctx context.Context, key interface{}, f Func, duration time.Duration) (ok bool, err error) { + value, err := f(ctx) + if err != nil { + return false, err + } + return c.SetIfNotExist(ctx, key, value, duration) +} + +// Get retrieves and returns the associated value of given . +// It returns nil if it does not exist or its value is nil. +func (c *AdapterRedis) Get(ctx context.Context, key interface{}) (*gvar.Var, error) { + return c.redis.Get(ctx, gconv.String(key)) +} + +// GetOrSet retrieves and returns the value of `key`, or sets `key`-`value` pair and +// returns `value` if `key` does not exist in the cache. The key-value pair expires +// after `duration`. +// +// It does not expire if `duration` == 0. +// It deletes the `key` if `duration` < 0 or given `value` is nil, but it does nothing +// if `value` is a function and the function result is nil. +func (c *AdapterRedis) GetOrSet(ctx context.Context, key interface{}, value interface{}, duration time.Duration) (result *gvar.Var, err error) { + result, err = c.Get(ctx, key) + if err != nil { + return nil, err + } + if result.IsNil() { + return gvar.New(value), c.Set(ctx, key, value, duration) + } + return +} + +// GetOrSetFunc retrieves and returns the value of `key`, or sets `key` with result of +// function `f` and returns its result if `key` does not exist in the cache. The key-value +// pair expires after `duration`. +// +// It does not expire if `duration` == 0. +// It deletes the `key` if `duration` < 0 or given `value` is nil, but it does nothing +// if `value` is a function and the function result is nil. 
+func (c *AdapterRedis) GetOrSetFunc(ctx context.Context, key interface{}, f Func, duration time.Duration) (result *gvar.Var, err error) { + v, err := c.Get(ctx, key) + if err != nil { + return nil, err + } + if v.IsNil() { + value, err := f(ctx) + if err != nil { + return nil, err + } + if value == nil { + return nil, nil + } + return gvar.New(value), c.Set(ctx, key, value, duration) + } else { + return v, nil + } +} + +// GetOrSetFuncLock retrieves and returns the value of `key`, or sets `key` with result of +// function `f` and returns its result if `key` does not exist in the cache. The key-value +// pair expires after `duration`. +// +// It does not expire if `duration` == 0. +// It deletes the `key` if `duration` < 0 or given `value` is nil, but it does nothing +// if `value` is a function and the function result is nil. +// +// Note that it differs from function `GetOrSetFunc` is that the function `f` is executed within +// writing mutex lock for concurrent safety purpose. +func (c *AdapterRedis) GetOrSetFuncLock(ctx context.Context, key interface{}, f Func, duration time.Duration) (result *gvar.Var, err error) { + return c.GetOrSetFunc(ctx, key, f, duration) +} + +// Contains checks and returns true if `key` exists in the cache, or else returns false. +func (c *AdapterRedis) Contains(ctx context.Context, key interface{}) (bool, error) { + n, err := c.redis.Exists(ctx, gconv.String(key)) + if err != nil { + return false, err + } + return n > 0, nil +} + +// Size returns the number of items in the cache. +func (c *AdapterRedis) Size(ctx context.Context) (size int, err error) { + n, err := c.redis.DBSize(ctx) + if err != nil { + return 0, err + } + return int(n), nil +} + +// Data returns a copy of all key-value pairs in the cache as map type. +// Note that this function may lead lots of memory usage, you can implement this function +// if necessary. +func (c *AdapterRedis) Data(ctx context.Context) (map[interface{}]interface{}, error) { + // Keys. 
+ keys, err := c.redis.Keys(ctx, "*") + if err != nil { + return nil, err + } + // Key-Value pairs. + var m map[string]*gvar.Var + m, err = c.redis.MGet(ctx, keys...) + if err != nil { + return nil, err + } + // Type converting. + data := make(map[interface{}]interface{}) + for k, v := range m { + data[k] = v.Val() + } + return data, nil +} + +// Keys returns all keys in the cache as slice. +func (c *AdapterRedis) Keys(ctx context.Context) ([]interface{}, error) { + keys, err := c.redis.Keys(ctx, "*") + if err != nil { + return nil, err + } + return gconv.Interfaces(keys), nil +} + +// Values returns all values in the cache as slice. +func (c *AdapterRedis) Values(ctx context.Context) ([]interface{}, error) { + // Keys. + keys, err := c.redis.Keys(ctx, "*") + if err != nil { + return nil, err + } + // Key-Value pairs. + var m map[string]*gvar.Var + m, err = c.redis.MGet(ctx, keys...) + if err != nil { + return nil, err + } + // Values. + var values []interface{} + for _, key := range keys { + if v := m[key]; !v.IsNil() { + values = append(values, v.Val()) + } + } + return values, nil +} + +// Update updates the value of `key` without changing its expiration and returns the old value. +// The returned value `exist` is false if the `key` does not exist in the cache. +// +// It deletes the `key` if given `value` is nil. +// It does nothing if `key` does not exist in the cache. +func (c *AdapterRedis) Update(ctx context.Context, key interface{}, value interface{}) (oldValue *gvar.Var, exist bool, err error) { + var ( + v *gvar.Var + oldTTL int64 + redisKey = gconv.String(key) + ) + // TTL. + oldTTL, err = c.redis.TTL(ctx, redisKey) + if err != nil { + return + } + if oldTTL == -2 { + // It does not exist. + return + } + // Check existence. + v, err = c.redis.Get(ctx, redisKey) + if err != nil { + return + } + oldValue = v + // DEL. + if value == nil { + _, err = c.redis.Del(ctx, redisKey) + if err != nil { + return + } + return + } + // Update the value. 
+ if oldTTL == -1 { + _, err = c.redis.Set(ctx, redisKey, value) + } else { + err = c.redis.SetEX(ctx, redisKey, value, oldTTL) + } + return oldValue, true, err +} + +// UpdateExpire updates the expiration of `key` and returns the old expiration duration value. +// +// It returns -1 and does nothing if the `key` does not exist in the cache. +// It deletes the `key` if `duration` < 0. +func (c *AdapterRedis) UpdateExpire(ctx context.Context, key interface{}, duration time.Duration) (oldDuration time.Duration, err error) { + var ( + v *gvar.Var + oldTTL int64 + redisKey = gconv.String(key) + ) + // TTL. + oldTTL, err = c.redis.TTL(ctx, redisKey) + if err != nil { + return + } + if oldTTL == -2 { + // It does not exist. + oldTTL = -1 + return + } + oldDuration = time.Duration(oldTTL) * time.Second + // DEL. + if duration < 0 { + _, err = c.redis.Del(ctx, redisKey) + return + } + // Update the expiration. + if duration > 0 { + _, err = c.redis.Expire(ctx, redisKey, int64(duration.Seconds())) + } + // No expire. + if duration == 0 { + v, err = c.redis.Get(ctx, redisKey) + if err != nil { + return + } + _, err = c.redis.Set(ctx, redisKey, v.Val()) + } + return +} + +// GetExpire retrieves and returns the expiration of `key` in the cache. +// +// Note that, +// It returns 0 if the `key` does not expire. +// It returns -1 if the `key` does not exist in the cache. +func (c *AdapterRedis) GetExpire(ctx context.Context, key interface{}) (time.Duration, error) { + ttl, err := c.redis.TTL(ctx, gconv.String(key)) + if err != nil { + return 0, err + } + switch ttl { + case -1: + return 0, nil + case -2: + return -1, nil + default: + return time.Duration(ttl) * time.Second, nil + } +} + +// Remove deletes the one or more keys from cache, and returns its value. +// If multiple keys are given, it returns the value of the deleted last item. 
+func (c *AdapterRedis) Remove(ctx context.Context, keys ...interface{}) (lastValue *gvar.Var, err error) { + if len(keys) == 0 { + return nil, nil + } + // Retrieves the last key value. + if lastValue, err = c.redis.Get(ctx, gconv.String(keys[len(keys)-1])); err != nil { + return nil, err + } + // Deletes all given keys. + _, err = c.redis.Del(ctx, gconv.Strings(keys)...) + return +} + +// Clear clears all data of the cache. +// Note that this function is sensitive and should be carefully used. +// It uses `FLUSHDB` command in redis server, which might be disabled in server. +func (c *AdapterRedis) Clear(ctx context.Context) (err error) { + // The "FLUSHDB" may not be available. + err = c.redis.FlushDB(ctx) + return +} + +// Close closes the cache. +func (c *AdapterRedis) Close(ctx context.Context) error { + // It does nothing. + return nil +} diff --git a/vendor/github.com/gogf/gf/v2/os/gcache/gcache_cache.go b/vendor/github.com/gogf/gf/v2/os/gcache/gcache_cache.go new file mode 100644 index 00000000..9a039457 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/gcache/gcache_cache.go @@ -0,0 +1,70 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gcache + +import ( + "context" + "time" + + "github.com/gogf/gf/v2/os/gtimer" + "github.com/gogf/gf/v2/util/gconv" +) + +// Cache struct. +type Cache struct { + localAdapter +} + +// localAdapter is alias of Adapter, for embedded attribute purpose only. +type localAdapter = Adapter + +// New creates and returns a new cache object using default memory adapter. +// Note that the LRU feature is only available using memory adapter. +func New(lruCap ...int) *Cache { + memAdapter := NewAdapterMemory(lruCap...) 
+ c := &Cache{ + localAdapter: memAdapter, + } + // Here may be a "timer leak" if adapter is manually changed from memory adapter. + // Do not worry about this, as adapter is less changed, and it does nothing if it's not used. + gtimer.AddSingleton(context.Background(), time.Second, memAdapter.(*AdapterMemory).syncEventAndClearExpired) + return c +} + +// NewWithAdapter creates and returns a Cache object with given Adapter implements. +func NewWithAdapter(adapter Adapter) *Cache { + return &Cache{ + localAdapter: adapter, + } +} + +// SetAdapter changes the adapter for this cache. +// Be very note that, this setting function is not concurrent-safe, which means you should not call +// this setting function concurrently in multiple goroutines. +func (c *Cache) SetAdapter(adapter Adapter) { + c.localAdapter = adapter +} + +// GetAdapter returns the adapter that is set in current Cache. +func (c *Cache) GetAdapter() Adapter { + return c.localAdapter +} + +// Removes deletes `keys` in the cache. +func (c *Cache) Removes(ctx context.Context, keys []interface{}) error { + _, err := c.Remove(ctx, keys...) + return err +} + +// KeyStrings returns all keys in the cache as string slice. +func (c *Cache) KeyStrings(ctx context.Context) ([]string, error) { + keys, err := c.Keys(ctx) + if err != nil { + return nil, err + } + return gconv.Strings(keys), nil +} diff --git a/vendor/github.com/gogf/gf/v2/os/gcache/gcache_cache_must.go b/vendor/github.com/gogf/gf/v2/os/gcache/gcache_cache_must.go new file mode 100644 index 00000000..65961a00 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/gcache/gcache_cache_must.go @@ -0,0 +1,113 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. 
+ +package gcache + +import ( + "context" + "time" + + "github.com/gogf/gf/v2/container/gvar" +) + +// MustGet acts like Get, but it panics if any error occurs. +func (c *Cache) MustGet(ctx context.Context, key interface{}) *gvar.Var { + v, err := c.Get(ctx, key) + if err != nil { + panic(err) + } + return v +} + +// MustGetOrSet acts like GetOrSet, but it panics if any error occurs. +func (c *Cache) MustGetOrSet(ctx context.Context, key interface{}, value interface{}, duration time.Duration) *gvar.Var { + v, err := c.GetOrSet(ctx, key, value, duration) + if err != nil { + panic(err) + } + return v +} + +// MustGetOrSetFunc acts like GetOrSetFunc, but it panics if any error occurs. +func (c *Cache) MustGetOrSetFunc(ctx context.Context, key interface{}, f Func, duration time.Duration) *gvar.Var { + v, err := c.GetOrSetFunc(ctx, key, f, duration) + if err != nil { + panic(err) + } + return v +} + +// MustGetOrSetFuncLock acts like GetOrSetFuncLock, but it panics if any error occurs. +func (c *Cache) MustGetOrSetFuncLock(ctx context.Context, key interface{}, f Func, duration time.Duration) *gvar.Var { + v, err := c.GetOrSetFuncLock(ctx, key, f, duration) + if err != nil { + panic(err) + } + return v +} + +// MustContains acts like Contains, but it panics if any error occurs. +func (c *Cache) MustContains(ctx context.Context, key interface{}) bool { + v, err := c.Contains(ctx, key) + if err != nil { + panic(err) + } + return v +} + +// MustGetExpire acts like GetExpire, but it panics if any error occurs. +func (c *Cache) MustGetExpire(ctx context.Context, key interface{}) time.Duration { + v, err := c.GetExpire(ctx, key) + if err != nil { + panic(err) + } + return v +} + +// MustSize acts like Size, but it panics if any error occurs. +func (c *Cache) MustSize(ctx context.Context) int { + v, err := c.Size(ctx) + if err != nil { + panic(err) + } + return v +} + +// MustData acts like Data, but it panics if any error occurs. 
+func (c *Cache) MustData(ctx context.Context) map[interface{}]interface{} { + v, err := c.Data(ctx) + if err != nil { + panic(err) + } + return v +} + +// MustKeys acts like Keys, but it panics if any error occurs. +func (c *Cache) MustKeys(ctx context.Context) []interface{} { + v, err := c.Keys(ctx) + if err != nil { + panic(err) + } + return v +} + +// MustKeyStrings acts like KeyStrings, but it panics if any error occurs. +func (c *Cache) MustKeyStrings(ctx context.Context) []string { + v, err := c.KeyStrings(ctx) + if err != nil { + panic(err) + } + return v +} + +// MustValues acts like Values, but it panics if any error occurs. +func (c *Cache) MustValues(ctx context.Context) []interface{} { + v, err := c.Values(ctx) + if err != nil { + panic(err) + } + return v +} diff --git a/vendor/github.com/gogf/gf/v2/os/gcron/gcron.go b/vendor/github.com/gogf/gf/v2/os/gcron/gcron.go new file mode 100644 index 00000000..05e834d3 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/gcron/gcron.go @@ -0,0 +1,122 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +// Package gcron implements a cron pattern parser and job runner. +package gcron + +import ( + "context" + "time" + + "github.com/gogf/gf/v2/os/glog" + "github.com/gogf/gf/v2/os/gtimer" +) + +const ( + StatusReady = gtimer.StatusReady + StatusRunning = gtimer.StatusRunning + StatusStopped = gtimer.StatusStopped + StatusClosed = gtimer.StatusClosed +) + +var ( + // Default cron object. + defaultCron = New() +) + +// SetLogger sets the logger for cron. +func SetLogger(logger glog.ILogger) { + defaultCron.SetLogger(logger) +} + +// GetLogger returns the logger in the cron. +func GetLogger() glog.ILogger { + return defaultCron.GetLogger() +} + +// Add adds a timed task to default cron object. 
+// A unique `name` can be bound with the timed task. +// It returns and error if the `name` is already used. +func Add(ctx context.Context, pattern string, job JobFunc, name ...string) (*Entry, error) { + return defaultCron.Add(ctx, pattern, job, name...) +} + +// AddSingleton adds a singleton timed task, to default cron object. +// A singleton timed task is that can only be running one single instance at the same time. +// A unique `name` can be bound with the timed task. +// It returns and error if the `name` is already used. +func AddSingleton(ctx context.Context, pattern string, job JobFunc, name ...string) (*Entry, error) { + return defaultCron.AddSingleton(ctx, pattern, job, name...) +} + +// AddOnce adds a timed task which can be run only once, to default cron object. +// A unique `name` can be bound with the timed task. +// It returns and error if the `name` is already used. +func AddOnce(ctx context.Context, pattern string, job JobFunc, name ...string) (*Entry, error) { + return defaultCron.AddOnce(ctx, pattern, job, name...) +} + +// AddTimes adds a timed task which can be run specified times, to default cron object. +// A unique `name` can be bound with the timed task. +// It returns and error if the `name` is already used. +func AddTimes(ctx context.Context, pattern string, times int, job JobFunc, name ...string) (*Entry, error) { + return defaultCron.AddTimes(ctx, pattern, times, job, name...) +} + +// DelayAdd adds a timed task to default cron object after `delay` time. +func DelayAdd(ctx context.Context, delay time.Duration, pattern string, job JobFunc, name ...string) { + defaultCron.DelayAdd(ctx, delay, pattern, job, name...) +} + +// DelayAddSingleton adds a singleton timed task after `delay` time to default cron object. +func DelayAddSingleton(ctx context.Context, delay time.Duration, pattern string, job JobFunc, name ...string) { + defaultCron.DelayAddSingleton(ctx, delay, pattern, job, name...) 
+} + +// DelayAddOnce adds a timed task after `delay` time to default cron object. +// This timed task can be run only once. +func DelayAddOnce(ctx context.Context, delay time.Duration, pattern string, job JobFunc, name ...string) { + defaultCron.DelayAddOnce(ctx, delay, pattern, job, name...) +} + +// DelayAddTimes adds a timed task after `delay` time to default cron object. +// This timed task can be run specified times. +func DelayAddTimes(ctx context.Context, delay time.Duration, pattern string, times int, job JobFunc, name ...string) { + defaultCron.DelayAddTimes(ctx, delay, pattern, times, job, name...) +} + +// Search returns a scheduled task with the specified `name`. +// It returns nil if no found. +func Search(name string) *Entry { + return defaultCron.Search(name) +} + +// Remove deletes scheduled task which named `name`. +func Remove(name string) { + defaultCron.Remove(name) +} + +// Size returns the size of the timed tasks of default cron. +func Size() int { + return defaultCron.Size() +} + +// Entries return all timed tasks as slice. +func Entries() []*Entry { + return defaultCron.Entries() +} + +// Start starts running the specified timed task named `name`. +// If no`name` specified, it starts the entire cron. +func Start(name ...string) { + defaultCron.Start(name...) +} + +// Stop stops running the specified timed task named `name`. +// If no`name` specified, it stops the entire cron. +func Stop(name ...string) { + defaultCron.Stop(name...) +} diff --git a/vendor/github.com/gogf/gf/v2/os/gcron/gcron_cron.go b/vendor/github.com/gogf/gf/v2/os/gcron/gcron_cron.go new file mode 100644 index 00000000..1a83f57b --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/gcron/gcron_cron.go @@ -0,0 +1,221 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. 
+ +package gcron + +import ( + "context" + "time" + + "github.com/gogf/gf/v2/container/garray" + "github.com/gogf/gf/v2/container/gmap" + "github.com/gogf/gf/v2/container/gtype" + "github.com/gogf/gf/v2/os/glog" + "github.com/gogf/gf/v2/os/gtimer" +) + +type Cron struct { + idGen *gtype.Int64 // Used for unique name generation. + status *gtype.Int // Timed task status(0: Not Start; 1: Running; 2: Stopped; -1: Closed) + entries *gmap.StrAnyMap // All timed task entries. + logger glog.ILogger // Logger, it is nil in default. +} + +// New returns a new Cron object with default settings. +func New() *Cron { + return &Cron{ + idGen: gtype.NewInt64(), + status: gtype.NewInt(StatusRunning), + entries: gmap.NewStrAnyMap(true), + } +} + +// SetLogger sets the logger for cron. +func (c *Cron) SetLogger(logger glog.ILogger) { + c.logger = logger +} + +// GetLogger returns the logger in the cron. +func (c *Cron) GetLogger() glog.ILogger { + return c.logger +} + +// AddEntry creates and returns a new Entry object. +func (c *Cron) AddEntry(ctx context.Context, pattern string, job JobFunc, times int, isSingleton bool, name ...string) (*Entry, error) { + var ( + entryName = "" + infinite = false + ) + if len(name) > 0 { + entryName = name[0] + } + if times <= 0 { + infinite = true + } + return c.doAddEntry(doAddEntryInput{ + Name: entryName, + Job: job, + Ctx: ctx, + Times: times, + Pattern: pattern, + IsSingleton: isSingleton, + Infinite: infinite, + }) +} + +// Add adds a timed task. +// A unique `name` can be bound with the timed task. +// It returns and error if the `name` is already used. +func (c *Cron) Add(ctx context.Context, pattern string, job JobFunc, name ...string) (*Entry, error) { + return c.AddEntry(ctx, pattern, job, -1, false, name...) +} + +// AddSingleton adds a singleton timed task. +// A singleton timed task is that can only be running one single instance at the same time. +// A unique `name` can be bound with the timed task. 
+// It returns and error if the `name` is already used. +func (c *Cron) AddSingleton(ctx context.Context, pattern string, job JobFunc, name ...string) (*Entry, error) { + return c.AddEntry(ctx, pattern, job, -1, true, name...) +} + +// AddTimes adds a timed task which can be run specified times. +// A unique `name` can be bound with the timed task. +// It returns and error if the `name` is already used. +func (c *Cron) AddTimes(ctx context.Context, pattern string, times int, job JobFunc, name ...string) (*Entry, error) { + return c.AddEntry(ctx, pattern, job, times, false, name...) +} + +// AddOnce adds a timed task which can be run only once. +// A unique `name` can be bound with the timed task. +// It returns and error if the `name` is already used. +func (c *Cron) AddOnce(ctx context.Context, pattern string, job JobFunc, name ...string) (*Entry, error) { + return c.AddEntry(ctx, pattern, job, 1, false, name...) +} + +// DelayAddEntry adds a timed task after `delay` time. +func (c *Cron) DelayAddEntry(ctx context.Context, delay time.Duration, pattern string, job JobFunc, times int, isSingleton bool, name ...string) { + gtimer.AddOnce(ctx, delay, func(ctx context.Context) { + if _, err := c.AddEntry(ctx, pattern, job, times, isSingleton, name...); err != nil { + panic(err) + } + }) +} + +// DelayAdd adds a timed task after `delay` time. +func (c *Cron) DelayAdd(ctx context.Context, delay time.Duration, pattern string, job JobFunc, name ...string) { + gtimer.AddOnce(ctx, delay, func(ctx context.Context) { + if _, err := c.Add(ctx, pattern, job, name...); err != nil { + panic(err) + } + }) +} + +// DelayAddSingleton adds a singleton timed task after `delay` time. 
+func (c *Cron) DelayAddSingleton(ctx context.Context, delay time.Duration, pattern string, job JobFunc, name ...string) { + gtimer.AddOnce(ctx, delay, func(ctx context.Context) { + if _, err := c.AddSingleton(ctx, pattern, job, name...); err != nil { + panic(err) + } + }) +} + +// DelayAddOnce adds a timed task after `delay` time. +// This timed task can be run only once. +func (c *Cron) DelayAddOnce(ctx context.Context, delay time.Duration, pattern string, job JobFunc, name ...string) { + gtimer.AddOnce(ctx, delay, func(ctx context.Context) { + if _, err := c.AddOnce(ctx, pattern, job, name...); err != nil { + panic(err) + } + }) +} + +// DelayAddTimes adds a timed task after `delay` time. +// This timed task can be run specified times. +func (c *Cron) DelayAddTimes(ctx context.Context, delay time.Duration, pattern string, times int, job JobFunc, name ...string) { + gtimer.AddOnce(ctx, delay, func(ctx context.Context) { + if _, err := c.AddTimes(ctx, pattern, times, job, name...); err != nil { + panic(err) + } + }) +} + +// Search returns a scheduled task with the specified `name`. +// It returns nil if not found. +func (c *Cron) Search(name string) *Entry { + if v := c.entries.Get(name); v != nil { + return v.(*Entry) + } + return nil +} + +// Start starts running the specified timed task named `name`. +// If no`name` specified, it starts the entire cron. +func (c *Cron) Start(name ...string) { + if len(name) > 0 { + for _, v := range name { + if entry := c.Search(v); entry != nil { + entry.Start() + } + } + } else { + c.status.Set(StatusReady) + } +} + +// Stop stops running the specified timed task named `name`. +// If no`name` specified, it stops the entire cron. +func (c *Cron) Stop(name ...string) { + if len(name) > 0 { + for _, v := range name { + if entry := c.Search(v); entry != nil { + entry.Stop() + } + } + } else { + c.status.Set(StatusStopped) + } +} + +// Remove deletes scheduled task which named `name`. 
+func (c *Cron) Remove(name string) { + if v := c.entries.Get(name); v != nil { + v.(*Entry).Close() + } +} + +// Close stops and closes current cron. +func (c *Cron) Close() { + c.status.Set(StatusClosed) +} + +// Size returns the size of the timed tasks. +func (c *Cron) Size() int { + return c.entries.Size() +} + +// Entries return all timed tasks as slice(order by registered time asc). +func (c *Cron) Entries() []*Entry { + array := garray.NewSortedArraySize(c.entries.Size(), func(v1, v2 interface{}) int { + entry1 := v1.(*Entry) + entry2 := v2.(*Entry) + if entry1.Time.Nanosecond() > entry2.Time.Nanosecond() { + return 1 + } + return -1 + }, true) + c.entries.RLockFunc(func(m map[string]interface{}) { + for _, v := range m { + array.Add(v.(*Entry)) + } + }) + entries := make([]*Entry, array.Len()) + array.RLockFunc(func(array []interface{}) { + for k, v := range array { + entries[k] = v.(*Entry) + } + }) + return entries +} diff --git a/vendor/github.com/gogf/gf/v2/os/gcron/gcron_entry.go b/vendor/github.com/gogf/gf/v2/os/gcron/gcron_entry.go new file mode 100644 index 00000000..878f2fa8 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/gcron/gcron_entry.go @@ -0,0 +1,195 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gcron + +import ( + "context" + "fmt" + "reflect" + "runtime" + "time" + + "github.com/gogf/gf/v2/container/gtype" + "github.com/gogf/gf/v2/errors/gcode" + "github.com/gogf/gf/v2/errors/gerror" + "github.com/gogf/gf/v2/os/glog" + "github.com/gogf/gf/v2/os/gtimer" + "github.com/gogf/gf/v2/util/gconv" +) + +// JobFunc is the timing called job function in cron. +type JobFunc = gtimer.JobFunc + +// Entry is timing task entry. +type Entry struct { + cron *Cron // Cron object belonged to. 
+ timerEntry *gtimer.Entry // Associated timer Entry. + schedule *cronSchedule // Timed schedule object. + jobName string // Callback function name(address info). + times *gtype.Int // Running times limit. + infinite *gtype.Bool // No times limit. + Name string // Entry name. + Job JobFunc `json:"-"` // Callback function. + Time time.Time // Registered time. +} + +type doAddEntryInput struct { + Name string // Name names this entry for manual control. + Job JobFunc // Job is the callback function for timed task execution. + Ctx context.Context // The context for the job. + Times int // Times specifies the running limit times for the entry. + Pattern string // Pattern is the crontab style string for scheduler. + IsSingleton bool // Singleton specifies whether timed task executing in singleton mode. + Infinite bool // Infinite specifies whether this entry is running with no times limit. +} + +// doAddEntry creates and returns a new Entry object. +func (c *Cron) doAddEntry(in doAddEntryInput) (*Entry, error) { + if in.Name != "" { + if c.Search(in.Name) != nil { + return nil, gerror.NewCodef(gcode.CodeInvalidOperation, `cron job "%s" already exists`, in.Name) + } + } + schedule, err := newSchedule(in.Pattern) + if err != nil { + return nil, err + } + // No limit for `times`, for timer checking scheduling every second. + entry := &Entry{ + cron: c, + schedule: schedule, + jobName: runtime.FuncForPC(reflect.ValueOf(in.Job).Pointer()).Name(), + times: gtype.NewInt(in.Times), + infinite: gtype.NewBool(in.Infinite), + Job: in.Job, + Time: time.Now(), + } + if in.Name != "" { + entry.Name = in.Name + } else { + entry.Name = "cron-" + gconv.String(c.idGen.Add(1)) + } + // When you add a scheduled task, you cannot allow it to run. + // It cannot start running when added to timer. 
+ // It should start running after the entry is added to the Cron entries map, to avoid the task + // from running during adding where the entries do not have the entry information, which might cause panic. + entry.timerEntry = gtimer.AddEntry( + in.Ctx, + time.Second, + entry.checkAndRun, + in.IsSingleton, + -1, + gtimer.StatusStopped, + ) + c.entries.Set(entry.Name, entry) + entry.timerEntry.Start() + return entry, nil +} + +// IsSingleton return whether this entry is a singleton timed task. +func (entry *Entry) IsSingleton() bool { + return entry.timerEntry.IsSingleton() +} + +// SetSingleton sets the entry running in singleton mode. +func (entry *Entry) SetSingleton(enabled bool) { + entry.timerEntry.SetSingleton(enabled) +} + +// SetTimes sets the times which the entry can run. +func (entry *Entry) SetTimes(times int) { + entry.times.Set(times) + entry.infinite.Set(false) +} + +// Status returns the status of entry. +func (entry *Entry) Status() int { + return entry.timerEntry.Status() +} + +// SetStatus sets the status of the entry. +func (entry *Entry) SetStatus(status int) int { + return entry.timerEntry.SetStatus(status) +} + +// Start starts running the entry. +func (entry *Entry) Start() { + entry.timerEntry.Start() +} + +// Stop stops running the entry. +func (entry *Entry) Stop() { + entry.timerEntry.Stop() +} + +// Close stops and removes the entry from cron. +func (entry *Entry) Close() { + entry.cron.entries.Remove(entry.Name) + entry.timerEntry.Close() +} + +// checkAndRun is the core timing task check logic. 
+func (entry *Entry) checkAndRun(ctx context.Context) { + currentTime := time.Now() + if !entry.schedule.checkMeetAndUpdateLastSeconds(ctx, currentTime) { + return + } + switch entry.cron.status.Val() { + case StatusStopped: + return + + case StatusClosed: + entry.logDebugf(ctx, `cron job "%s" is removed`, entry.getJobNameWithPattern()) + entry.Close() + + case StatusReady, StatusRunning: + defer func() { + if exception := recover(); exception != nil { + // Exception caught, it logs the error content to logger in default behavior. + entry.logErrorf(ctx, + `cron job "%s(%s)" end with error: %+v`, + entry.jobName, entry.schedule.pattern, exception, + ) + } else { + entry.logDebugf(ctx, `cron job "%s" ends`, entry.getJobNameWithPattern()) + } + if entry.timerEntry.Status() == StatusClosed { + entry.Close() + } + }() + + // Running times check. + if !entry.infinite.Val() { + times := entry.times.Add(-1) + if times <= 0 { + if entry.timerEntry.SetStatus(StatusClosed) == StatusClosed || times < 0 { + return + } + } + } + entry.logDebugf(ctx, `cron job "%s" starts`, entry.getJobNameWithPattern()) + entry.Job(ctx) + } +} + +func (entry *Entry) getJobNameWithPattern() string { + return fmt.Sprintf(`%s(%s)`, entry.jobName, entry.schedule.pattern) +} + +func (entry *Entry) logDebugf(ctx context.Context, format string, v ...interface{}) { + if logger := entry.cron.GetLogger(); logger != nil { + logger.Debugf(ctx, format, v...) + } +} + +func (entry *Entry) logErrorf(ctx context.Context, format string, v ...interface{}) { + logger := entry.cron.GetLogger() + if logger == nil { + logger = glog.DefaultLogger() + } + logger.Errorf(ctx, format, v...) +} diff --git a/vendor/github.com/gogf/gf/v2/os/gcron/gcron_schedule.go b/vendor/github.com/gogf/gf/v2/os/gcron/gcron_schedule.go new file mode 100644 index 00000000..abeb345e --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/gcron/gcron_schedule.go @@ -0,0 +1,412 @@ +// Copyright GoFrame Author(https://goframe.org). 
All Rights Reserved.
+//
+// This Source Code Form is subject to the terms of the MIT License.
+// If a copy of the MIT was not distributed with this file,
+// You can obtain one at https://github.com/gogf/gf.
+
+package gcron
+
+import (
+ "context"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/gogf/gf/v2/container/gtype"
+ "github.com/gogf/gf/v2/errors/gcode"
+ "github.com/gogf/gf/v2/errors/gerror"
+ "github.com/gogf/gf/v2/os/gtime"
+ "github.com/gogf/gf/v2/text/gregex"
+)
+
+// cronSchedule is the schedule for cron job.
+type cronSchedule struct {
+ createTimestamp int64 // Created timestamp in seconds.
+ everySeconds int64 // Running interval in seconds.
+ pattern string // The raw cron pattern string.
+ secondMap map[int]struct{} // Job can run in these second numbers.
+ minuteMap map[int]struct{} // Job can run in these minute numbers.
+ hourMap map[int]struct{} // Job can run in these hour numbers.
+ dayMap map[int]struct{} // Job can run in these day numbers.
+ weekMap map[int]struct{} // Job can run in these week numbers.
+ monthMap map[int]struct{} // Job can run in these month numbers.
+ lastTimestamp *gtype.Int64 // Last timestamp number, for timestamp fix in some delay.
+}
+
+const (
+ // regular expression for cron pattern, which contains 6 parts of time units.
+ regexForCron = `^([\-/\d\*\?,]+)\s+([\-/\d\*\?,]+)\s+([\-/\d\*\?,]+)\s+([\-/\d\*\?,]+)\s+([\-/\d\*\?,A-Za-z]+)\s+([\-/\d\*\?,A-Za-z]+)$`
+ patternItemTypeUnknown = iota
+ patternItemTypeWeek
+ patternItemTypeMonth
+)
+
+var (
+ // Predefined pattern map.
+ predefinedPatternMap = map[string]string{
+ "@yearly": "0 0 0 1 1 *",
+ "@annually": "0 0 0 1 1 *",
+ "@monthly": "0 0 0 1 * *",
+ "@weekly": "0 0 0 * * 0",
+ "@daily": "0 0 0 * * *",
+ "@midnight": "0 0 0 * * *",
+ "@hourly": "0 0 * * * *",
+ }
+ // Short month name to its number.
+ monthShortNameMap = map[string]int{ + "jan": 1, + "feb": 2, + "mar": 3, + "apr": 4, + "may": 5, + "jun": 6, + "jul": 7, + "aug": 8, + "sep": 9, + "oct": 10, + "nov": 11, + "dec": 12, + } + // Full month name to its number. + monthFullNameMap = map[string]int{ + "january": 1, + "february": 2, + "march": 3, + "april": 4, + "may": 5, + "june": 6, + "july": 7, + "august": 8, + "september": 9, + "october": 10, + "november": 11, + "december": 12, + } + // Short week name to its number. + weekShortNameMap = map[string]int{ + "sun": 0, + "mon": 1, + "tue": 2, + "wed": 3, + "thu": 4, + "fri": 5, + "sat": 6, + } + // Full week name to its number. + weekFullNameMap = map[string]int{ + "sunday": 0, + "monday": 1, + "tuesday": 2, + "wednesday": 3, + "thursday": 4, + "friday": 5, + "saturday": 6, + } +) + +// newSchedule creates and returns a schedule object for given cron pattern. +func newSchedule(pattern string) (*cronSchedule, error) { + var currentTimestamp = time.Now().Unix() + // Check if the predefined patterns. + if match, _ := gregex.MatchString(`(@\w+)\s*(\w*)\s*`, pattern); len(match) > 0 { + key := strings.ToLower(match[1]) + if v, ok := predefinedPatternMap[key]; ok { + pattern = v + } else if strings.Compare(key, "@every") == 0 { + d, err := gtime.ParseDuration(match[2]) + if err != nil { + return nil, err + } + return &cronSchedule{ + createTimestamp: currentTimestamp, + everySeconds: int64(d.Seconds()), + pattern: pattern, + lastTimestamp: gtype.NewInt64(currentTimestamp), + }, nil + } else { + return nil, gerror.NewCodef(gcode.CodeInvalidParameter, `invalid pattern: "%s"`, pattern) + } + } + // Handle the common cron pattern, like: + // 0 0 0 1 1 2 + if match, _ := gregex.MatchString(regexForCron, pattern); len(match) == 7 { + schedule := &cronSchedule{ + createTimestamp: currentTimestamp, + everySeconds: 0, + pattern: pattern, + lastTimestamp: gtype.NewInt64(currentTimestamp), + } + // Second. 
+ if m, err := parsePatternItem(match[1], 0, 59, false); err != nil { + return nil, err + } else { + schedule.secondMap = m + } + // Minute. + if m, err := parsePatternItem(match[2], 0, 59, false); err != nil { + return nil, err + } else { + schedule.minuteMap = m + } + // Hour. + if m, err := parsePatternItem(match[3], 0, 23, false); err != nil { + return nil, err + } else { + schedule.hourMap = m + } + // Day. + if m, err := parsePatternItem(match[4], 1, 31, true); err != nil { + return nil, err + } else { + schedule.dayMap = m + } + // Month. + if m, err := parsePatternItem(match[5], 1, 12, false); err != nil { + return nil, err + } else { + schedule.monthMap = m + } + // Week. + if m, err := parsePatternItem(match[6], 0, 6, true); err != nil { + return nil, err + } else { + schedule.weekMap = m + } + return schedule, nil + } + return nil, gerror.NewCodef(gcode.CodeInvalidParameter, `invalid pattern: "%s"`, pattern) +} + +// parsePatternItem parses every item in the pattern and returns the result as map, which is used for indexing. +func parsePatternItem(item string, min int, max int, allowQuestionMark bool) (map[int]struct{}, error) { + m := make(map[int]struct{}, max-min+1) + if item == "*" || (allowQuestionMark && item == "?") { + for i := min; i <= max; i++ { + m[i] = struct{}{} + } + return m, nil + } + // Like: MON,FRI + for _, itemElem := range strings.Split(item, ",") { + var ( + interval = 1 + intervalArray = strings.Split(itemElem, "/") + ) + if len(intervalArray) == 2 { + if number, err := strconv.Atoi(intervalArray[1]); err != nil { + return nil, gerror.NewCodef(gcode.CodeInvalidParameter, `invalid pattern item: "%s"`, itemElem) + } else { + interval = number + } + } + var ( + rangeMin = min + rangeMax = max + itemType = patternItemTypeUnknown + rangeArray = strings.Split(intervalArray[0], "-") // Like: 1-30, JAN-DEC + ) + switch max { + case 6: + // It's checking week field. 
+ itemType = patternItemTypeWeek + + case 12: + // It's checking month field. + itemType = patternItemTypeMonth + } + // Eg: */5 + if rangeArray[0] != "*" { + if number, err := parsePatternItemValue(rangeArray[0], itemType); err != nil { + return nil, gerror.NewCodef(gcode.CodeInvalidParameter, `invalid pattern item: "%s"`, itemElem) + } else { + rangeMin = number + if len(intervalArray) == 1 { + rangeMax = number + } + } + } + if len(rangeArray) == 2 { + if number, err := parsePatternItemValue(rangeArray[1], itemType); err != nil { + return nil, gerror.NewCodef(gcode.CodeInvalidParameter, `invalid pattern item: "%s"`, itemElem) + } else { + rangeMax = number + } + } + for i := rangeMin; i <= rangeMax; i += interval { + m[i] = struct{}{} + } + } + return m, nil +} + +// parsePatternItemValue parses the field value to a number according to its field type. +func parsePatternItemValue(value string, itemType int) (int, error) { + if gregex.IsMatchString(`^\d+$`, value) { + // It is pure number. + if number, err := strconv.Atoi(value); err == nil { + return number, nil + } + } else { + // Check if it contains letter, + // it converts the value to number according to predefined map. + switch itemType { + case patternItemTypeWeek: + if number, ok := weekShortNameMap[strings.ToLower(value)]; ok { + return number, nil + } + if number, ok := weekFullNameMap[strings.ToLower(value)]; ok { + return number, nil + } + case patternItemTypeMonth: + if number, ok := monthShortNameMap[strings.ToLower(value)]; ok { + return number, nil + } + if number, ok := monthFullNameMap[strings.ToLower(value)]; ok { + return number, nil + } + } + } + return 0, gerror.NewCodef(gcode.CodeInvalidParameter, `invalid pattern value: "%s"`, value) +} + +// checkMeetAndUpdateLastSeconds checks if the given time `t` meets the runnable point for the job. 
+func (s *cronSchedule) checkMeetAndUpdateLastSeconds(ctx context.Context, t time.Time) bool { + var ( + lastTimestamp = s.getAndUpdateLastTimestamp(ctx, t) + lastTime = gtime.NewFromTimeStamp(lastTimestamp) + ) + + if s.everySeconds != 0 { + // It checks using interval. + secondsAfterCreated := lastTime.Timestamp() - s.createTimestamp + if secondsAfterCreated > 0 { + return secondsAfterCreated%s.everySeconds == 0 + } + return false + } + + // It checks using normal cron pattern. + if _, ok := s.secondMap[lastTime.Second()]; !ok { + return false + } + if _, ok := s.minuteMap[lastTime.Minute()]; !ok { + return false + } + if _, ok := s.hourMap[lastTime.Hour()]; !ok { + return false + } + if _, ok := s.dayMap[lastTime.Day()]; !ok { + return false + } + if _, ok := s.monthMap[lastTime.Month()]; !ok { + return false + } + if _, ok := s.weekMap[int(lastTime.Weekday())]; !ok { + return false + } + return true +} + +// Next returns the next time this schedule is activated, greater than the given +// time. If no time can be found to satisfy the schedule, return the zero time. +func (s *cronSchedule) Next(t time.Time) time.Time { + if s.everySeconds != 0 { + var ( + diff = t.Unix() - s.createTimestamp + count = diff/s.everySeconds + 1 + ) + return t.Add(time.Duration(count*s.everySeconds) * time.Second) + } + + // Start at the earliest possible time (the upcoming second). 
+ t = t.Add(1*time.Second - time.Duration(t.Nanosecond())*time.Nanosecond) + var ( + loc = t.Location() + added = false + yearLimit = t.Year() + 5 + ) + +WRAP: + if t.Year() > yearLimit { + return t // who will care the job that run in five years later + } + + for !s.match(s.monthMap, int(t.Month())) { + if !added { + added = true + t = time.Date(t.Year(), t.Month(), 1, 0, 0, 0, 0, loc) + } + t = t.AddDate(0, 1, 0) + // need recheck + if t.Month() == time.January { + goto WRAP + } + } + + for !s.dayMatches(t) { + if !added { + added = true + t = time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, loc) + } + t = t.AddDate(0, 0, 1) + + // Notice if the hour is no longer midnight due to DST. + // Add an hour if it's 23, subtract an hour if it's 1. + if t.Hour() != 0 { + if t.Hour() > 12 { + t = t.Add(time.Duration(24-t.Hour()) * time.Hour) + } else { + t = t.Add(time.Duration(-t.Hour()) * time.Hour) + } + } + if t.Day() == 1 { + goto WRAP + } + } + for !s.match(s.hourMap, t.Hour()) { + if !added { + added = true + t = time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), 0, 0, 0, loc) + } + t = t.Add(time.Hour) + // need recheck + if t.Hour() == 0 { + goto WRAP + } + } + for !s.match(s.minuteMap, t.Minute()) { + if !added { + added = true + t = t.Truncate(time.Minute) + } + t = t.Add(1 * time.Minute) + + if t.Minute() == 0 { + goto WRAP + } + } + for !s.match(s.secondMap, t.Second()) { + if !added { + added = true + t = t.Truncate(time.Second) + } + t = t.Add(1 * time.Second) + if t.Second() == 0 { + goto WRAP + } + } + return t.In(loc) +} + +// dayMatches returns true if the schedule's day-of-week and day-of-month +// restrictions are satisfied by the given time. 
+func (s *cronSchedule) dayMatches(t time.Time) bool {
+ _, ok1 := s.dayMap[t.Day()]
+ _, ok2 := s.weekMap[int(t.Weekday())]
+ return ok1 && ok2
+}
+
+func (s *cronSchedule) match(m map[int]struct{}, key int) bool {
+ _, ok := m[key]
+ return ok
+}
diff --git a/vendor/github.com/gogf/gf/v2/os/gcron/gcron_schedule_fix.go b/vendor/github.com/gogf/gf/v2/os/gcron/gcron_schedule_fix.go
new file mode 100644
index 00000000..fac3da1c
--- /dev/null
+++ b/vendor/github.com/gogf/gf/v2/os/gcron/gcron_schedule_fix.go
@@ -0,0 +1,47 @@
+// Copyright GoFrame Author(https://goframe.org). All Rights Reserved.
+//
+// This Source Code Form is subject to the terms of the MIT License.
+// If a copy of the MIT was not distributed with this file,
+// You can obtain one at https://github.com/gogf/gf.
+
+package gcron
+
+import (
+ "context"
+ "time"
+
+ "github.com/gogf/gf/v2/internal/intlog"
+)
+
+// getAndUpdateLastTimestamp checks, fixes and returns the last timestamp, which may have a delay of some seconds.
+func (s *cronSchedule) getAndUpdateLastTimestamp(ctx context.Context, t time.Time) int64 {
+ var (
+ currentTimestamp = t.Unix()
+ lastTimestamp = s.lastTimestamp.Val()
+ )
+ switch {
+ case
+ lastTimestamp == currentTimestamp:
+ lastTimestamp += 1
+
+ case
+ lastTimestamp == currentTimestamp-1:
+ lastTimestamp = currentTimestamp
+
+ case
+ lastTimestamp == currentTimestamp-2,
+ lastTimestamp == currentTimestamp-3:
+ lastTimestamp += 1
+
+ default:
+ // Too much delay, let's update the last timestamp to current one.
+ intlog.Printf(
+ ctx,
+ `too much delay, last timestamp "%d", current "%d"`,
+ lastTimestamp, currentTimestamp,
+ )
+ lastTimestamp = currentTimestamp
+ }
+ s.lastTimestamp.Set(lastTimestamp)
+ return lastTimestamp
+}
diff --git a/vendor/github.com/gogf/gf/v2/os/gctx/gctx.go b/vendor/github.com/gogf/gf/v2/os/gctx/gctx.go
new file mode 100644
index 00000000..62032b36
--- /dev/null
+++ b/vendor/github.com/gogf/gf/v2/os/gctx/gctx.go
@@ -0,0 +1,82 @@
+// Copyright GoFrame Author(https://goframe.org). All Rights Reserved.
+//
+// This Source Code Form is subject to the terms of the MIT License.
+// If a copy of the MIT was not distributed with this file,
+// You can obtain one at https://github.com/gogf/gf.
+
+// Package gctx wraps context.Context and provides extra context features.
+package gctx
+
+import (
+ "context"
+ "os"
+ "strings"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/propagation"
+
+ "github.com/gogf/gf/v2/net/gtrace"
+)
+
+type (
+ Ctx = context.Context // Ctx is short name alias for context.Context.
+ StrKey string // StrKey is a type that wraps basic type string as context key.
+)
+
+var (
+ // initCtx is the context initialized from process environment.
+ initCtx context.Context
+)
+
+func init() {
+ // All environment key-value pairs.
+ m := make(map[string]string)
+ i := 0
+ for _, s := range os.Environ() {
+ i = strings.IndexByte(s, '=')
+ if i == -1 {
+ continue
+ }
+ m[s[0:i]] = s[i+1:]
+ }
+ // OpenTelemetry from environments.
+ initCtx = otel.GetTextMapPropagator().Extract(
+ context.Background(),
+ propagation.MapCarrier(m),
+ )
+}
+
+// New creates and returns a context which contains context id.
+func New() context.Context {
+ return WithCtx(context.Background())
+}
+
+// WithCtx creates and returns a context containing context id upon given parent context `ctx`.
+func WithCtx(ctx context.Context) context.Context { + if CtxId(ctx) != "" { + return ctx + } + if gtrace.IsUsingDefaultProvider() { + var span *gtrace.Span + ctx, span = gtrace.NewSpan(ctx, "gctx.WithCtx") + defer span.End() + } + return ctx +} + +// CtxId retrieves and returns the context id from context. +func CtxId(ctx context.Context) string { + return gtrace.GetTraceID(ctx) +} + +// SetInitCtx sets custom initialization context. +// Note that this function cannot be called in multiple goroutines. +func SetInitCtx(ctx context.Context) { + initCtx = ctx +} + +// GetInitCtx returns the initialization context. +// Initialization context is used in `main` or `init` functions. +func GetInitCtx() context.Context { + return initCtx +} diff --git a/vendor/github.com/gogf/gf/v2/os/gfile/gfile.go b/vendor/github.com/gogf/gf/v2/os/gfile/gfile.go new file mode 100644 index 00000000..f3a87381 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/gfile/gfile.go @@ -0,0 +1,458 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +// Package gfile provides easy-to-use operations for file system. +package gfile + +import ( + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "time" + + "github.com/gogf/gf/v2/container/gtype" + "github.com/gogf/gf/v2/errors/gerror" + "github.com/gogf/gf/v2/text/gstr" + "github.com/gogf/gf/v2/util/gconv" +) + +const ( + // Separator for file system. + // It here defines the separator as variable + // to allow it modified by developer if necessary. + Separator = string(filepath.Separator) + + // DefaultPermOpen is the default perm for file opening. + DefaultPermOpen = os.FileMode(0666) + + // DefaultPermCopy is the default perm for file/folder copy. 
+ DefaultPermCopy = os.FileMode(0777) +) + +var ( + // The absolute file path for main package. + // It can be only checked and set once. + mainPkgPath = gtype.NewString() + + // selfPath is the current running binary path. + // As it is most commonly used, it is so defined as an internal package variable. + selfPath = "" + + // Temporary directory of system. + tempDir = "/tmp" +) + +func init() { + // Initialize internal package variable: tempDir. + if runtime.GOOS == "windows" || Separator != "/" || !Exists(tempDir) { + tempDir = os.TempDir() + } + // Initialize internal package variable: selfPath. + selfPath, _ = exec.LookPath(os.Args[0]) + if selfPath != "" { + selfPath, _ = filepath.Abs(selfPath) + } + if selfPath == "" { + selfPath, _ = filepath.Abs(os.Args[0]) + } +} + +// Mkdir creates directories recursively with given `path`. +// The parameter `path` is suggested to be an absolute path instead of relative one. +func Mkdir(path string) (err error) { + if err = os.MkdirAll(path, os.ModePerm); err != nil { + err = gerror.Wrapf(err, `os.MkdirAll failed for path "%s" with perm "%d"`, path, os.ModePerm) + return err + } + return nil +} + +// Create creates file with given `path` recursively. +// The parameter `path` is suggested to be absolute path. +func Create(path string) (*os.File, error) { + dir := Dir(path) + if !Exists(dir) { + if err := Mkdir(dir); err != nil { + return nil, err + } + } + file, err := os.Create(path) + if err != nil { + err = gerror.Wrapf(err, `os.Create failed for name "%s"`, path) + } + return file, err +} + +// Open opens file/directory READONLY. +func Open(path string) (*os.File, error) { + file, err := os.Open(path) + if err != nil { + err = gerror.Wrapf(err, `os.Open failed for name "%s"`, path) + } + return file, err +} + +// OpenFile opens file/directory with custom `flag` and `perm`. +// The parameter `flag` is like: O_RDONLY, O_RDWR, O_RDWR|O_CREATE|O_TRUNC, etc. 
+func OpenFile(path string, flag int, perm os.FileMode) (*os.File, error) { + file, err := os.OpenFile(path, flag, perm) + if err != nil { + err = gerror.Wrapf(err, `os.OpenFile failed with name "%s", flag "%d", perm "%d"`, path, flag, perm) + } + return file, err +} + +// OpenWithFlag opens file/directory with default perm and custom `flag`. +// The default `perm` is 0666. +// The parameter `flag` is like: O_RDONLY, O_RDWR, O_RDWR|O_CREATE|O_TRUNC, etc. +func OpenWithFlag(path string, flag int) (*os.File, error) { + file, err := OpenFile(path, flag, DefaultPermOpen) + if err != nil { + return nil, err + } + return file, nil +} + +// OpenWithFlagPerm opens file/directory with custom `flag` and `perm`. +// The parameter `flag` is like: O_RDONLY, O_RDWR, O_RDWR|O_CREATE|O_TRUNC, etc. +// The parameter `perm` is like: 0600, 0666, 0777, etc. +func OpenWithFlagPerm(path string, flag int, perm os.FileMode) (*os.File, error) { + file, err := OpenFile(path, flag, perm) + if err != nil { + return nil, err + } + return file, nil +} + +// Join joins string array paths with file separator of current system. +func Join(paths ...string) string { + var s string + for _, path := range paths { + if s != "" { + s += Separator + } + s += gstr.TrimRight(path, Separator) + } + return s +} + +// Exists checks whether given `path` exist. +func Exists(path string) bool { + if stat, err := os.Stat(path); stat != nil && !os.IsNotExist(err) { + return true + } + return false +} + +// IsDir checks whether given `path` a directory. +// Note that it returns false if the `path` does not exist. +func IsDir(path string) bool { + s, err := os.Stat(path) + if err != nil { + return false + } + return s.IsDir() +} + +// Pwd returns absolute path of current working directory. +// Note that it returns an empty string if retrieving current +// working directory failed. 
+func Pwd() string { + path, err := os.Getwd() + if err != nil { + return "" + } + return path +} + +// Chdir changes the current working directory to the named directory. +// If there is an error, it will be of type *PathError. +func Chdir(dir string) (err error) { + err = os.Chdir(dir) + if err != nil { + err = gerror.Wrapf(err, `os.Chdir failed with dir "%s"`, dir) + } + return +} + +// IsFile checks whether given `path` a file, which means it's not a directory. +// Note that it returns false if the `path` does not exist. +func IsFile(path string) bool { + s, err := Stat(path) + if err != nil { + return false + } + return !s.IsDir() +} + +// Stat returns a FileInfo describing the named file. +// If there is an error, it will be of type *PathError. +func Stat(path string) (os.FileInfo, error) { + info, err := os.Stat(path) + if err != nil { + err = gerror.Wrapf(err, `os.Stat failed for file "%s"`, path) + } + return info, err +} + +// Move renames (moves) `src` to `dst` path. +// If `dst` already exists and is not a directory, it'll be replaced. +func Move(src string, dst string) (err error) { + err = os.Rename(src, dst) + if err != nil { + err = gerror.Wrapf(err, `os.Rename failed from "%s" to "%s"`, src, dst) + } + return +} + +// Rename is alias of Move. +// See Move. +func Rename(src string, dst string) error { + return Move(src, dst) +} + +// DirNames returns sub-file names of given directory `path`. +// Note that the returned names are NOT absolute paths. +func DirNames(path string) ([]string, error) { + f, err := Open(path) + if err != nil { + return nil, err + } + list, err := f.Readdirnames(-1) + _ = f.Close() + if err != nil { + err = gerror.Wrapf(err, `Read dir files failed from path "%s"`, path) + return nil, err + } + return list, nil +} + +// Glob returns the names of all files matching pattern or nil +// if there is no matching file. The syntax of patterns is the same +// as in Match. 
The pattern may describe hierarchical names such as +// /usr/*/bin/ed (assuming the Separator is '/'). +// +// Glob ignores file system errors such as I/O errors reading directories. +// The only possible returned error is ErrBadPattern, when pattern +// is malformed. +func Glob(pattern string, onlyNames ...bool) ([]string, error) { + list, err := filepath.Glob(pattern) + if err != nil { + err = gerror.Wrapf(err, `filepath.Glob failed for pattern "%s"`, pattern) + return nil, err + } + if len(onlyNames) > 0 && onlyNames[0] && len(list) > 0 { + array := make([]string, len(list)) + for k, v := range list { + array[k] = Basename(v) + } + return array, nil + } + return list, nil +} + +// Remove deletes all file/directory with `path` parameter. +// If parameter `path` is directory, it deletes it recursively. +// +// It does nothing if given `path` does not exist or is empty. +func Remove(path string) (err error) { + // It does nothing if `path` is empty. + if path == "" { + return nil + } + if err = os.RemoveAll(path); err != nil { + err = gerror.Wrapf(err, `os.RemoveAll failed for path "%s"`, path) + } + return +} + +// IsReadable checks whether given `path` is readable. +func IsReadable(path string) bool { + result := true + file, err := os.OpenFile(path, os.O_RDONLY, DefaultPermOpen) + if err != nil { + result = false + } + file.Close() + return result +} + +// IsWritable checks whether given `path` is writable. +// +// TODO improve performance; use golang.org/x/sys to cross-plat-form +func IsWritable(path string) bool { + result := true + if IsDir(path) { + // If it's a directory, create a temporary file to test whether it's writable. + tmpFile := strings.TrimRight(path, Separator) + Separator + gconv.String(time.Now().UnixNano()) + if f, err := Create(tmpFile); err != nil || !Exists(tmpFile) { + result = false + } else { + _ = f.Close() + _ = Remove(tmpFile) + } + } else { + // If it's a file, check if it can open it. 
+ file, err := os.OpenFile(path, os.O_WRONLY, DefaultPermOpen) + if err != nil { + result = false + } + _ = file.Close() + } + return result +} + +// Chmod is alias of os.Chmod. +// See os.Chmod. +func Chmod(path string, mode os.FileMode) (err error) { + err = os.Chmod(path, mode) + if err != nil { + err = gerror.Wrapf(err, `os.Chmod failed with path "%s" and mode "%s"`, path, mode) + } + return +} + +// Abs returns an absolute representation of path. +// If the path is not absolute it will be joined with the current +// working directory to turn it into an absolute path. The absolute +// path name for a given file is not guaranteed to be unique. +// Abs calls Clean on the result. +func Abs(path string) string { + p, _ := filepath.Abs(path) + return p +} + +// RealPath converts the given `path` to its absolute path +// and checks if the file path exists. +// If the file does not exist, return an empty string. +func RealPath(path string) string { + p, err := filepath.Abs(path) + if err != nil { + return "" + } + if !Exists(p) { + return "" + } + return p +} + +// SelfPath returns absolute file path of current running process(binary). +func SelfPath() string { + return selfPath +} + +// SelfName returns file name of current running process(binary). +func SelfName() string { + return Basename(SelfPath()) +} + +// SelfDir returns absolute directory path of current running process(binary). +func SelfDir() string { + return filepath.Dir(SelfPath()) +} + +// Basename returns the last element of path, which contains file extension. +// Trailing path separators are removed before extracting the last element. +// If the path is empty, Base returns ".". +// If the path consists entirely of separators, Basename returns a single separator. +// Example: +// /var/www/file.js -> file.js +// file.js -> file.js +func Basename(path string) string { + return filepath.Base(path) +} + +// Name returns the last element of path without file extension. 
+// Example: +// /var/www/file.js -> file +// file.js -> file +func Name(path string) string { + base := filepath.Base(path) + if i := strings.LastIndexByte(base, '.'); i != -1 { + return base[:i] + } + return base +} + +// Dir returns all but the last element of path, typically the path's directory. +// After dropping the final element, Dir calls Clean on the path and trailing +// slashes are removed. +// If the `path` is empty, Dir returns ".". +// If the `path` is ".", Dir treats the path as current working directory. +// If the `path` consists entirely of separators, Dir returns a single separator. +// The returned path does not end in a separator unless it is the root directory. +func Dir(path string) string { + if path == "." { + return filepath.Dir(RealPath(path)) + } + return filepath.Dir(path) +} + +// IsEmpty checks whether the given `path` is empty. +// If `path` is a folder, it checks if there's any file under it. +// If `path` is a file, it checks if the file size is zero. +// +// Note that it returns true if `path` does not exist. +func IsEmpty(path string) bool { + stat, err := Stat(path) + if err != nil { + return true + } + if stat.IsDir() { + file, err := os.Open(path) + if err != nil { + return true + } + defer file.Close() + names, err := file.Readdirnames(-1) + if err != nil { + return true + } + return len(names) == 0 + } else { + return stat.Size() == 0 + } +} + +// Ext returns the file name extension used by path. +// The extension is the suffix beginning at the final dot +// in the final element of path; it is empty if there is +// no dot. +// Note: the result contains symbol '.'. +// Eg: +// main.go => .go +// api.json => .json +func Ext(path string) string { + ext := filepath.Ext(path) + if p := strings.IndexByte(ext, '?'); p != -1 { + ext = ext[0:p] + } + return ext +} + +// ExtName is like function Ext, which returns the file name extension used by path, +// but the result does not contain symbol '.'. 
+// Eg:
+// main.go => go
+// api.json => json
+func ExtName(path string) string {
+ return strings.TrimLeft(Ext(path), ".")
+}
+
+// Temp retrieves and returns the temporary directory of current system.
+// It returns "/tmp" if the current system is *nix, or else it returns os.TempDir().
+//
+// The optional parameter `names` specifies the sub-folders/sub-files,
+// which will be joined with current system separator and returned with the path.
+func Temp(names ...string) string {
+ path := tempDir
+ for _, name := range names {
+ path += Separator + name
+ }
+ return path
+}
diff --git a/vendor/github.com/gogf/gf/v2/os/gfile/gfile_cache.go b/vendor/github.com/gogf/gf/v2/os/gfile/gfile_cache.go
new file mode 100644
index 00000000..731cb1ce
--- /dev/null
+++ b/vendor/github.com/gogf/gf/v2/os/gfile/gfile_cache.go
@@ -0,0 +1,87 @@
+// Copyright GoFrame Author(https://goframe.org). All Rights Reserved.
+//
+// This Source Code Form is subject to the terms of the MIT License.
+// If a copy of the MIT was not distributed with this file,
+// You can obtain one at https://github.com/gogf/gf.
+
+package gfile
+
+import (
+ "context"
+ "time"
+
+ "github.com/gogf/gf/v2/errors/gcode"
+ "github.com/gogf/gf/v2/errors/gerror"
+ "github.com/gogf/gf/v2/internal/command"
+ "github.com/gogf/gf/v2/internal/intlog"
+ "github.com/gogf/gf/v2/os/gcache"
+ "github.com/gogf/gf/v2/os/gfsnotify"
+)
+
+const (
+ defaultCacheDuration = "1m" // defaultCacheExpire is the expire time for file content caching in seconds.
+ commandEnvKeyForCache = "gf.gfile.cache" // commandEnvKeyForCache is the configuration key for command argument or environment configuring cache expire duration.
+)
+
+var (
+ // Default expire time for file content caching.
+ cacheDuration = getCacheDuration()
+
+ // internalCache is the memory cache for internal usage.
+ internalCache = gcache.New() +) + +func getCacheDuration() time.Duration { + cacheDurationConfigured := command.GetOptWithEnv(commandEnvKeyForCache, defaultCacheDuration) + d, err := time.ParseDuration(cacheDurationConfigured) + if err != nil { + panic(gerror.WrapCodef( + gcode.CodeInvalidConfiguration, + err, + `error parsing string "%s" to time duration`, + cacheDurationConfigured, + )) + } + return d +} + +// GetContentsWithCache returns string content of given file by `path` from cache. +// If there's no content in the cache, it will read it from disk file specified by `path`. +// The parameter `expire` specifies the caching time for this file content in seconds. +func GetContentsWithCache(path string, duration ...time.Duration) string { + return string(GetBytesWithCache(path, duration...)) +} + +// GetBytesWithCache returns []byte content of given file by `path` from cache. +// If there's no content in the cache, it will read it from disk file specified by `path`. +// The parameter `expire` specifies the caching time for this file content in seconds. +func GetBytesWithCache(path string, duration ...time.Duration) []byte { + var ( + ctx = context.Background() + expire = cacheDuration + cacheKey = commandEnvKeyForCache + path + ) + + if len(duration) > 0 { + expire = duration[0] + } + r, _ := internalCache.GetOrSetFuncLock(ctx, cacheKey, func(ctx context.Context) (interface{}, error) { + b := GetBytes(path) + if b != nil { + // Adding this `path` to gfsnotify, + // it will clear its cache if there's any changes of the file. 
+ _, _ = gfsnotify.Add(path, func(event *gfsnotify.Event) {
+ _, err := internalCache.Remove(ctx, cacheKey)
+ if err != nil {
+ intlog.Errorf(ctx, `%+v`, err)
+ }
+ gfsnotify.Exit()
+ })
+ }
+ return b, nil
+ }, expire)
+ if r != nil {
+ return r.Bytes()
+ }
+ return nil
+}
diff --git a/vendor/github.com/gogf/gf/v2/os/gfile/gfile_contents.go b/vendor/github.com/gogf/gf/v2/os/gfile/gfile_contents.go
new file mode 100644
index 00000000..c47b0a98
--- /dev/null
+++ b/vendor/github.com/gogf/gf/v2/os/gfile/gfile_contents.go
@@ -0,0 +1,214 @@
+// Copyright GoFrame Author(https://goframe.org). All Rights Reserved.
+//
+// This Source Code Form is subject to the terms of the MIT License.
+// If a copy of the MIT was not distributed with this file,
+// You can obtain one at https://github.com/gogf/gf.
+
+package gfile
+
+import (
+ "bufio"
+ "io"
+ "io/ioutil"
+ "os"
+
+ "github.com/gogf/gf/v2/errors/gerror"
+)
+
+var (
+ // DefaultReadBuffer is the buffer size for reading file content.
+ DefaultReadBuffer = 1024
+)
+
+// GetContents returns the file content of `path` as string.
+// It returns an empty string if it fails reading.
+func GetContents(path string) string {
+ return string(GetBytes(path))
+}
+
+// GetBytes returns the file content of `path` as []byte.
+// It returns nil if it fails reading.
+func GetBytes(path string) []byte {
+ data, err := ioutil.ReadFile(path)
+ if err != nil {
+ return nil
+ }
+ return data
+}
+
+// putContents puts binary content to file of `path`.
+func putContents(path string, data []byte, flag int, perm os.FileMode) error {
+ // It supports creating file of `path` recursively.
+ dir := Dir(path)
+ if !Exists(dir) {
+ if err := Mkdir(dir); err != nil {
+ return err
+ }
+ }
+ // Opening file with given `flag` and `perm`.
+ f, err := OpenWithFlagPerm(path, flag, perm)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ // Write data.
+ var n int + if n, err = f.Write(data); err != nil { + err = gerror.Wrapf(err, `Write data to file "%s" failed`, path) + return err + } else if n < len(data) { + return io.ErrShortWrite + } + return nil +} + +// Truncate truncates file of `path` to given size by `size`. +func Truncate(path string, size int) (err error) { + err = os.Truncate(path, int64(size)) + if err != nil { + err = gerror.Wrapf(err, `os.Truncate failed for file "%s", size "%d"`, path, size) + } + return +} + +// PutContents puts string `content` to file of `path`. +// It creates file of `path` recursively if it does not exist. +func PutContents(path string, content string) error { + return putContents(path, []byte(content), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, DefaultPermOpen) +} + +// PutContentsAppend appends string `content` to file of `path`. +// It creates file of `path` recursively if it does not exist. +func PutContentsAppend(path string, content string) error { + return putContents(path, []byte(content), os.O_WRONLY|os.O_CREATE|os.O_APPEND, DefaultPermOpen) +} + +// PutBytes puts binary `content` to file of `path`. +// It creates file of `path` recursively if it does not exist. +func PutBytes(path string, content []byte) error { + return putContents(path, content, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, DefaultPermOpen) +} + +// PutBytesAppend appends binary `content` to file of `path`. +// It creates file of `path` recursively if it does not exist. +func PutBytesAppend(path string, content []byte) error { + return putContents(path, content, os.O_WRONLY|os.O_CREATE|os.O_APPEND, DefaultPermOpen) +} + +// GetNextCharOffset returns the file offset for given `char` starting from `start`. 
+func GetNextCharOffset(reader io.ReaderAt, char byte, start int64) int64 { + buffer := make([]byte, DefaultReadBuffer) + offset := start + for { + if n, err := reader.ReadAt(buffer, offset); n > 0 { + for i := 0; i < n; i++ { + if buffer[i] == char { + return int64(i) + offset + } + } + offset += int64(n) + } else if err != nil { + break + } + } + return -1 +} + +// GetNextCharOffsetByPath returns the file offset for given `char` starting from `start`. +// It opens file of `path` for reading with os.O_RDONLY flag and default perm. +func GetNextCharOffsetByPath(path string, char byte, start int64) int64 { + if f, err := OpenWithFlagPerm(path, os.O_RDONLY, DefaultPermOpen); err == nil { + defer f.Close() + return GetNextCharOffset(f, char, start) + } + return -1 +} + +// GetBytesTilChar returns the contents of the file as []byte +// until the next specified byte `char` position. +// +// Note: Returned value contains the character of the last position. +func GetBytesTilChar(reader io.ReaderAt, char byte, start int64) ([]byte, int64) { + if offset := GetNextCharOffset(reader, char, start); offset != -1 { + return GetBytesByTwoOffsets(reader, start, offset+1), offset + } + return nil, -1 +} + +// GetBytesTilCharByPath returns the contents of the file given by `path` as []byte +// until the next specified byte `char` position. +// It opens file of `path` for reading with os.O_RDONLY flag and default perm. +// +// Note: Returned value contains the character of the last position. +func GetBytesTilCharByPath(path string, char byte, start int64) ([]byte, int64) { + if f, err := OpenWithFlagPerm(path, os.O_RDONLY, DefaultPermOpen); err == nil { + defer f.Close() + return GetBytesTilChar(f, char, start) + } + return nil, -1 +} + +// GetBytesByTwoOffsets returns the binary content as []byte from `start` to `end`. +// Note: Returned value does not contain the character of the last position, which means +// it returns content range as [start, end). 
+func GetBytesByTwoOffsets(reader io.ReaderAt, start int64, end int64) []byte { + buffer := make([]byte, end-start) + if _, err := reader.ReadAt(buffer, start); err != nil { + return nil + } + return buffer +} + +// GetBytesByTwoOffsetsByPath returns the binary content as []byte from `start` to `end`. +// Note: Returned value does not contain the character of the last position, which means +// it returns content range as [start, end). +// It opens file of `path` for reading with os.O_RDONLY flag and default perm. +func GetBytesByTwoOffsetsByPath(path string, start int64, end int64) []byte { + if f, err := OpenWithFlagPerm(path, os.O_RDONLY, DefaultPermOpen); err == nil { + defer f.Close() + return GetBytesByTwoOffsets(f, start, end) + } + return nil +} + +// ReadLines reads file content line by line, which is passed to the callback function `callback` as string. +// It matches each line of text, separated by chars '\r' or '\n', stripped any trailing end-of-line marker. +// +// Note that the parameter passed to callback function might be an empty value, and the last non-empty line +// will be passed to callback function `callback` even if it has no newline marker. +func ReadLines(file string, callback func(line string) error) error { + f, err := Open(file) + if err != nil { + return err + } + defer f.Close() + + scanner := bufio.NewScanner(f) + for scanner.Scan() { + if err = callback(scanner.Text()); err != nil { + return err + } + } + return nil +} + +// ReadLinesBytes reads file content line by line, which is passed to the callback function `callback` as []byte. +// It matches each line of text, separated by chars '\r' or '\n', stripped any trailing end-of-line marker. +// +// Note that the parameter passed to callback function might be an empty value, and the last non-empty line +// will be passed to callback function `callback` even if it has no newline marker. 
+func ReadLinesBytes(file string, callback func(bytes []byte) error) error { + f, err := Open(file) + if err != nil { + return err + } + defer f.Close() + + scanner := bufio.NewScanner(f) + for scanner.Scan() { + if err = callback(scanner.Bytes()); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/gogf/gf/v2/os/gfile/gfile_copy.go b/vendor/github.com/gogf/gf/v2/os/gfile/gfile_copy.go new file mode 100644 index 00000000..967351b5 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/gfile/gfile_copy.go @@ -0,0 +1,139 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gfile + +import ( + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/gogf/gf/v2/errors/gcode" + "github.com/gogf/gf/v2/errors/gerror" +) + +// Copy file/directory from `src` to `dst`. +// +// If `src` is file, it calls CopyFile to implements copy feature, +// or else it calls CopyDir. +func Copy(src string, dst string) error { + if src == "" { + return gerror.NewCode(gcode.CodeInvalidParameter, "source path cannot be empty") + } + if dst == "" { + return gerror.NewCode(gcode.CodeInvalidParameter, "destination path cannot be empty") + } + if IsFile(src) { + return CopyFile(src, dst) + } + return CopyDir(src, dst) +} + +// CopyFile copies the contents of the file named `src` to the file named +// by `dst`. The file will be created if it does not exist. If the +// destination file exists, all it's contents will be replaced by the contents +// of the source file. The file mode will be copied from the source and +// the copied data is synced/flushed to stable storage. 
+// Thanks: https://gist.github.com/r0l1/92462b38df26839a3ca324697c8cba04 +func CopyFile(src, dst string) (err error) { + if src == "" { + return gerror.NewCode(gcode.CodeInvalidParameter, "source file cannot be empty") + } + if dst == "" { + return gerror.NewCode(gcode.CodeInvalidParameter, "destination file cannot be empty") + } + // If src and dst are the same path, it does nothing. + if src == dst { + return nil + } + var inFile *os.File + inFile, err = Open(src) + if err != nil { + return + } + defer func() { + if e := inFile.Close(); e != nil { + err = gerror.Wrapf(e, `file close failed for "%s"`, src) + } + }() + var outFile *os.File + outFile, err = Create(dst) + if err != nil { + return + } + defer func() { + if e := outFile.Close(); e != nil { + err = gerror.Wrapf(e, `file close failed for "%s"`, dst) + } + }() + if _, err = io.Copy(outFile, inFile); err != nil { + err = gerror.Wrapf(err, `io.Copy failed from "%s" to "%s"`, src, dst) + return + } + if err = outFile.Sync(); err != nil { + err = gerror.Wrapf(err, `file sync failed for file "%s"`, dst) + return + } + if err = Chmod(dst, DefaultPermCopy); err != nil { + return + } + return +} + +// CopyDir recursively copies a directory tree, attempting to preserve permissions. +// +// Note that, the Source directory must exist and symlinks are ignored and skipped. +func CopyDir(src string, dst string) (err error) { + if src == "" { + return gerror.NewCode(gcode.CodeInvalidParameter, "source directory cannot be empty") + } + if dst == "" { + return gerror.NewCode(gcode.CodeInvalidParameter, "destination directory cannot be empty") + } + // If src and dst are the same path, it does nothing. 
+ if src == dst { + return nil + } + src = filepath.Clean(src) + dst = filepath.Clean(dst) + si, err := Stat(src) + if err != nil { + return err + } + if !si.IsDir() { + return gerror.NewCode(gcode.CodeInvalidParameter, "source is not a directory") + } + if !Exists(dst) { + if err = os.MkdirAll(dst, DefaultPermCopy); err != nil { + err = gerror.Wrapf(err, `create directory failed for path "%s", perm "%s"`, dst, DefaultPermCopy) + return + } + } + entries, err := ioutil.ReadDir(src) + if err != nil { + err = gerror.Wrapf(err, `read directory failed for path "%s"`, src) + return + } + for _, entry := range entries { + srcPath := filepath.Join(src, entry.Name()) + dstPath := filepath.Join(dst, entry.Name()) + if entry.IsDir() { + if err = CopyDir(srcPath, dstPath); err != nil { + return + } + } else { + // Skip symlinks. + if entry.Mode()&os.ModeSymlink != 0 { + continue + } + if err = CopyFile(srcPath, dstPath); err != nil { + return + } + } + } + return +} diff --git a/vendor/github.com/gogf/gf/v2/os/gfile/gfile_home.go b/vendor/github.com/gogf/gf/v2/os/gfile/gfile_home.go new file mode 100644 index 00000000..817e74bf --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/gfile/gfile_home.go @@ -0,0 +1,82 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gfile + +import ( + "bytes" + "os" + "os/exec" + "os/user" + "runtime" + "strings" + + "github.com/gogf/gf/v2/errors/gerror" +) + +// Home returns absolute path of current user's home directory. +// The optional parameter `names` specifies the sub-folders/sub-files, +// which will be joined with current system separator and returned with the path. 
+func Home(names ...string) (string, error) { + path, err := getHomePath() + if err != nil { + return "", err + } + for _, name := range names { + path += Separator + name + } + return path, nil +} + +// getHomePath returns absolute path of current user's home directory. +func getHomePath() (string, error) { + u, err := user.Current() + if nil == err { + return u.HomeDir, nil + } + if runtime.GOOS == "windows" { + return homeWindows() + } + return homeUnix() +} + +// homeUnix retrieves and returns the home on unix system. +func homeUnix() (string, error) { + if home := os.Getenv("HOME"); home != "" { + return home, nil + } + var stdout bytes.Buffer + cmd := exec.Command("sh", "-c", "eval echo ~$USER") + cmd.Stdout = &stdout + if err := cmd.Run(); err != nil { + err = gerror.Wrapf(err, `retrieve home directory failed`) + return "", err + } + + result := strings.TrimSpace(stdout.String()) + if result == "" { + return "", gerror.New("blank output when reading home directory") + } + + return result, nil +} + +// homeWindows retrieves and returns the home on windows system. +func homeWindows() (string, error) { + var ( + drive = os.Getenv("HOMEDRIVE") + path = os.Getenv("HOMEPATH") + home = drive + path + ) + if drive == "" || path == "" { + home = os.Getenv("USERPROFILE") + } + if home == "" { + return "", gerror.New("environment keys HOMEDRIVE, HOMEPATH and USERPROFILE are empty") + } + + return home, nil +} diff --git a/vendor/github.com/gogf/gf/v2/os/gfile/gfile_replace.go b/vendor/github.com/gogf/gf/v2/os/gfile/gfile_replace.go new file mode 100644 index 00000000..3cb7b689 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/gfile/gfile_replace.go @@ -0,0 +1,58 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. 
+ +package gfile + +import ( + "github.com/gogf/gf/v2/text/gstr" +) + +// ReplaceFile replaces content for file `path`. +func ReplaceFile(search, replace, path string) error { + return PutContents(path, gstr.Replace(GetContents(path), search, replace)) +} + +// ReplaceFileFunc replaces content for file `path` with callback function `f`. +func ReplaceFileFunc(f func(path, content string) string, path string) error { + data := GetContents(path) + result := f(path, data) + if len(data) != len(result) && data != result { + return PutContents(path, result) + } + return nil +} + +// ReplaceDir replaces content for files under `path`. +// The parameter `pattern` specifies the file pattern which matches to be replaced. +// It does replacement recursively if given parameter `recursive` is true. +func ReplaceDir(search, replace, path, pattern string, recursive ...bool) error { + files, err := ScanDirFile(path, pattern, recursive...) + if err != nil { + return err + } + for _, file := range files { + if err = ReplaceFile(search, replace, file); err != nil { + return err + } + } + return err +} + +// ReplaceDirFunc replaces content for files under `path` with callback function `f`. +// The parameter `pattern` specifies the file pattern which matches to be replaced. +// It does replacement recursively if given parameter `recursive` is true. +func ReplaceDirFunc(f func(path, content string) string, path, pattern string, recursive ...bool) error { + files, err := ScanDirFile(path, pattern, recursive...) + if err != nil { + return err + } + for _, file := range files { + if err = ReplaceFileFunc(f, file); err != nil { + return err + } + } + return err +} diff --git a/vendor/github.com/gogf/gf/v2/os/gfile/gfile_scan.go b/vendor/github.com/gogf/gf/v2/os/gfile/gfile_scan.go new file mode 100644 index 00000000..deda5b6d --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/gfile/gfile_scan.go @@ -0,0 +1,184 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. 
+// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gfile + +import ( + "path/filepath" + "sort" + + "github.com/gogf/gf/v2/errors/gerror" + "github.com/gogf/gf/v2/text/gstr" +) + +const ( + // Max recursive depth for directory scanning. + maxScanDepth = 100000 +) + +// ScanDir returns all sub-files with absolute paths of given `path`, +// It scans directory recursively if given parameter `recursive` is true. +// +// The pattern parameter `pattern` supports multiple file name patterns, +// using the ',' symbol to separate multiple patterns. +func ScanDir(path string, pattern string, recursive ...bool) ([]string, error) { + isRecursive := false + if len(recursive) > 0 { + isRecursive = recursive[0] + } + list, err := doScanDir(0, path, pattern, isRecursive, nil) + if err != nil { + return nil, err + } + if len(list) > 0 { + sort.Strings(list) + } + return list, nil +} + +// ScanDirFunc returns all sub-files with absolute paths of given `path`, +// It scans directory recursively if given parameter `recursive` is true. +// +// The pattern parameter `pattern` supports multiple file name patterns, using the ',' +// symbol to separate multiple patterns. +// +// The parameter `recursive` specifies whether scanning the `path` recursively, which +// means it scans its sub-files and appends the files path to result array if the sub-file +// is also a folder. It is false in default. +// +// The parameter `handler` specifies the callback function handling each sub-file path of +// the `path` and its sub-folders. It ignores the sub-file path if `handler` returns an empty +// string, or else it appends the sub-file path to result slice. 
+func ScanDirFunc(path string, pattern string, recursive bool, handler func(path string) string) ([]string, error) { + list, err := doScanDir(0, path, pattern, recursive, handler) + if err != nil { + return nil, err + } + if len(list) > 0 { + sort.Strings(list) + } + return list, nil +} + +// ScanDirFile returns all sub-files with absolute paths of given `path`, +// It scans directory recursively if given parameter `recursive` is true. +// +// The pattern parameter `pattern` supports multiple file name patterns, +// using the ',' symbol to separate multiple patterns. +// +// Note that it returns only files, exclusive of directories. +func ScanDirFile(path string, pattern string, recursive ...bool) ([]string, error) { + isRecursive := false + if len(recursive) > 0 { + isRecursive = recursive[0] + } + list, err := doScanDir(0, path, pattern, isRecursive, func(path string) string { + if IsDir(path) { + return "" + } + return path + }) + if err != nil { + return nil, err + } + if len(list) > 0 { + sort.Strings(list) + } + return list, nil +} + +// ScanDirFileFunc returns all sub-files with absolute paths of given `path`, +// It scans directory recursively if given parameter `recursive` is true. +// +// The pattern parameter `pattern` supports multiple file name patterns, using the ',' +// symbol to separate multiple patterns. +// +// The parameter `recursive` specifies whether scanning the `path` recursively, which +// means it scans its sub-files and appends the file paths to result array if the sub-file +// is also a folder. It is false in default. +// +// The parameter `handler` specifies the callback function handling each sub-file path of +// the `path` and its sub-folders. It ignores the sub-file path if `handler` returns an empty +// string, or else it appends the sub-file path to result slice. +// +// Note that the parameter `path` for `handler` is not a directory but a file. +// It returns only files, exclusive of directories. 
+func ScanDirFileFunc(path string, pattern string, recursive bool, handler func(path string) string) ([]string, error) { + list, err := doScanDir(0, path, pattern, recursive, func(path string) string { + if IsDir(path) { + return "" + } + return handler(path) + }) + if err != nil { + return nil, err + } + if len(list) > 0 { + sort.Strings(list) + } + return list, nil +} + +// doScanDir is an internal method which scans directory and returns the absolute path +// list of files that are not sorted. +// +// The pattern parameter `pattern` supports multiple file name patterns, using the ',' +// symbol to separate multiple patterns. +// +// The parameter `recursive` specifies whether scanning the `path` recursively, which +// means it scans its sub-files and appends the files path to result array if the sub-file +// is also a folder. It is false in default. +// +// The parameter `handler` specifies the callback function handling each sub-file path of +// the `path` and its sub-folders. It ignores the sub-file path if `handler` returns an empty +// string, or else it appends the sub-file path to result slice. +func doScanDir(depth int, path string, pattern string, recursive bool, handler func(path string) string) ([]string, error) { + if depth >= maxScanDepth { + return nil, gerror.Newf("directory scanning exceeds max recursive depth: %d", maxScanDepth) + } + var ( + list []string + file, err = Open(path) + ) + if err != nil { + return nil, err + } + defer file.Close() + names, err := file.Readdirnames(-1) + if err != nil { + err = gerror.Wrapf(err, `read directory files failed from path "%s"`, path) + return nil, err + } + var ( + filePath string + patterns = gstr.SplitAndTrim(pattern, ",") + ) + for _, name := range names { + filePath = path + Separator + name + if IsDir(filePath) && recursive { + array, _ := doScanDir(depth+1, filePath, pattern, true, handler) + if len(array) > 0 { + list = append(list, array...) + } + } + // Handler filtering. 
+ if handler != nil { + filePath = handler(filePath) + if filePath == "" { + continue + } + } + // If it meets pattern, then add it to the result list. + for _, p := range patterns { + if match, _ := filepath.Match(p, name); match { + if filePath = Abs(filePath); filePath != "" { + list = append(list, filePath) + } + } + } + } + return list, nil +} diff --git a/vendor/github.com/gogf/gf/v2/os/gfile/gfile_search.go b/vendor/github.com/gogf/gf/v2/os/gfile/gfile_search.go new file mode 100644 index 00000000..a0d06999 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/gfile/gfile_search.go @@ -0,0 +1,58 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gfile + +import ( + "bytes" + "fmt" + + "github.com/gogf/gf/v2/container/garray" + "github.com/gogf/gf/v2/errors/gerror" +) + +// Search searches file by name `name` in following paths with priority: +// prioritySearchPaths, Pwd()、SelfDir()、MainPkgPath(). +// It returns the absolute file path of `name` if found, or en empty string if not found. +func Search(name string, prioritySearchPaths ...string) (realPath string, err error) { + // Check if it's an absolute path. + realPath = RealPath(name) + if realPath != "" { + return + } + // Search paths array. + array := garray.NewStrArray() + array.Append(prioritySearchPaths...) + array.Append(Pwd(), SelfDir()) + if path := MainPkgPath(); path != "" { + array.Append(path) + } + // Remove repeated items. + array.Unique() + // Do the searching. + array.RLockFunc(func(array []string) { + path := "" + for _, v := range array { + path = RealPath(v + Separator + name) + if path != "" { + realPath = path + break + } + } + }) + // If it fails searching, it returns formatted error. 
+ if realPath == "" { + buffer := bytes.NewBuffer(nil) + buffer.WriteString(fmt.Sprintf(`cannot find "%s" in following paths:`, name)) + array.RLockFunc(func(array []string) { + for k, v := range array { + buffer.WriteString(fmt.Sprintf("\n%d. %s", k+1, v)) + } + }) + err = gerror.New(buffer.String()) + } + return +} diff --git a/vendor/github.com/gogf/gf/v2/os/gfile/gfile_size.go b/vendor/github.com/gogf/gf/v2/os/gfile/gfile_size.go new file mode 100644 index 00000000..fb8fca76 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/gfile/gfile_size.go @@ -0,0 +1,131 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gfile + +import ( + "fmt" + "os" + "strconv" + "strings" +) + +// Size returns the size of file specified by `path` in byte. +func Size(path string) int64 { + s, e := os.Stat(path) + if e != nil { + return 0 + } + return s.Size() +} + +// SizeFormat returns the size of file specified by `path` in format string. +func SizeFormat(path string) string { + return FormatSize(Size(path)) +} + +// ReadableSize formats size of file given by `path`, for more human readable. +func ReadableSize(path string) string { + return FormatSize(Size(path)) +} + +// StrToSize converts formatted size string to its size in bytes. +func StrToSize(sizeStr string) int64 { + i := 0 + for ; i < len(sizeStr); i++ { + if sizeStr[i] == '.' 
|| (sizeStr[i] >= '0' && sizeStr[i] <= '9') { + continue + } else { + break + } + } + var ( + unit = sizeStr[i:] + number, _ = strconv.ParseFloat(sizeStr[:i], 64) + ) + if unit == "" { + return int64(number) + } + switch strings.ToLower(unit) { + case "b", "bytes": + return int64(number) + case "k", "kb", "ki", "kib", "kilobyte": + return int64(number * 1024) + case "m", "mb", "mi", "mib", "mebibyte": + return int64(number * 1024 * 1024) + case "g", "gb", "gi", "gib", "gigabyte": + return int64(number * 1024 * 1024 * 1024) + case "t", "tb", "ti", "tib", "terabyte": + return int64(number * 1024 * 1024 * 1024 * 1024) + case "p", "pb", "pi", "pib", "petabyte": + return int64(number * 1024 * 1024 * 1024 * 1024 * 1024) + case "e", "eb", "ei", "eib", "exabyte": + return int64(number * 1024 * 1024 * 1024 * 1024 * 1024 * 1024) + case "z", "zb", "zi", "zib", "zettabyte": + return int64(number * 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024) + case "y", "yb", "yi", "yib", "yottabyte": + return int64(number * 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024) + case "bb", "brontobyte": + return int64(number * 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024) + } + return -1 +} + +// FormatSize formats size `raw` for more manually readable. 
+func FormatSize(raw int64) string { + var r float64 = float64(raw) + var t float64 = 1024 + var d float64 = 1 + if r < t { + return fmt.Sprintf("%.2fB", r/d) + } + d *= 1024 + t *= 1024 + if r < t { + return fmt.Sprintf("%.2fK", r/d) + } + d *= 1024 + t *= 1024 + if r < t { + return fmt.Sprintf("%.2fM", r/d) + } + d *= 1024 + t *= 1024 + if r < t { + return fmt.Sprintf("%.2fG", r/d) + } + d *= 1024 + t *= 1024 + if r < t { + return fmt.Sprintf("%.2fT", r/d) + } + d *= 1024 + t *= 1024 + if r < t { + return fmt.Sprintf("%.2fP", r/d) + } + d *= 1024 + t *= 1024 + if r < t { + return fmt.Sprintf("%.2fE", r/d) + } + d *= 1024 + t *= 1024 + if r < t { + return fmt.Sprintf("%.2fZ", r/d) + } + d *= 1024 + t *= 1024 + if r < t { + return fmt.Sprintf("%.2fY", r/d) + } + d *= 1024 + t *= 1024 + if r < t { + return fmt.Sprintf("%.2fBB", r/d) + } + return "TooLarge" +} diff --git a/vendor/github.com/gogf/gf/v2/os/gfile/gfile_sort.go b/vendor/github.com/gogf/gf/v2/os/gfile/gfile_sort.go new file mode 100644 index 00000000..3772cb07 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/gfile/gfile_sort.go @@ -0,0 +1,40 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gfile + +import ( + "strings" + + "github.com/gogf/gf/v2/container/garray" +) + +// fileSortFunc is the comparison function for files. +// It sorts the array in order of: directory -> file. +// If `path1` and `path2` are the same type, it then sorts them as strings. 
+func fileSortFunc(path1, path2 string) int { + isDirPath1 := IsDir(path1) + isDirPath2 := IsDir(path2) + if isDirPath1 && !isDirPath2 { + return -1 + } + if !isDirPath1 && isDirPath2 { + return 1 + } + if n := strings.Compare(path1, path2); n != 0 { + return n + } else { + return -1 + } +} + +// SortFiles sorts the `files` in order of: directory -> file. +// Note that the item of `files` should be absolute path. +func SortFiles(files []string) []string { + array := garray.NewSortedStrArrayComparator(fileSortFunc) + array.Add(files...) + return array.Slice() +} diff --git a/vendor/github.com/gogf/gf/v2/os/gfile/gfile_source.go b/vendor/github.com/gogf/gf/v2/os/gfile/gfile_source.go new file mode 100644 index 00000000..d77f0b9b --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/gfile/gfile_source.go @@ -0,0 +1,91 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gfile + +import ( + "os" + "runtime" + "strings" + + "github.com/gogf/gf/v2/text/gregex" + "github.com/gogf/gf/v2/text/gstr" +) + +var ( + // goRootForFilter is used for stack filtering purpose. + goRootForFilter = runtime.GOROOT() +) + +func init() { + if goRootForFilter != "" { + goRootForFilter = strings.ReplaceAll(goRootForFilter, "\\", "/") + } +} + +// MainPkgPath returns absolute file path of package main, +// which contains the entrance function main. +// +// It's only available in develop environment. +// +// Note1: Only valid for source development environments, +// IE only valid for systems that generate this executable. +// +// Note2: When the method is called for the first time, if it is in an asynchronous goroutine, +// the method may not get the main package path. +func MainPkgPath() string { + // It is only for source development environments. 
+ if goRootForFilter == "" { + return "" + } + path := mainPkgPath.Val() + if path != "" { + return path + } + var lastFile string + for i := 1; i < 10000; i++ { + if pc, file, _, ok := runtime.Caller(i); ok { + if goRootForFilter != "" && len(file) >= len(goRootForFilter) && file[0:len(goRootForFilter)] == goRootForFilter { + continue + } + if Ext(file) != ".go" { + continue + } + lastFile = file + // Check if it is called in package initialization function, + // in which it here cannot retrieve main package path, + // it so just returns that can make next check. + if fn := runtime.FuncForPC(pc); fn != nil { + array := gstr.Split(fn.Name(), ".") + if array[0] != "main" { + continue + } + } + if gregex.IsMatchString(`package\s+main\s+`, GetContents(file)) { + mainPkgPath.Set(Dir(file)) + return Dir(file) + } + } else { + break + } + } + // If it still cannot find the path of the package main, + // it recursively searches the directory and its parents directory of the last go file. + // It's usually necessary for uint testing cases of business project. + if lastFile != "" { + for path = Dir(lastFile); len(path) > 1 && Exists(path) && path[len(path)-1] != os.PathSeparator; { + files, _ := ScanDir(path, "*.go") + for _, v := range files { + if gregex.IsMatchString(`package\s+main\s+`, GetContents(v)) { + mainPkgPath.Set(path) + return path + } + } + path = Dir(path) + } + } + return "" +} diff --git a/vendor/github.com/gogf/gf/v2/os/gfile/gfile_time.go b/vendor/github.com/gogf/gf/v2/os/gfile/gfile_time.go new file mode 100644 index 00000000..21053b73 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/gfile/gfile_time.go @@ -0,0 +1,39 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. 
+ +package gfile + +import ( + "os" + "time" +) + +// MTime returns the modification time of file given by `path` in second. +func MTime(path string) time.Time { + s, e := os.Stat(path) + if e != nil { + return time.Time{} + } + return s.ModTime() +} + +// MTimestamp returns the modification time of file given by `path` in second. +func MTimestamp(path string) int64 { + mtime := MTime(path) + if mtime.IsZero() { + return -1 + } + return mtime.Unix() +} + +// MTimestampMilli returns the modification time of file given by `path` in millisecond. +func MTimestampMilli(path string) int64 { + mtime := MTime(path) + if mtime.IsZero() { + return -1 + } + return mtime.UnixNano() / 1000000 +} diff --git a/vendor/github.com/gogf/gf/v2/os/gfpool/gfpool.go b/vendor/github.com/gogf/gf/v2/os/gfpool/gfpool.go new file mode 100644 index 00000000..5eec01b2 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/gfpool/gfpool.go @@ -0,0 +1,41 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +// Package gfpool provides io-reusable pool for file pointer. +package gfpool + +import ( + "os" + "time" + + "github.com/gogf/gf/v2/container/gmap" + "github.com/gogf/gf/v2/container/gpool" + "github.com/gogf/gf/v2/container/gtype" +) + +// Pool pointer pool. +type Pool struct { + id *gtype.Int // Pool id, which is used to mark this pool whether recreated. + pool *gpool.Pool // Underlying pool. + init *gtype.Bool // Whether initialized, used for marking this file added to fsnotify, and it can only be added just once. + ttl time.Duration // Time to live for file pointer items. +} + +// File is an item in the pool. +type File struct { + *os.File // Underlying file pointer. + stat os.FileInfo // State of current file pointer. 
+ pid int // Belonging pool id, which is set when file pointer created. It's used to check whether the pool is recreated. + pool *Pool // Belonging ool. + flag int // Flash for opening file. + perm os.FileMode // Permission for opening file. + path string // Absolute path of the file. +} + +var ( + // Global file pointer pool. + pools = gmap.NewStrAnyMap(true) +) diff --git a/vendor/github.com/gogf/gf/v2/os/gfpool/gfpool_file.go b/vendor/github.com/gogf/gf/v2/os/gfpool/gfpool_file.go new file mode 100644 index 00000000..052866d6 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/gfpool/gfpool_file.go @@ -0,0 +1,77 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gfpool + +import ( + "fmt" + "os" + "time" + + "github.com/gogf/gf/v2/errors/gerror" +) + +// Open creates and returns a file item with given file path, flag and opening permission. +// It automatically creates an associated file pointer pool internally when it's called first time. +// It retrieves a file item from the file pointer pool after then. +func Open(path string, flag int, perm os.FileMode, ttl ...time.Duration) (file *File, err error) { + var fpTTL time.Duration + if len(ttl) > 0 { + fpTTL = ttl[0] + } + // DO NOT search the path here wasting performance! + // Leave following codes just for warning you. + // + // path, err = gfile.Search(path) + // if err != nil { + // return nil, err + // } + pool := pools.GetOrSetFuncLock( + fmt.Sprintf("%s&%d&%d&%d", path, flag, fpTTL, perm), + func() interface{} { + return New(path, flag, perm, fpTTL) + }, + ).(*Pool) + + return pool.File() +} + +// Get returns a file item with given file path, flag and opening permission. +// It retrieves a file item from the file pointer pool after then. 
+func Get(path string, flag int, perm os.FileMode, ttl ...time.Duration) (file *File) { + var fpTTL time.Duration + if len(ttl) > 0 { + fpTTL = ttl[0] + } + + f, found := pools.Search(fmt.Sprintf("%s&%d&%d&%d", path, flag, fpTTL, perm)) + if !found { + return nil + } + + fp, _ := f.(*Pool).pool.Get() + return fp.(*File) +} + +// Stat returns the FileInfo structure describing file. +func (f *File) Stat() (os.FileInfo, error) { + if f.stat == nil { + return nil, gerror.New("file stat is empty") + } + return f.stat, nil +} + +// Close puts the file pointer back to the file pointer pool. +func (f *File) Close(close ...bool) error { + if len(close) > 0 && close[0] { + f.File.Close() + } + + if f.pid == f.pool.id.Val() { + return f.pool.pool.Put(f) + } + return nil +} diff --git a/vendor/github.com/gogf/gf/v2/os/gfpool/gfpool_pool.go b/vendor/github.com/gogf/gf/v2/os/gfpool/gfpool_pool.go new file mode 100644 index 00000000..6cee707e --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/gfpool/gfpool_pool.go @@ -0,0 +1,122 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gfpool + +import ( + "os" + "time" + + "github.com/gogf/gf/v2/container/gpool" + "github.com/gogf/gf/v2/container/gtype" + "github.com/gogf/gf/v2/errors/gerror" + "github.com/gogf/gf/v2/os/gfsnotify" +) + +// New creates and returns a file pointer pool with given file path, flag and opening permission. +// +// Note the expiration logic: +// ttl = 0 : not expired; +// ttl < 0 : immediate expired after use; +// ttl > 0 : timeout expired; +// It is not expired in default. 
+func New(path string, flag int, perm os.FileMode, ttl ...time.Duration) *Pool { + var fpTTL time.Duration + if len(ttl) > 0 { + fpTTL = ttl[0] + } + p := &Pool{ + id: gtype.NewInt(), + ttl: fpTTL, + init: gtype.NewBool(), + } + p.pool = newFilePool(p, path, flag, perm, fpTTL) + return p +} + +// newFilePool creates and returns a file pointer pool with given file path, flag and opening permission. +func newFilePool(p *Pool, path string, flag int, perm os.FileMode, ttl time.Duration) *gpool.Pool { + pool := gpool.New(ttl, func() (interface{}, error) { + file, err := os.OpenFile(path, flag, perm) + if err != nil { + err = gerror.Wrapf(err, `os.OpenFile failed for file "%s", flag "%d", perm "%s"`, path, flag, perm) + return nil, err + } + return &File{ + File: file, + pid: p.id.Val(), + pool: p, + flag: flag, + perm: perm, + path: path, + }, nil + }, func(i interface{}) { + _ = i.(*File).File.Close() + }) + return pool +} + +// File retrieves file item from the file pointer pool and returns it. It creates one if +// the file pointer pool is empty. +// Note that it should be closed when it will never be used. When it's closed, it is not +// really closed the underlying file pointer but put back to the file pinter pool. +func (p *Pool) File() (*File, error) { + if v, err := p.pool.Get(); err != nil { + return nil, err + } else { + f := v.(*File) + f.stat, err = os.Stat(f.path) + if f.flag&os.O_CREATE > 0 { + if os.IsNotExist(err) { + if f.File, err = os.OpenFile(f.path, f.flag, f.perm); err != nil { + return nil, err + } else { + // Retrieve the state of the new created file. 
+ if f.stat, err = f.File.Stat(); err != nil { + return nil, err + } + } + } + } + if f.flag&os.O_TRUNC > 0 { + if f.stat.Size() > 0 { + if err = f.Truncate(0); err != nil { + return nil, err + } + } + } + if f.flag&os.O_APPEND > 0 { + if _, err = f.Seek(0, 2); err != nil { + return nil, err + } + } else { + if _, err = f.Seek(0, 0); err != nil { + return nil, err + } + } + // It firstly checks using !p.init.Val() for performance purpose. + if !p.init.Val() && p.init.Cas(false, true) { + _, _ = gfsnotify.Add(f.path, func(event *gfsnotify.Event) { + // If the file is removed or renamed, recreates the pool by increasing the pool id. + if event.IsRemove() || event.IsRename() { + // It drops the old pool. + p.id.Add(1) + // Clears the pool items staying in the pool. + p.pool.Clear() + // It uses another adding to drop the file items between the two adding. + // Whenever the pool id changes, the pool will be recreated. + p.id.Add(1) + } + }, false) + } + return f, nil + } +} + +// Close closes current file pointer pool. +func (p *Pool) Close() { + p.pool.Close() +} diff --git a/vendor/github.com/gogf/gf/v2/os/gfsnotify/gfsnotify.go b/vendor/github.com/gogf/gf/v2/os/gfsnotify/gfsnotify.go new file mode 100644 index 00000000..be58e2c3 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/gfsnotify/gfsnotify.go @@ -0,0 +1,170 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +// Package gfsnotify provides a platform-independent interface for file system notifications. 
+package gfsnotify + +import ( + "context" + "sync" + "time" + + "github.com/fsnotify/fsnotify" + + "github.com/gogf/gf/v2/container/glist" + "github.com/gogf/gf/v2/container/gmap" + "github.com/gogf/gf/v2/container/gqueue" + "github.com/gogf/gf/v2/container/gset" + "github.com/gogf/gf/v2/container/gtype" + "github.com/gogf/gf/v2/errors/gcode" + "github.com/gogf/gf/v2/errors/gerror" + "github.com/gogf/gf/v2/internal/intlog" + "github.com/gogf/gf/v2/os/gcache" +) + +// Watcher is the monitor for file changes. +type Watcher struct { + watcher *fsnotify.Watcher // Underlying fsnotify object. + events *gqueue.Queue // Used for internal event management. + cache *gcache.Cache // Used for repeated event filter. + nameSet *gset.StrSet // Used for AddOnce feature. + callbacks *gmap.StrAnyMap // Path(file/folder) to callbacks mapping. + closeChan chan struct{} // Used for watcher closing notification. +} + +// Callback is the callback function for Watcher. +type Callback struct { + Id int // Unique id for callback object. + Func func(event *Event) // Callback function. + Path string // Bound file path (absolute). + name string // Registered name for AddOnce. + elem *glist.Element // Element in the callbacks of watcher. + recursive bool // Is bound to path recursively or not. +} + +// Event is the event produced by underlying fsnotify. +type Event struct { + event fsnotify.Event // Underlying event. + Path string // Absolute file path. + Op Op // File operation. + Watcher *Watcher // Parent watcher. +} + +// Op is the bits union for file operations. +type Op uint32 + +// internalPanic is the custom panic for internal usage. +type internalPanic string + +const ( + CREATE Op = 1 << iota + WRITE + REMOVE + RENAME + CHMOD +) + +const ( + repeatEventFilterDuration = time.Millisecond // Duration for repeated event filter. + callbackExitEventPanicStr internalPanic = "exit" // Custom exit event for internal usage. 
+) + +var ( + mu sync.Mutex // Mutex for concurrent safety of defaultWatcher. + defaultWatcher *Watcher // Default watcher. + callbackIdMap = gmap.NewIntAnyMap(true) // Id to callback mapping. + callbackIdGenerator = gtype.NewInt() // Atomic id generator for callback. +) + +// New creates and returns a new watcher. +// Note that the watcher number is limited by the file handle setting of the system. +// Eg: fs.inotify.max_user_instances system variable in linux systems. +func New() (*Watcher, error) { + w := &Watcher{ + cache: gcache.New(), + events: gqueue.New(), + nameSet: gset.NewStrSet(true), + closeChan: make(chan struct{}), + callbacks: gmap.NewStrAnyMap(true), + } + if watcher, err := fsnotify.NewWatcher(); err == nil { + w.watcher = watcher + } else { + intlog.Printf(context.TODO(), "New watcher failed: %v", err) + return nil, err + } + w.watchLoop() + w.eventLoop() + return w, nil +} + +// Add monitors `path` using default watcher with callback function `callbackFunc`. +// The optional parameter `recursive` specifies whether monitoring the `path` recursively, which is true in default. +func Add(path string, callbackFunc func(event *Event), recursive ...bool) (callback *Callback, err error) { + w, err := getDefaultWatcher() + if err != nil { + return nil, err + } + return w.Add(path, callbackFunc, recursive...) +} + +// AddOnce monitors `path` using default watcher with callback function `callbackFunc` only once using unique name `name`. +// If AddOnce is called multiple times with the same `name` parameter, `path` is only added to monitor once. It returns error +// if it's called twice with the same `name`. +// +// The optional parameter `recursive` specifies whether monitoring the `path` recursively, which is true in default. 
+func AddOnce(name, path string, callbackFunc func(event *Event), recursive ...bool) (callback *Callback, err error) { + w, err := getDefaultWatcher() + if err != nil { + return nil, err + } + return w.AddOnce(name, path, callbackFunc, recursive...) +} + +// Remove removes all monitoring callbacks of given `path` from watcher recursively. +func Remove(path string) error { + w, err := getDefaultWatcher() + if err != nil { + return err + } + return w.Remove(path) +} + +// RemoveCallback removes specified callback with given id from watcher. +func RemoveCallback(callbackId int) error { + w, err := getDefaultWatcher() + if err != nil { + return err + } + callback := (*Callback)(nil) + if r := callbackIdMap.Get(callbackId); r != nil { + callback = r.(*Callback) + } + if callback == nil { + return gerror.NewCodef(gcode.CodeInvalidParameter, `callback for id %d not found`, callbackId) + } + w.RemoveCallback(callbackId) + return nil +} + +// Exit is only used in the callback function, which can be used to remove current callback +// of itself from the watcher. +func Exit() { + panic(callbackExitEventPanicStr) +} + +// getDefaultWatcher creates and returns the default watcher. +// This is used for lazy initialization purpose. +func getDefaultWatcher() (*Watcher, error) { + mu.Lock() + defer mu.Unlock() + if defaultWatcher != nil { + return defaultWatcher, nil + } + var err error + defaultWatcher, err = New() + return defaultWatcher, err +} diff --git a/vendor/github.com/gogf/gf/v2/os/gfsnotify/gfsnotify_event.go b/vendor/github.com/gogf/gf/v2/os/gfsnotify/gfsnotify_event.go new file mode 100644 index 00000000..f91638ca --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/gfsnotify/gfsnotify_event.go @@ -0,0 +1,37 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// ThIs Source Code Form Is subject to the terms of the MIT License. +// If a copy of the MIT was not dIstributed with thIs file, +// You can obtain one at https://github.com/gogf/gf. 
+ +package gfsnotify + +// String returns current event as string. +func (e *Event) String() string { + return e.event.String() +} + +// IsCreate checks whether current event contains file/folder create event. +func (e *Event) IsCreate() bool { + return e.Op == 1 || e.Op&CREATE == CREATE +} + +// IsWrite checks whether current event contains file/folder write event. +func (e *Event) IsWrite() bool { + return e.Op&WRITE == WRITE +} + +// IsRemove checks whether current event contains file/folder remove event. +func (e *Event) IsRemove() bool { + return e.Op&REMOVE == REMOVE +} + +// IsRename checks whether current event contains file/folder rename event. +func (e *Event) IsRename() bool { + return e.Op&RENAME == RENAME +} + +// IsChmod checks whether current event contains file/folder chmod event. +func (e *Event) IsChmod() bool { + return e.Op&CHMOD == CHMOD +} diff --git a/vendor/github.com/gogf/gf/v2/os/gfsnotify/gfsnotify_filefunc.go b/vendor/github.com/gogf/gf/v2/os/gfsnotify/gfsnotify_filefunc.go new file mode 100644 index 00000000..c8f4c7b0 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/gfsnotify/gfsnotify_filefunc.go @@ -0,0 +1,134 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// ThIs Source Code Form Is subject to the terms of the MIT License. +// If a copy of the MIT was not dIstributed with thIs file, +// You can obtain one at https://github.com/gogf/gf. + +package gfsnotify + +import ( + "fmt" + "os" + "path/filepath" + "sort" + "strings" + + "github.com/gogf/gf/v2/errors/gerror" +) + +// fileDir returns all but the last element of path, typically the path's directory. +// After dropping the final element, Dir calls Clean on the path and trailing +// slashes are removed. +// If the path is empty, Dir returns ".". +// If the path consists entirely of separators, Dir returns a single separator. +// The returned path does not end in a separator unless it is the root directory. 
+func fileDir(path string) string { + return filepath.Dir(path) +} + +// fileRealPath converts the given `path` to its absolute path +// and checks if the file path exists. +// If the file does not exist, return an empty string. +func fileRealPath(path string) string { + p, err := filepath.Abs(path) + if err != nil { + return "" + } + if !fileExists(p) { + return "" + } + return p +} + +// fileExists checks whether given `path` exist. +func fileExists(path string) bool { + if stat, err := os.Stat(path); stat != nil && !os.IsNotExist(err) { + return true + } + return false +} + +// fileIsDir checks whether given `path` a directory. +func fileIsDir(path string) bool { + s, err := os.Stat(path) + if err != nil { + return false + } + return s.IsDir() +} + +// fileAllDirs returns all sub-folders including itself of given `path` recursively. +func fileAllDirs(path string) (list []string) { + list = []string{path} + file, err := os.Open(path) + if err != nil { + return list + } + defer file.Close() + names, err := file.Readdirnames(-1) + if err != nil { + return list + } + for _, name := range names { + tempPath := fmt.Sprintf("%s%s%s", path, string(filepath.Separator), name) + if fileIsDir(tempPath) { + if array := fileAllDirs(tempPath); len(array) > 0 { + list = append(list, array...) + } + } + } + return +} + +// fileScanDir returns all sub-files with absolute paths of given `path`, +// It scans directory recursively if given parameter `recursive` is true. +func fileScanDir(path string, pattern string, recursive ...bool) ([]string, error) { + list, err := doFileScanDir(path, pattern, recursive...) + if err != nil { + return nil, err + } + if len(list) > 0 { + sort.Strings(list) + } + return list, nil +} + +// doFileScanDir is an internal method which scans directory +// and returns the absolute path list of files that are not sorted. +// +// The pattern parameter `pattern` supports multiple file name patterns, +// using the ',' symbol to separate multiple patterns. 
+// +// It scans directory recursively if given parameter `recursive` is true. +func doFileScanDir(path string, pattern string, recursive ...bool) ([]string, error) { + var ( + list []string + file, err = os.Open(path) + ) + if err != nil { + err = gerror.Wrapf(err, `os.Open failed for path "%s"`, path) + return nil, err + } + defer file.Close() + names, err := file.Readdirnames(-1) + if err != nil { + err = gerror.Wrapf(err, `read directory files failed for path "%s"`, path) + return nil, err + } + filePath := "" + for _, name := range names { + filePath = fmt.Sprintf("%s%s%s", path, string(filepath.Separator), name) + if fileIsDir(filePath) && len(recursive) > 0 && recursive[0] { + array, _ := doFileScanDir(filePath, pattern, true) + if len(array) > 0 { + list = append(list, array...) + } + } + for _, p := range strings.Split(pattern, ",") { + if match, _ := filepath.Match(strings.TrimSpace(p), name); match { + list = append(list, filePath) + } + } + } + return list, nil +} diff --git a/vendor/github.com/gogf/gf/v2/os/gfsnotify/gfsnotify_watcher.go b/vendor/github.com/gogf/gf/v2/os/gfsnotify/gfsnotify_watcher.go new file mode 100644 index 00000000..80da7638 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/gfsnotify/gfsnotify_watcher.go @@ -0,0 +1,198 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gfsnotify + +import ( + "context" + + "github.com/gogf/gf/v2/container/glist" + "github.com/gogf/gf/v2/errors/gcode" + "github.com/gogf/gf/v2/errors/gerror" + "github.com/gogf/gf/v2/internal/intlog" +) + +// Add monitors `path` with callback function `callbackFunc` to the watcher. +// The optional parameter `recursive` specifies whether monitoring the `path` recursively, +// which is true in default. 
+func (w *Watcher) Add(path string, callbackFunc func(event *Event), recursive ...bool) (callback *Callback, err error) { + return w.AddOnce("", path, callbackFunc, recursive...) +} + +// AddOnce monitors `path` with callback function `callbackFunc` only once using unique name +// `name` to the watcher. If AddOnce is called multiple times with the same `name` parameter, +// `path` is only added to monitor once. +// +// It returns error if it's called twice with the same `name`. +// +// The optional parameter `recursive` specifies whether monitoring the `path` recursively, +// which is true in default. +func (w *Watcher) AddOnce(name, path string, callbackFunc func(event *Event), recursive ...bool) (callback *Callback, err error) { + w.nameSet.AddIfNotExistFuncLock(name, func() bool { + // Firstly add the path to watcher. + callback, err = w.addWithCallbackFunc(name, path, callbackFunc, recursive...) + if err != nil { + return false + } + // If it's recursive adding, it then adds all sub-folders to the monitor. + // NOTE: + // 1. It only recursively adds **folders** to the monitor, NOT files, + // because if the folders are monitored and their sub-files are also monitored. + // 2. It bounds no callbacks to the folders, because it will search the callbacks + // from its parent recursively if any event produced. + if fileIsDir(path) && (len(recursive) == 0 || recursive[0]) { + for _, subPath := range fileAllDirs(path) { + if fileIsDir(subPath) { + if err = w.watcher.Add(subPath); err != nil { + err = gerror.Wrapf(err, `add watch failed for path "%s"`, subPath) + } else { + intlog.Printf(context.TODO(), "watcher adds monitor for: %s", subPath) + } + } + } + } + if name == "" { + return false + } + return true + }) + return +} + +// addWithCallbackFunc adds the path to underlying monitor, creates and returns a callback object. +// Very note that if it calls multiple times with the same `path`, the latest one will overwrite the previous one. 
+func (w *Watcher) addWithCallbackFunc(name, path string, callbackFunc func(event *Event), recursive ...bool) (callback *Callback, err error) { + // Check and convert the given path to absolute path. + if t := fileRealPath(path); t == "" { + return nil, gerror.NewCodef(gcode.CodeInvalidParameter, `"%s" does not exist`, path) + } else { + path = t + } + // Create callback object. + callback = &Callback{ + Id: callbackIdGenerator.Add(1), + Func: callbackFunc, + Path: path, + name: name, + recursive: true, + } + if len(recursive) > 0 { + callback.recursive = recursive[0] + } + // Register the callback to watcher. + w.callbacks.LockFunc(func(m map[string]interface{}) { + list := (*glist.List)(nil) + if v, ok := m[path]; !ok { + list = glist.New(true) + m[path] = list + } else { + list = v.(*glist.List) + } + callback.elem = list.PushBack(callback) + }) + // Add the path to underlying monitor. + if err = w.watcher.Add(path); err != nil { + err = gerror.Wrapf(err, `add watch failed for path "%s"`, path) + } else { + intlog.Printf(context.TODO(), "watcher adds monitor for: %s", path) + } + // Add the callback to global callback map. + callbackIdMap.Set(callback.Id, callback) + return +} + +// Close closes the watcher. +func (w *Watcher) Close() { + w.events.Close() + if err := w.watcher.Close(); err != nil { + intlog.Errorf(context.TODO(), `%+v`, err) + } + close(w.closeChan) +} + +// Remove removes monitor and all callbacks associated with the `path` recursively. +func (w *Watcher) Remove(path string) error { + // Firstly remove the callbacks of the path. + if value := w.callbacks.Remove(path); value != nil { + list := value.(*glist.List) + for { + if item := list.PopFront(); item != nil { + callbackIdMap.Remove(item.(*Callback).Id) + } else { + break + } + } + } + // Secondly remove monitor of all sub-files which have no callbacks. 
+ if subPaths, err := fileScanDir(path, "*", true); err == nil && len(subPaths) > 0 { + for _, subPath := range subPaths { + if w.checkPathCanBeRemoved(subPath) { + if internalErr := w.watcher.Remove(subPath); internalErr != nil { + intlog.Errorf(context.TODO(), `%+v`, internalErr) + } + } + } + } + // Lastly remove the monitor of the path from underlying monitor. + err := w.watcher.Remove(path) + if err != nil { + err = gerror.Wrapf(err, `remove watch failed for path "%s"`, path) + } + return err +} + +// checkPathCanBeRemoved checks whether the given path have no callbacks bound. +func (w *Watcher) checkPathCanBeRemoved(path string) bool { + // Firstly check the callbacks in the watcher directly. + if v := w.callbacks.Get(path); v != nil { + return false + } + // Secondly check its parent whether has callbacks. + dirPath := fileDir(path) + if v := w.callbacks.Get(dirPath); v != nil { + for _, c := range v.(*glist.List).FrontAll() { + if c.(*Callback).recursive { + return false + } + } + return false + } + // Recursively check its parent. + parentDirPath := "" + for { + parentDirPath = fileDir(dirPath) + if parentDirPath == dirPath { + break + } + if v := w.callbacks.Get(parentDirPath); v != nil { + for _, c := range v.(*glist.List).FrontAll() { + if c.(*Callback).recursive { + return false + } + } + return false + } + dirPath = parentDirPath + } + return true +} + +// RemoveCallback removes callback with given callback id from watcher. 
+func (w *Watcher) RemoveCallback(callbackId int) { + callback := (*Callback)(nil) + if r := callbackIdMap.Get(callbackId); r != nil { + callback = r.(*Callback) + } + if callback != nil { + if r := w.callbacks.Get(callback.Path); r != nil { + r.(*glist.List).Remove(callback.elem) + } + callbackIdMap.Remove(callbackId) + if callback.name != "" { + w.nameSet.Remove(callback.name) + } + } +} diff --git a/vendor/github.com/gogf/gf/v2/os/gfsnotify/gfsnotify_watcher_loop.go b/vendor/github.com/gogf/gf/v2/os/gfsnotify/gfsnotify_watcher_loop.go new file mode 100644 index 00000000..59f558fd --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/gfsnotify/gfsnotify_watcher_loop.go @@ -0,0 +1,181 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gfsnotify + +import ( + "context" + + "github.com/gogf/gf/v2/container/glist" + "github.com/gogf/gf/v2/internal/intlog" +) + +// watchLoop starts the loop for event listening from underlying inotify monitor. +func (w *Watcher) watchLoop() { + go func() { + for { + select { + // Close event. + case <-w.closeChan: + return + + // Event listening. + case ev := <-w.watcher.Events: + // Filter the repeated event in custom duration. + _, err := w.cache.SetIfNotExist( + context.Background(), + ev.String(), + func(ctx context.Context) (value interface{}, err error) { + w.events.Push(&Event{ + event: ev, + Path: ev.Name, + Op: Op(ev.Op), + Watcher: w, + }) + return struct{}{}, nil + }, repeatEventFilterDuration, + ) + if err != nil { + intlog.Errorf(context.TODO(), `%+v`, err) + } + + case err := <-w.watcher.Errors: + intlog.Errorf(context.TODO(), `%+v`, err) + } + } + }() +} + +// eventLoop is the core event handler. 
+func (w *Watcher) eventLoop() { + go func() { + for { + if v := w.events.Pop(); v != nil { + event := v.(*Event) + // If there's no any callback of this path, it removes it from monitor. + callbacks := w.getCallbacks(event.Path) + if len(callbacks) == 0 { + _ = w.watcher.Remove(event.Path) + continue + } + switch { + case event.IsRemove(): + // It should check again the existence of the path. + // It adds it back to the monitor if it still exists. + if fileExists(event.Path) { + // It adds the path back to monitor. + // We need no worry about the repeat adding. + if err := w.watcher.Add(event.Path); err != nil { + intlog.Errorf(context.TODO(), `%+v`, err) + } else { + intlog.Printf(context.TODO(), "fake remove event, watcher re-adds monitor for: %s", event.Path) + } + // Change the event to RENAME, which means it renames itself to its origin name. + event.Op = RENAME + } + + case event.IsRename(): + // It should check again the existence of the path. + // It adds it back to the monitor if it still exists. + // Especially Some editors might do RENAME and then CHMOD when it's editing file. + if fileExists(event.Path) { + // It might lost the monitoring for the path, so we add the path back to monitor. + // We need no worry about the repeat adding. + if err := w.watcher.Add(event.Path); err != nil { + intlog.Errorf(context.TODO(), `%+v`, err) + } else { + intlog.Printf(context.TODO(), "fake rename event, watcher re-adds monitor for: %s", event.Path) + } + // Change the event to CHMOD. + event.Op = CHMOD + } + + case event.IsCreate(): + // ========================================= + // Note that it here just adds the path to monitor without any callback registering, + // because its parent already has the callbacks. + // ========================================= + if fileIsDir(event.Path) { + // If it's a folder, it then does adding recursively to monitor. 
+ for _, subPath := range fileAllDirs(event.Path) { + if fileIsDir(subPath) { + if err := w.watcher.Add(subPath); err != nil { + intlog.Errorf(context.TODO(), `%+v`, err) + } else { + intlog.Printf(context.TODO(), "folder creation event, watcher adds monitor for: %s", subPath) + } + } + } + } else { + // If it's a file, it directly adds it to monitor. + if err := w.watcher.Add(event.Path); err != nil { + intlog.Errorf(context.TODO(), `%+v`, err) + } else { + intlog.Printf(context.TODO(), "file creation event, watcher adds monitor for: %s", event.Path) + } + } + } + // Calling the callbacks in order. + for _, callback := range callbacks { + go func(callback *Callback) { + defer func() { + if err := recover(); err != nil { + switch err { + case callbackExitEventPanicStr: + w.RemoveCallback(callback.Id) + default: + panic(err) + } + } + }() + callback.Func(event) + }(callback) + } + } else { + break + } + } + }() +} + +// getCallbacks searches and returns all callbacks with given `path`. +// It also searches its parents for callbacks if they're recursive. +func (w *Watcher) getCallbacks(path string) (callbacks []*Callback) { + // Firstly add the callbacks of itself. + if v := w.callbacks.Get(path); v != nil { + for _, v := range v.(*glist.List).FrontAll() { + callback := v.(*Callback) + callbacks = append(callbacks, callback) + } + } + // Secondly searches its direct parent for callbacks. + // It is special handling here, which is the different between `recursive` and `not recursive` logic + // for direct parent folder of `path` that events are from. + dirPath := fileDir(path) + if v := w.callbacks.Get(dirPath); v != nil { + for _, v := range v.(*glist.List).FrontAll() { + callback := v.(*Callback) + callbacks = append(callbacks, callback) + } + } + // Lastly searches all the parents of directory of `path` recursively for callbacks. 
+ for { + parentDirPath := fileDir(dirPath) + if parentDirPath == dirPath { + break + } + if v := w.callbacks.Get(parentDirPath); v != nil { + for _, v := range v.(*glist.List).FrontAll() { + callback := v.(*Callback) + if callback.recursive { + callbacks = append(callbacks, callback) + } + } + } + dirPath = parentDirPath + } + return +} diff --git a/vendor/github.com/gogf/gf/v2/os/glog/glog.go b/vendor/github.com/gogf/gf/v2/os/glog/glog.go new file mode 100644 index 00000000..9a5a0dd6 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/glog/glog.go @@ -0,0 +1,75 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +// Package glog implements powerful and easy-to-use leveled logging functionality. +package glog + +import ( + "context" + + "github.com/gogf/gf/v2/internal/command" + "github.com/gogf/gf/v2/os/grpool" + "github.com/gogf/gf/v2/util/gconv" +) + +// ILogger is the API interface for logger. 
+type ILogger interface { + Print(ctx context.Context, v ...interface{}) + Printf(ctx context.Context, format string, v ...interface{}) + Debug(ctx context.Context, v ...interface{}) + Debugf(ctx context.Context, format string, v ...interface{}) + Info(ctx context.Context, v ...interface{}) + Infof(ctx context.Context, format string, v ...interface{}) + Notice(ctx context.Context, v ...interface{}) + Noticef(ctx context.Context, format string, v ...interface{}) + Warning(ctx context.Context, v ...interface{}) + Warningf(ctx context.Context, format string, v ...interface{}) + Error(ctx context.Context, v ...interface{}) + Errorf(ctx context.Context, format string, v ...interface{}) + Critical(ctx context.Context, v ...interface{}) + Criticalf(ctx context.Context, format string, v ...interface{}) + Panic(ctx context.Context, v ...interface{}) + Panicf(ctx context.Context, format string, v ...interface{}) + Fatal(ctx context.Context, v ...interface{}) + Fatalf(ctx context.Context, format string, v ...interface{}) +} + +const ( + commandEnvKeyForDebug = "gf.glog.debug" +) + +var ( + // Ensure Logger implements ILogger. + _ ILogger = &Logger{} + + // Default logger object, for package method usage. + defaultLogger = New() + + // Goroutine pool for async logging output. + // It uses only one asynchronous worker to ensure log sequence. + asyncPool = grpool.New(1) + + // defaultDebug enables debug level or not in default, + // which can be configured using command option or system environment. + defaultDebug = true +) + +func init() { + defaultDebug = gconv.Bool(command.GetOptWithEnv(commandEnvKeyForDebug, "true")) + SetDebug(defaultDebug) +} + +// DefaultLogger returns the default logger. +func DefaultLogger() *Logger { + return defaultLogger +} + +// SetDefaultLogger sets the default logger for package glog. +// Note that there might be concurrent safety issue if calls this function +// in different goroutines. 
+func SetDefaultLogger(l *Logger) { + defaultLogger = l +} diff --git a/vendor/github.com/gogf/gf/v2/os/glog/glog_api.go b/vendor/github.com/gogf/gf/v2/os/glog/glog_api.go new file mode 100644 index 00000000..3529973e --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/glog/glog_api.go @@ -0,0 +1,109 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package glog + +import "context" + +// Print prints `v` with newline using fmt.Sprintln. +// The parameter `v` can be multiple variables. +func Print(ctx context.Context, v ...interface{}) { + defaultLogger.Print(ctx, v...) +} + +// Printf prints `v` with format `format` using fmt.Sprintf. +// The parameter `v` can be multiple variables. +func Printf(ctx context.Context, format string, v ...interface{}) { + defaultLogger.Printf(ctx, format, v...) +} + +// Fatal prints the logging content with [FATA] header and newline, then exit the current process. +func Fatal(ctx context.Context, v ...interface{}) { + defaultLogger.Fatal(ctx, v...) +} + +// Fatalf prints the logging content with [FATA] header, custom format and newline, then exit the current process. +func Fatalf(ctx context.Context, format string, v ...interface{}) { + defaultLogger.Fatalf(ctx, format, v...) +} + +// Panic prints the logging content with [PANI] header and newline, then panics. +func Panic(ctx context.Context, v ...interface{}) { + defaultLogger.Panic(ctx, v...) +} + +// Panicf prints the logging content with [PANI] header, custom format and newline, then panics. +func Panicf(ctx context.Context, format string, v ...interface{}) { + defaultLogger.Panicf(ctx, format, v...) +} + +// Info prints the logging content with [INFO] header and newline. +func Info(ctx context.Context, v ...interface{}) { + defaultLogger.Info(ctx, v...) 
+} + +// Infof prints the logging content with [INFO] header, custom format and newline. +func Infof(ctx context.Context, format string, v ...interface{}) { + defaultLogger.Infof(ctx, format, v...) +} + +// Debug prints the logging content with [DEBU] header and newline. +func Debug(ctx context.Context, v ...interface{}) { + defaultLogger.Debug(ctx, v...) +} + +// Debugf prints the logging content with [DEBU] header, custom format and newline. +func Debugf(ctx context.Context, format string, v ...interface{}) { + defaultLogger.Debugf(ctx, format, v...) +} + +// Notice prints the logging content with [NOTI] header and newline. +// It also prints caller stack info if stack feature is enabled. +func Notice(ctx context.Context, v ...interface{}) { + defaultLogger.Notice(ctx, v...) +} + +// Noticef prints the logging content with [NOTI] header, custom format and newline. +// It also prints caller stack info if stack feature is enabled. +func Noticef(ctx context.Context, format string, v ...interface{}) { + defaultLogger.Noticef(ctx, format, v...) +} + +// Warning prints the logging content with [WARN] header and newline. +// It also prints caller stack info if stack feature is enabled. +func Warning(ctx context.Context, v ...interface{}) { + defaultLogger.Warning(ctx, v...) +} + +// Warningf prints the logging content with [WARN] header, custom format and newline. +// It also prints caller stack info if stack feature is enabled. +func Warningf(ctx context.Context, format string, v ...interface{}) { + defaultLogger.Warningf(ctx, format, v...) +} + +// Error prints the logging content with [ERRO] header and newline. +// It also prints caller stack info if stack feature is enabled. +func Error(ctx context.Context, v ...interface{}) { + defaultLogger.Error(ctx, v...) +} + +// Errorf prints the logging content with [ERRO] header, custom format and newline. +// It also prints caller stack info if stack feature is enabled. 
+func Errorf(ctx context.Context, format string, v ...interface{}) { + defaultLogger.Errorf(ctx, format, v...) +} + +// Critical prints the logging content with [CRIT] header and newline. +// It also prints caller stack info if stack feature is enabled. +func Critical(ctx context.Context, v ...interface{}) { + defaultLogger.Critical(ctx, v...) +} + +// Criticalf prints the logging content with [CRIT] header, custom format and newline. +// It also prints caller stack info if stack feature is enabled. +func Criticalf(ctx context.Context, format string, v ...interface{}) { + defaultLogger.Criticalf(ctx, format, v...) +} diff --git a/vendor/github.com/gogf/gf/v2/os/glog/glog_chaining.go b/vendor/github.com/gogf/gf/v2/os/glog/glog_chaining.go new file mode 100644 index 00000000..391def3e --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/glog/glog_chaining.go @@ -0,0 +1,98 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package glog + +import ( + "io" +) + +// Expose returns the default logger of package glog. +func Expose() *Logger { + return defaultLogger +} + +// To is a chaining function, +// which redirects current logging content output to the sepecified `writer`. +func To(writer io.Writer) *Logger { + return defaultLogger.To(writer) +} + +// Path is a chaining function, +// which sets the directory path to `path` for current logging content output. +func Path(path string) *Logger { + return defaultLogger.Path(path) +} + +// Cat is a chaining function, +// which sets the category to `category` for current logging content output. +func Cat(category string) *Logger { + return defaultLogger.Cat(category) +} + +// File is a chaining function, +// which sets file name `pattern` for the current logging content output. 
+func File(pattern string) *Logger { + return defaultLogger.File(pattern) +} + +// Level is a chaining function, +// which sets logging level for the current logging content output. +func Level(level int) *Logger { + return defaultLogger.Level(level) +} + +// LevelStr is a chaining function, +// which sets logging level for the current logging content output using level string. +func LevelStr(levelStr string) *Logger { + return defaultLogger.LevelStr(levelStr) +} + +// Skip is a chaining function, +// which sets stack skip for the current logging content output. +// It also affects the caller file path checks when line number printing enabled. +func Skip(skip int) *Logger { + return defaultLogger.Skip(skip) +} + +// Stack is a chaining function, +// which sets stack options for the current logging content output . +func Stack(enabled bool, skip ...int) *Logger { + return defaultLogger.Stack(enabled, skip...) +} + +// StackWithFilter is a chaining function, +// which sets stack filter for the current logging content output . +func StackWithFilter(filter string) *Logger { + return defaultLogger.StackWithFilter(filter) +} + +// Stdout is a chaining function, +// which enables/disables stdout for the current logging content output. +// It's enabled in default. +func Stdout(enabled ...bool) *Logger { + return defaultLogger.Stdout(enabled...) +} + +// Header is a chaining function, +// which enables/disables log header for the current logging content output. +// It's enabled in default. +func Header(enabled ...bool) *Logger { + return defaultLogger.Header(enabled...) +} + +// Line is a chaining function, +// which enables/disables printing its caller file along with its line number. +// The parameter `long` specified whether print the long absolute file path, eg: /a/b/c/d.go:23. +func Line(long ...bool) *Logger { + return defaultLogger.Line(long...) +} + +// Async is a chaining function, +// which enables/disables async logging output feature. 
+func Async(enabled ...bool) *Logger { + return defaultLogger.Async(enabled...) +} diff --git a/vendor/github.com/gogf/gf/v2/os/glog/glog_config.go b/vendor/github.com/gogf/gf/v2/os/glog/glog_config.go new file mode 100644 index 00000000..615caa68 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/glog/glog_config.go @@ -0,0 +1,161 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package glog + +import ( + "context" + "io" +) + +// SetConfig set configurations for the defaultLogger. +func SetConfig(config Config) error { + return defaultLogger.SetConfig(config) +} + +// SetConfigWithMap set configurations with map for the defaultLogger. +func SetConfigWithMap(m map[string]interface{}) error { + return defaultLogger.SetConfigWithMap(m) +} + +// SetPath sets the directory path for file logging. +func SetPath(path string) error { + return defaultLogger.SetPath(path) +} + +// GetPath returns the logging directory path for file logging. +// It returns empty string if no directory path set. +func GetPath() string { + return defaultLogger.GetPath() +} + +// SetFile sets the file name `pattern` for file logging. +// Datetime pattern can be used in `pattern`, eg: access-{Ymd}.log. +// The default file name pattern is: Y-m-d.log, eg: 2018-01-01.log +func SetFile(pattern string) { + defaultLogger.SetFile(pattern) +} + +// SetLevel sets the default logging level. +func SetLevel(level int) { + defaultLogger.SetLevel(level) +} + +// GetLevel returns the default logging level value. +func GetLevel() int { + return defaultLogger.GetLevel() +} + +// SetWriter sets the customized logging `writer` for logging. +// The `writer` object should implements the io.Writer interface. 
+// Developer can use customized logging `writer` to redirect logging output to another service, +// eg: kafka, mysql, mongodb, etc. +func SetWriter(writer io.Writer) { + defaultLogger.SetWriter(writer) +} + +// GetWriter returns the customized writer object, which implements the io.Writer interface. +// It returns nil if no customized writer set. +func GetWriter() io.Writer { + return defaultLogger.GetWriter() +} + +// SetDebug enables/disables the debug level for default defaultLogger. +// The debug level is enabled in default. +func SetDebug(debug bool) { + defaultLogger.SetDebug(debug) +} + +// SetAsync enables/disables async logging output feature for default defaultLogger. +func SetAsync(enabled bool) { + defaultLogger.SetAsync(enabled) +} + +// SetStdoutPrint sets whether ouptput the logging contents to stdout, which is true in default. +func SetStdoutPrint(enabled bool) { + defaultLogger.SetStdoutPrint(enabled) +} + +// SetHeaderPrint sets whether output header of the logging contents, which is true in default. +func SetHeaderPrint(enabled bool) { + defaultLogger.SetHeaderPrint(enabled) +} + +// SetPrefix sets prefix string for every logging content. +// Prefix is part of header, which means if header output is shut, no prefix will be output. +func SetPrefix(prefix string) { + defaultLogger.SetPrefix(prefix) +} + +// SetFlags sets extra flags for logging output features. +func SetFlags(flags int) { + defaultLogger.SetFlags(flags) +} + +// GetFlags returns the flags of defaultLogger. +func GetFlags() int { + return defaultLogger.GetFlags() +} + +// SetCtxKeys sets the context keys for defaultLogger. The keys is used for retrieving values +// from context and printing them to logging content. +// +// Note that multiple calls of this function will overwrite the previous set context keys. +func SetCtxKeys(keys ...interface{}) { + defaultLogger.SetCtxKeys(keys...) +} + +// GetCtxKeys retrieves and returns the context keys for logging. 
+func GetCtxKeys() []interface{} { + return defaultLogger.GetCtxKeys() +} + +// PrintStack prints the caller stack, +// the optional parameter `skip` specify the skipped stack offset from the end point. +func PrintStack(ctx context.Context, skip ...int) { + defaultLogger.PrintStack(ctx, skip...) +} + +// GetStack returns the caller stack content, +// the optional parameter `skip` specify the skipped stack offset from the end point. +func GetStack(skip ...int) string { + return defaultLogger.GetStack(skip...) +} + +// SetStack enables/disables the stack feature in failure logging outputs. +func SetStack(enabled bool) { + defaultLogger.SetStack(enabled) +} + +// SetLevelStr sets the logging level by level string. +func SetLevelStr(levelStr string) error { + return defaultLogger.SetLevelStr(levelStr) +} + +// SetLevelPrefix sets the prefix string for specified level. +func SetLevelPrefix(level int, prefix string) { + defaultLogger.SetLevelPrefix(level, prefix) +} + +// SetLevelPrefixes sets the level to prefix string mapping for the defaultLogger. +func SetLevelPrefixes(prefixes map[int]string) { + defaultLogger.SetLevelPrefixes(prefixes) +} + +// GetLevelPrefix returns the prefix string for specified level. +func GetLevelPrefix(level int) string { + return defaultLogger.GetLevelPrefix(level) +} + +// SetHandlers sets the logging handlers for default defaultLogger. +func SetHandlers(handlers ...Handler) { + defaultLogger.SetHandlers(handlers...) +} + +// SetWriterColorEnable sets the file logging with color +func SetWriterColorEnable(enabled bool) { + defaultLogger.SetWriterColorEnable(enabled) +} diff --git a/vendor/github.com/gogf/gf/v2/os/glog/glog_instance.go b/vendor/github.com/gogf/gf/v2/os/glog/glog_instance.go new file mode 100644 index 00000000..3b732606 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/glog/glog_instance.go @@ -0,0 +1,31 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. 
+// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package glog + +import "github.com/gogf/gf/v2/container/gmap" + +const ( + // DefaultName is the default group name for instance usage. + DefaultName = "default" +) + +var ( + // Instances map. + instances = gmap.NewStrAnyMap(true) +) + +// Instance returns an instance of Logger with default settings. +// The parameter `name` is the name for the instance. +func Instance(name ...string) *Logger { + key := DefaultName + if len(name) > 0 && name[0] != "" { + key = name[0] + } + return instances.GetOrSetFuncLock(key, func() interface{} { + return New() + }).(*Logger) +} diff --git a/vendor/github.com/gogf/gf/v2/os/glog/glog_logger.go b/vendor/github.com/gogf/gf/v2/os/glog/glog_logger.go new file mode 100644 index 00000000..1486c7e0 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/glog/glog_logger.go @@ -0,0 +1,411 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package glog + +import ( + "bytes" + "context" + "fmt" + "io" + "os" + "runtime" + "strings" + "time" + + "github.com/fatih/color" + "go.opentelemetry.io/otel/trace" + + "github.com/gogf/gf/v2/debug/gdebug" + "github.com/gogf/gf/v2/internal/consts" + "github.com/gogf/gf/v2/internal/intlog" + "github.com/gogf/gf/v2/os/gctx" + "github.com/gogf/gf/v2/os/gfile" + "github.com/gogf/gf/v2/os/gfpool" + "github.com/gogf/gf/v2/os/gmlock" + "github.com/gogf/gf/v2/os/gtime" + "github.com/gogf/gf/v2/os/gtimer" + "github.com/gogf/gf/v2/text/gregex" + "github.com/gogf/gf/v2/util/gconv" +) + +// Logger is the struct for logging management. 
+type Logger struct { + parent *Logger // Parent logger, if it is not empty, it means the logger is used in chaining function. + config Config // Logger configuration. +} + +const ( + defaultFileFormat = `{Y-m-d}.log` + defaultFileFlags = os.O_CREATE | os.O_WRONLY | os.O_APPEND + defaultFilePerm = os.FileMode(0666) + defaultFileExpire = time.Minute + pathFilterKey = "/os/glog/glog" + memoryLockPrefixForPrintingToFile = "glog.printToFile:" +) + +const ( + F_ASYNC = 1 << iota // Print logging content asynchronously。 + F_FILE_LONG // Print full file name and line number: /a/b/c/d.go:23. + F_FILE_SHORT // Print final file name element and line number: d.go:23. overrides F_FILE_LONG. + F_TIME_DATE // Print the date in the local time zone: 2009-01-23. + F_TIME_TIME // Print the time in the local time zone: 01:23:23. + F_TIME_MILLI // Print the time with milliseconds in the local time zone: 01:23:23.675. + F_CALLER_FN // Print Caller function name and package: main.main + F_TIME_STD = F_TIME_DATE | F_TIME_MILLI +) + +// New creates and returns a custom logger. +func New() *Logger { + return &Logger{ + config: DefaultConfig(), + } +} + +// NewWithWriter creates and returns a custom logger with io.Writer. +func NewWithWriter(writer io.Writer) *Logger { + l := New() + l.SetWriter(writer) + return l +} + +// Clone returns a new logger, which a `shallow copy` of the current logger. +// Note that the attribute `config` of the cloned one is the shallow copy of current one. +func (l *Logger) Clone() *Logger { + return &Logger{ + config: l.config, + parent: l, + } +} + +// getFilePath returns the logging file path. +// The logging file name must have extension name of "log". +func (l *Logger) getFilePath(now time.Time) string { + // Content containing "{}" in the file name is formatted using gtime. 
+ file, _ := gregex.ReplaceStringFunc(`{.+?}`, l.config.File, func(s string) string { + return gtime.New(now).Format(strings.Trim(s, "{}")) + }) + file = gfile.Join(l.config.Path, file) + return file +} + +// print prints `s` to defined writer, logging file or passed `std`. +func (l *Logger) print(ctx context.Context, level int, stack string, values ...interface{}) { + // Lazy initialize for rotation feature. + // It uses atomic reading operation to enhance the performance checking. + // It here uses CAP for performance and concurrent safety. + // It just initializes once for each logger. + if l.config.RotateSize > 0 || l.config.RotateExpire > 0 { + if !l.config.rotatedHandlerInitialized.Val() && l.config.rotatedHandlerInitialized.Cas(false, true) { + gtimer.AddOnce(context.Background(), l.config.RotateCheckInterval, l.rotateChecksTimely) + intlog.Printf(ctx, "logger rotation initialized: every %s", l.config.RotateCheckInterval.String()) + } + } + + var ( + now = time.Now() + input = &HandlerInput{ + internalHandlerInfo: internalHandlerInfo{ + index: -1, + }, + Logger: l, + Buffer: bytes.NewBuffer(nil), + Time: now, + Color: defaultLevelColor[level], + Level: level, + Stack: stack, + } + ) + + // Logging handlers. + if len(l.config.Handlers) > 0 { + input.handlers = append(input.handlers, l.config.Handlers...) + } else if defaultHandler != nil { + input.handlers = []Handler{defaultHandler} + } + input.handlers = append(input.handlers, defaultPrintHandler) + + // Time. + timeFormat := "" + if l.config.Flags&F_TIME_DATE > 0 { + timeFormat += "2006-01-02" + } + if l.config.Flags&F_TIME_TIME > 0 { + if timeFormat != "" { + timeFormat += " " + } + timeFormat += "15:04:05" + } + if l.config.Flags&F_TIME_MILLI > 0 { + if timeFormat != "" { + timeFormat += " " + } + timeFormat += "15:04:05.000" + } + if len(timeFormat) > 0 { + input.TimeFormat = now.Format(timeFormat) + } + + // Level string. + input.LevelFormat = l.GetLevelPrefix(level) + + // Caller path and Fn name. 
+ if l.config.Flags&(F_FILE_LONG|F_FILE_SHORT|F_CALLER_FN) > 0 { + callerFnName, path, line := gdebug.CallerWithFilter( + []string{consts.StackFilterKeyForGoFrame}, + l.config.StSkip, + ) + if l.config.Flags&F_CALLER_FN > 0 { + if len(callerFnName) > 2 { + input.CallerFunc = fmt.Sprintf(`[%s]`, callerFnName) + } + } + if line >= 0 && len(path) > 1 { + if l.config.Flags&F_FILE_LONG > 0 { + input.CallerPath = fmt.Sprintf(`%s:%d:`, path, line) + } + if l.config.Flags&F_FILE_SHORT > 0 { + input.CallerPath = fmt.Sprintf(`%s:%d:`, gfile.Basename(path), line) + } + } + } + // Prefix. + if len(l.config.Prefix) > 0 { + input.Prefix = l.config.Prefix + } + + // Convert value to string. + if ctx != nil { + // Tracing values. + spanCtx := trace.SpanContextFromContext(ctx) + if traceId := spanCtx.TraceID(); traceId.IsValid() { + input.TraceId = traceId.String() + } + // Context values. + if len(l.config.CtxKeys) > 0 { + for _, ctxKey := range l.config.CtxKeys { + var ctxValue interface{} + if ctxValue = ctx.Value(ctxKey); ctxValue == nil { + ctxValue = ctx.Value(gctx.StrKey(gconv.String(ctxKey))) + } + if ctxValue != nil { + if input.CtxStr != "" { + input.CtxStr += ", " + } + input.CtxStr += gconv.String(ctxValue) + } + } + } + } + var tempStr string + for _, v := range values { + tempStr = gconv.String(v) + if len(input.Content) > 0 { + if input.Content[len(input.Content)-1] == '\n' { + // Remove one blank line(\n\n). + if len(tempStr) > 0 && tempStr[0] == '\n' { + input.Content += tempStr[1:] + } else { + input.Content += tempStr + } + } else { + input.Content += " " + tempStr + } + } else { + input.Content = tempStr + } + } + if l.config.Flags&F_ASYNC > 0 { + input.IsAsync = true + err := asyncPool.Add(ctx, func(ctx context.Context) { + input.Next(ctx) + }) + if err != nil { + intlog.Errorf(ctx, `%+v`, err) + } + } else { + input.Next(ctx) + } +} + +// doDefaultPrint outputs the logging content according configuration. 
+func (l *Logger) doDefaultPrint(ctx context.Context, input *HandlerInput) *bytes.Buffer { + var buffer *bytes.Buffer + if l.config.Writer == nil { + // Allow output to stdout? + if l.config.StdoutPrint { + if buf := l.printToStdout(ctx, input); buf != nil { + buffer = buf + } + } + + // Output content to disk file. + if l.config.Path != "" { + if buf := l.printToFile(ctx, input.Time, input); buf != nil { + buffer = buf + } + } + } else { + // Output to custom writer. + if buf := l.printToWriter(ctx, input); buf != nil { + buffer = buf + } + } + return buffer +} + +// printToWriter writes buffer to writer. +func (l *Logger) printToWriter(ctx context.Context, input *HandlerInput) *bytes.Buffer { + if l.config.Writer != nil { + var ( + buffer = input.getRealBuffer(l.config.WriterColorEnable) + ) + if _, err := l.config.Writer.Write(buffer.Bytes()); err != nil { + intlog.Errorf(ctx, `%+v`, err) + } + return buffer + } + return nil +} + +// printToStdout outputs logging content to stdout. +func (l *Logger) printToStdout(ctx context.Context, input *HandlerInput) *bytes.Buffer { + if l.config.StdoutPrint { + var ( + err error + buffer = input.getRealBuffer(!l.config.StdoutColorDisabled) + ) + // This will lose color in Windows os system. + // if _, err := os.Stdout.Write(input.getRealBuffer(true).Bytes()); err != nil { + + // This will print color in Windows os system. + if _, err = fmt.Fprint(color.Output, buffer.String()); err != nil { + intlog.Errorf(ctx, `%+v`, err) + } + return buffer + } + return nil +} + +// printToFile outputs logging content to disk file. +func (l *Logger) printToFile(ctx context.Context, t time.Time, in *HandlerInput) *bytes.Buffer { + var ( + buffer = in.getRealBuffer(l.config.WriterColorEnable) + logFilePath = l.getFilePath(t) + memoryLockKey = memoryLockPrefixForPrintingToFile + logFilePath + ) + gmlock.Lock(memoryLockKey) + defer gmlock.Unlock(memoryLockKey) + + // Rotation file size checks. 
+ if l.config.RotateSize > 0 && gfile.Size(logFilePath) > l.config.RotateSize { + if runtime.GOOS == "windows" { + file := l.getFilePointer(ctx, logFilePath) + if file == nil { + intlog.Errorf(ctx, `got nil file pointer for: %s`, logFilePath) + return buffer + } + + if _, err := file.Write(buffer.Bytes()); err != nil { + intlog.Errorf(ctx, `%+v`, err) + } + + if err := file.Close(true); err != nil { + intlog.Errorf(ctx, `%+v`, err) + } + l.rotateFileBySize(ctx, t) + + return buffer + } + + l.rotateFileBySize(ctx, t) + } + // Logging content outputting to disk file. + if file := l.getFilePointer(ctx, logFilePath); file == nil { + intlog.Errorf(ctx, `got nil file pointer for: %s`, logFilePath) + } else { + if _, err := file.Write(buffer.Bytes()); err != nil { + intlog.Errorf(ctx, `%+v`, err) + } + if err := file.Close(); err != nil { + intlog.Errorf(ctx, `%+v`, err) + } + } + return buffer +} + +// getFilePointer retrieves and returns a file pointer from file pool. +func (l *Logger) getFilePointer(ctx context.Context, path string) *gfpool.File { + file, err := gfpool.Open( + path, + defaultFileFlags, + defaultFilePerm, + defaultFileExpire, + ) + if err != nil { + // panic(err) + intlog.Errorf(ctx, `%+v`, err) + } + return file +} + +// getFilePointer retrieves and returns a file pointer from file pool. +func (l *Logger) getOpenedFilePointer(ctx context.Context, path string) *gfpool.File { + file := gfpool.Get( + path, + defaultFileFlags, + defaultFilePerm, + defaultFileExpire, + ) + if file == nil { + intlog.Errorf(ctx, `can not find the file, path:%s`, path) + } + return file +} + +// printStd prints content `s` without stack. +func (l *Logger) printStd(ctx context.Context, level int, value ...interface{}) { + l.print(ctx, level, "", value...) +} + +// printStd prints content `s` with stack check. 
+func (l *Logger) printErr(ctx context.Context, level int, value ...interface{}) { + var stack string + if l.config.StStatus == 1 { + stack = l.GetStack() + } + // In matter of sequence, do not use stderr here, but use the same stdout. + l.print(ctx, level, stack, value...) +} + +// format formats `values` using fmt.Sprintf. +func (l *Logger) format(format string, value ...interface{}) string { + return fmt.Sprintf(format, value...) +} + +// PrintStack prints the caller stack, +// the optional parameter `skip` specify the skipped stack offset from the end point. +func (l *Logger) PrintStack(ctx context.Context, skip ...int) { + if s := l.GetStack(skip...); s != "" { + l.Print(ctx, "Stack:\n"+s) + } else { + l.Print(ctx) + } +} + +// GetStack returns the caller stack content, +// the optional parameter `skip` specify the skipped stack offset from the end point. +func (l *Logger) GetStack(skip ...int) string { + stackSkip := l.config.StSkip + if len(skip) > 0 { + stackSkip += skip[0] + } + filters := []string{pathFilterKey} + if l.config.StFilter != "" { + filters = append(filters, l.config.StFilter) + } + return gdebug.StackWithFilters(filters, stackSkip) +} diff --git a/vendor/github.com/gogf/gf/v2/os/glog/glog_logger_api.go b/vendor/github.com/gogf/gf/v2/os/glog/glog_logger_api.go new file mode 100644 index 00000000..d485d6fc --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/glog/glog_logger_api.go @@ -0,0 +1,146 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package glog + +import ( + "context" + "fmt" + "os" +) + +// Print prints `v` with newline using fmt.Sprintln. +// The parameter `v` can be multiple variables. +func (l *Logger) Print(ctx context.Context, v ...interface{}) { + l.printStd(ctx, LEVEL_NONE, v...) 
+} + +// Printf prints `v` with format `format` using fmt.Sprintf. +// The parameter `v` can be multiple variables. +func (l *Logger) Printf(ctx context.Context, format string, v ...interface{}) { + l.printStd(ctx, LEVEL_NONE, l.format(format, v...)) +} + +// Fatal prints the logging content with [FATA] header and newline, then exit the current process. +func (l *Logger) Fatal(ctx context.Context, v ...interface{}) { + l.printErr(ctx, LEVEL_FATA, v...) + os.Exit(1) +} + +// Fatalf prints the logging content with [FATA] header, custom format and newline, then exit the current process. +func (l *Logger) Fatalf(ctx context.Context, format string, v ...interface{}) { + l.printErr(ctx, LEVEL_FATA, l.format(format, v...)) + os.Exit(1) +} + +// Panic prints the logging content with [PANI] header and newline, then panics. +func (l *Logger) Panic(ctx context.Context, v ...interface{}) { + l.printErr(ctx, LEVEL_PANI, v...) + panic(fmt.Sprint(v...)) +} + +// Panicf prints the logging content with [PANI] header, custom format and newline, then panics. +func (l *Logger) Panicf(ctx context.Context, format string, v ...interface{}) { + l.printErr(ctx, LEVEL_PANI, l.format(format, v...)) + panic(l.format(format, v...)) +} + +// Info prints the logging content with [INFO] header and newline. +func (l *Logger) Info(ctx context.Context, v ...interface{}) { + if l.checkLevel(LEVEL_INFO) { + l.printStd(ctx, LEVEL_INFO, v...) + } +} + +// Infof prints the logging content with [INFO] header, custom format and newline. +func (l *Logger) Infof(ctx context.Context, format string, v ...interface{}) { + if l.checkLevel(LEVEL_INFO) { + l.printStd(ctx, LEVEL_INFO, l.format(format, v...)) + } +} + +// Debug prints the logging content with [DEBU] header and newline. +func (l *Logger) Debug(ctx context.Context, v ...interface{}) { + if l.checkLevel(LEVEL_DEBU) { + l.printStd(ctx, LEVEL_DEBU, v...) + } +} + +// Debugf prints the logging content with [DEBU] header, custom format and newline. 
+func (l *Logger) Debugf(ctx context.Context, format string, v ...interface{}) { + if l.checkLevel(LEVEL_DEBU) { + l.printStd(ctx, LEVEL_DEBU, l.format(format, v...)) + } +} + +// Notice prints the logging content with [NOTI] header and newline. +// It also prints caller stack info if stack feature is enabled. +func (l *Logger) Notice(ctx context.Context, v ...interface{}) { + if l.checkLevel(LEVEL_NOTI) { + l.printStd(ctx, LEVEL_NOTI, v...) + } +} + +// Noticef prints the logging content with [NOTI] header, custom format and newline. +// It also prints caller stack info if stack feature is enabled. +func (l *Logger) Noticef(ctx context.Context, format string, v ...interface{}) { + if l.checkLevel(LEVEL_NOTI) { + l.printStd(ctx, LEVEL_NOTI, l.format(format, v...)) + } +} + +// Warning prints the logging content with [WARN] header and newline. +// It also prints caller stack info if stack feature is enabled. +func (l *Logger) Warning(ctx context.Context, v ...interface{}) { + if l.checkLevel(LEVEL_WARN) { + l.printStd(ctx, LEVEL_WARN, v...) + } +} + +// Warningf prints the logging content with [WARN] header, custom format and newline. +// It also prints caller stack info if stack feature is enabled. +func (l *Logger) Warningf(ctx context.Context, format string, v ...interface{}) { + if l.checkLevel(LEVEL_WARN) { + l.printStd(ctx, LEVEL_WARN, l.format(format, v...)) + } +} + +// Error prints the logging content with [ERRO] header and newline. +// It also prints caller stack info if stack feature is enabled. +func (l *Logger) Error(ctx context.Context, v ...interface{}) { + if l.checkLevel(LEVEL_ERRO) { + l.printErr(ctx, LEVEL_ERRO, v...) + } +} + +// Errorf prints the logging content with [ERRO] header, custom format and newline. +// It also prints caller stack info if stack feature is enabled. 
+func (l *Logger) Errorf(ctx context.Context, format string, v ...interface{}) { + if l.checkLevel(LEVEL_ERRO) { + l.printErr(ctx, LEVEL_ERRO, l.format(format, v...)) + } +} + +// Critical prints the logging content with [CRIT] header and newline. +// It also prints caller stack info if stack feature is enabled. +func (l *Logger) Critical(ctx context.Context, v ...interface{}) { + if l.checkLevel(LEVEL_CRIT) { + l.printErr(ctx, LEVEL_CRIT, v...) + } +} + +// Criticalf prints the logging content with [CRIT] header, custom format and newline. +// It also prints caller stack info if stack feature is enabled. +func (l *Logger) Criticalf(ctx context.Context, format string, v ...interface{}) { + if l.checkLevel(LEVEL_CRIT) { + l.printErr(ctx, LEVEL_CRIT, l.format(format, v...)) + } +} + +// checkLevel checks whether the given `level` could be output. +func (l *Logger) checkLevel(level int) bool { + return l.config.Level&level > 0 +} diff --git a/vendor/github.com/gogf/gf/v2/os/glog/glog_logger_chaining.go b/vendor/github.com/gogf/gf/v2/os/glog/glog_logger_chaining.go new file mode 100644 index 00000000..29ebade2 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/glog/glog_logger_chaining.go @@ -0,0 +1,223 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package glog + +import ( + "io" + + "github.com/gogf/gf/v2/os/gfile" +) + +// To is a chaining function, +// which redirects current logging content output to the specified `writer`. +func (l *Logger) To(writer io.Writer) *Logger { + logger := (*Logger)(nil) + if l.parent == nil { + logger = l.Clone() + } else { + logger = l + } + logger.SetWriter(writer) + return logger +} + +// Path is a chaining function, +// which sets the directory path to `path` for current logging content output. 
+// +// Note that the parameter `path` is a directory path, not a file path. +func (l *Logger) Path(path string) *Logger { + logger := (*Logger)(nil) + if l.parent == nil { + logger = l.Clone() + } else { + logger = l + } + if path != "" { + if err := logger.SetPath(path); err != nil { + panic(err) + } + } + return logger +} + +// Cat is a chaining function, +// which sets the category to `category` for current logging content output. +// Param `category` can be hierarchical, eg: module/user. +func (l *Logger) Cat(category string) *Logger { + logger := (*Logger)(nil) + if l.parent == nil { + logger = l.Clone() + } else { + logger = l + } + if logger.config.Path != "" { + if err := logger.SetPath(gfile.Join(logger.config.Path, category)); err != nil { + panic(err) + } + } + return logger +} + +// File is a chaining function, +// which sets file name `pattern` for the current logging content output. +func (l *Logger) File(file string) *Logger { + logger := (*Logger)(nil) + if l.parent == nil { + logger = l.Clone() + } else { + logger = l + } + logger.SetFile(file) + return logger +} + +// Level is a chaining function, +// which sets logging level for the current logging content output. +func (l *Logger) Level(level int) *Logger { + logger := (*Logger)(nil) + if l.parent == nil { + logger = l.Clone() + } else { + logger = l + } + logger.SetLevel(level) + return logger +} + +// LevelStr is a chaining function, +// which sets logging level for the current logging content output using level string. +func (l *Logger) LevelStr(levelStr string) *Logger { + logger := (*Logger)(nil) + if l.parent == nil { + logger = l.Clone() + } else { + logger = l + } + if err := logger.SetLevelStr(levelStr); err != nil { + panic(err) + } + return logger +} + +// Skip is a chaining function, +// which sets stack skip for the current logging content output. +// It also affects the caller file path checks when line number printing enabled. 
+func (l *Logger) Skip(skip int) *Logger { + logger := (*Logger)(nil) + if l.parent == nil { + logger = l.Clone() + } else { + logger = l + } + logger.SetStackSkip(skip) + return logger +} + +// Stack is a chaining function, +// which sets stack options for the current logging content output . +func (l *Logger) Stack(enabled bool, skip ...int) *Logger { + logger := (*Logger)(nil) + if l.parent == nil { + logger = l.Clone() + } else { + logger = l + } + logger.SetStack(enabled) + if len(skip) > 0 { + logger.SetStackSkip(skip[0]) + } + return logger +} + +// StackWithFilter is a chaining function, +// which sets stack filter for the current logging content output . +func (l *Logger) StackWithFilter(filter string) *Logger { + logger := (*Logger)(nil) + if l.parent == nil { + logger = l.Clone() + } else { + logger = l + } + logger.SetStack(true) + logger.SetStackFilter(filter) + return logger +} + +// Stdout is a chaining function, +// which enables/disables stdout for the current logging content output. +// It's enabled in default. +func (l *Logger) Stdout(enabled ...bool) *Logger { + logger := (*Logger)(nil) + if l.parent == nil { + logger = l.Clone() + } else { + logger = l + } + // stdout printing is enabled if `enabled` is not passed. + if len(enabled) > 0 && !enabled[0] { + logger.config.StdoutPrint = false + } else { + logger.config.StdoutPrint = true + } + return logger +} + +// Header is a chaining function, +// which enables/disables log header for the current logging content output. +// It's enabled in default. +func (l *Logger) Header(enabled ...bool) *Logger { + logger := (*Logger)(nil) + if l.parent == nil { + logger = l.Clone() + } else { + logger = l + } + // header is enabled if `enabled` is not passed. + if len(enabled) > 0 && !enabled[0] { + logger.SetHeaderPrint(false) + } else { + logger.SetHeaderPrint(true) + } + return logger +} + +// Line is a chaining function, +// which enables/disables printing its caller file path along with its line number. 
+// The parameter `long` specified whether print the long absolute file path, eg: /a/b/c/d.go:23, +// or else short one: d.go:23. +func (l *Logger) Line(long ...bool) *Logger { + logger := (*Logger)(nil) + if l.parent == nil { + logger = l.Clone() + } else { + logger = l + } + if len(long) > 0 && long[0] { + logger.config.Flags |= F_FILE_LONG + } else { + logger.config.Flags |= F_FILE_SHORT + } + return logger +} + +// Async is a chaining function, +// which enables/disables async logging output feature. +func (l *Logger) Async(enabled ...bool) *Logger { + logger := (*Logger)(nil) + if l.parent == nil { + logger = l.Clone() + } else { + logger = l + } + // async feature is enabled if `enabled` is not passed. + if len(enabled) > 0 && !enabled[0] { + logger.SetAsync(false) + } else { + logger.SetAsync(true) + } + return logger +} diff --git a/vendor/github.com/gogf/gf/v2/os/glog/glog_logger_color.go b/vendor/github.com/gogf/gf/v2/os/glog/glog_logger_color.go new file mode 100644 index 00000000..98bf966c --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/glog/glog_logger_color.go @@ -0,0 +1,53 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package glog + +import "github.com/fatih/color" + +const ( + COLOR_BLACK = 30 + iota + COLOR_RED + COLOR_GREEN + COLOR_YELLOW + COLOR_BLUE + COLOR_MAGENTA + COLOR_CYAN + COLOR_WHITE +) + +// Foreground Hi-Intensity text colors +const ( + COLOR_HI_BLACK = 90 + iota + COLOR_HI_RED + COLOR_HI_GREEN + COLOR_HI_YELLOW + COLOR_HI_BLUE + COLOR_HI_MAGENTA + COLOR_HI_CYAN + COLOR_HI_WHITE +) + +// defaultLevelColor defines the default level and its mapping prefix string. 
+var defaultLevelColor = map[int]int{ + LEVEL_DEBU: COLOR_YELLOW, + LEVEL_INFO: COLOR_GREEN, + LEVEL_NOTI: COLOR_CYAN, + LEVEL_WARN: COLOR_MAGENTA, + LEVEL_ERRO: COLOR_RED, + LEVEL_CRIT: COLOR_HI_RED, + LEVEL_PANI: COLOR_HI_RED, + LEVEL_FATA: COLOR_HI_RED, +} + +// getColoredStr returns a string that is colored by given color. +func (l *Logger) getColoredStr(c int, s string) string { + return color.New(color.Attribute(c)).Sprint(s) +} + +func (l *Logger) getColorByLevel(level int) int { + return defaultLevelColor[level] +} diff --git a/vendor/github.com/gogf/gf/v2/os/glog/glog_logger_config.go b/vendor/github.com/gogf/gf/v2/os/glog/glog_logger_config.go new file mode 100644 index 00000000..356945f1 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/glog/glog_logger_config.go @@ -0,0 +1,286 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package glog + +import ( + "context" + "io" + "strings" + "time" + + "github.com/gogf/gf/v2/container/gtype" + "github.com/gogf/gf/v2/errors/gcode" + "github.com/gogf/gf/v2/errors/gerror" + "github.com/gogf/gf/v2/internal/intlog" + "github.com/gogf/gf/v2/os/gfile" + "github.com/gogf/gf/v2/util/gconv" + "github.com/gogf/gf/v2/util/gutil" +) + +// Config is the configuration object for logger. +type Config struct { + Handlers []Handler `json:"-"` // Logger handlers which implement feature similar as middleware. + Writer io.Writer `json:"-"` // Customized io.Writer. + Flags int `json:"flags"` // Extra flags for logging output features. + Path string `json:"path"` // Logging directory path. + File string `json:"file"` // Format pattern for logging file. + Level int `json:"level"` // Output level. + Prefix string `json:"prefix"` // Prefix string for every logging content. 
+ StSkip int `json:"stSkip"` // Skipping count for stack. + StStatus int `json:"stStatus"` // Stack status(1: enabled - default; 0: disabled) + StFilter string `json:"stFilter"` // Stack string filter. + CtxKeys []interface{} `json:"ctxKeys"` // Context keys for logging, which is used for value retrieving from context. + HeaderPrint bool `json:"header"` // Print header or not(true in default). + StdoutPrint bool `json:"stdout"` // Output to stdout or not(true in default). + LevelPrint bool `json:"levelPrint"` // Print level format string or not(true in default). + LevelPrefixes map[int]string `json:"levelPrefixes"` // Logging level to its prefix string mapping. + RotateSize int64 `json:"rotateSize"` // Rotate the logging file if its size > 0 in bytes. + RotateExpire time.Duration `json:"rotateExpire"` // Rotate the logging file if its mtime exceeds this duration. + RotateBackupLimit int `json:"rotateBackupLimit"` // Max backup for rotated files, default is 0, means no backups. + RotateBackupExpire time.Duration `json:"rotateBackupExpire"` // Max expires for rotated files, which is 0 in default, means no expiration. + RotateBackupCompress int `json:"rotateBackupCompress"` // Compress level for rotated files using gzip algorithm. It's 0 in default, means no compression. + RotateCheckInterval time.Duration `json:"rotateCheckInterval"` // Asynchronously checks the backups and expiration at intervals. It's 1 hour in default. + StdoutColorDisabled bool `json:"stdoutColorDisabled"` // Logging level prefix with color to writer or not (false in default). + WriterColorEnable bool `json:"writerColorEnable"` // Logging level prefix with color to writer or not (false in default). + internalConfig +} + +type internalConfig struct { + rotatedHandlerInitialized *gtype.Bool // Whether the rotation feature initialized. +} + +// DefaultConfig returns the default configuration for logger. 
+func DefaultConfig() Config { + c := Config{ + File: defaultFileFormat, + Flags: F_TIME_STD, + Level: LEVEL_ALL, + CtxKeys: []interface{}{}, + StStatus: 1, + HeaderPrint: true, + StdoutPrint: true, + LevelPrint: true, + LevelPrefixes: make(map[int]string, len(defaultLevelPrefixes)), + RotateCheckInterval: time.Hour, + internalConfig: internalConfig{ + rotatedHandlerInitialized: gtype.NewBool(), + }, + } + for k, v := range defaultLevelPrefixes { + c.LevelPrefixes[k] = v + } + if !defaultDebug { + c.Level = c.Level & ^LEVEL_DEBU + } + return c +} + +// GetConfig returns the configuration of current Logger. +func (l *Logger) GetConfig() Config { + return l.config +} + +// SetConfig set configurations for the logger. +func (l *Logger) SetConfig(config Config) error { + l.config = config + // Necessary validation. + if config.Path != "" { + if err := l.SetPath(config.Path); err != nil { + intlog.Errorf(context.TODO(), `%+v`, err) + return err + } + } + intlog.Printf(context.TODO(), "SetConfig: %+v", l.config) + return nil +} + +// SetConfigWithMap set configurations with map for the logger. +func (l *Logger) SetConfigWithMap(m map[string]interface{}) error { + if m == nil || len(m) == 0 { + return gerror.NewCode(gcode.CodeInvalidParameter, "configuration cannot be empty") + } + // The m now is a shallow copy of m. + // A little tricky, isn't it? + m = gutil.MapCopy(m) + // Change string configuration to int value for level. + levelKey, levelValue := gutil.MapPossibleItemByKey(m, "Level") + if levelValue != nil { + if level, ok := levelStringMap[strings.ToUpper(gconv.String(levelValue))]; ok { + m[levelKey] = level + } else { + return gerror.NewCodef(gcode.CodeInvalidParameter, `invalid level string: %v`, levelValue) + } + } + // Change string configuration to int value for file rotation size. 
+ rotateSizeKey, rotateSizeValue := gutil.MapPossibleItemByKey(m, "RotateSize") + if rotateSizeValue != nil { + m[rotateSizeKey] = gfile.StrToSize(gconv.String(rotateSizeValue)) + if m[rotateSizeKey] == -1 { + return gerror.NewCodef(gcode.CodeInvalidConfiguration, `invalid rotate size: %v`, rotateSizeValue) + } + } + if err := gconv.Struct(m, &l.config); err != nil { + return err + } + return l.SetConfig(l.config) +} + +// SetDebug enables/disables the debug level for logger. +// The debug level is enabled in default. +func (l *Logger) SetDebug(debug bool) { + if debug { + l.config.Level = l.config.Level | LEVEL_DEBU + } else { + l.config.Level = l.config.Level & ^LEVEL_DEBU + } +} + +// SetAsync enables/disables async logging output feature. +func (l *Logger) SetAsync(enabled bool) { + if enabled { + l.config.Flags = l.config.Flags | F_ASYNC + } else { + l.config.Flags = l.config.Flags & ^F_ASYNC + } +} + +// SetFlags sets extra flags for logging output features. +func (l *Logger) SetFlags(flags int) { + l.config.Flags = flags +} + +// GetFlags returns the flags of logger. +func (l *Logger) GetFlags() int { + return l.config.Flags +} + +// SetStack enables/disables the stack feature in failure logging outputs. +func (l *Logger) SetStack(enabled bool) { + if enabled { + l.config.StStatus = 1 + } else { + l.config.StStatus = 0 + } +} + +// SetStackSkip sets the stack offset from the end point. +func (l *Logger) SetStackSkip(skip int) { + l.config.StSkip = skip +} + +// SetStackFilter sets the stack filter from the end point. +func (l *Logger) SetStackFilter(filter string) { + l.config.StFilter = filter +} + +// SetCtxKeys sets the context keys for logger. The keys is used for retrieving values +// from context and printing them to logging content. +// +// Note that multiple calls of this function will overwrite the previous set context keys. 
+func (l *Logger) SetCtxKeys(keys ...interface{}) { + l.config.CtxKeys = keys +} + +// AppendCtxKeys appends extra keys to logger. +// It ignores the key if it is already appended to the logger previously. +func (l *Logger) AppendCtxKeys(keys ...interface{}) { + var isExist bool + for _, key := range keys { + isExist = false + for _, ctxKey := range l.config.CtxKeys { + if ctxKey == key { + isExist = true + break + } + } + if !isExist { + l.config.CtxKeys = append(l.config.CtxKeys, key) + } + } +} + +// GetCtxKeys retrieves and returns the context keys for logging. +func (l *Logger) GetCtxKeys() []interface{} { + return l.config.CtxKeys +} + +// SetWriter sets the customized logging `writer` for logging. +// The `writer` object should implement the io.Writer interface. +// Developer can use customized logging `writer` to redirect logging output to another service, +// eg: kafka, mysql, mongodb, etc. +func (l *Logger) SetWriter(writer io.Writer) { + l.config.Writer = writer +} + +// GetWriter returns the customized writer object, which implements the io.Writer interface. +// It returns nil if no writer previously set. +func (l *Logger) GetWriter() io.Writer { + return l.config.Writer +} + +// SetPath sets the directory path for file logging. +func (l *Logger) SetPath(path string) error { + if path == "" { + return gerror.NewCode(gcode.CodeInvalidParameter, "logging path is empty") + } + if !gfile.Exists(path) { + if err := gfile.Mkdir(path); err != nil { + return gerror.Wrapf(err, `Mkdir "%s" failed in PWD "%s"`, path, gfile.Pwd()) + } + } + l.config.Path = strings.TrimRight(path, gfile.Separator) + return nil +} + +// GetPath returns the logging directory path for file logging. +// It returns empty string if no directory path set. +func (l *Logger) GetPath() string { + return l.config.Path +} + +// SetFile sets the file name `pattern` for file logging. +// Datetime pattern can be used in `pattern`, eg: access-{Ymd}.log. 
+// The default file name pattern is: Y-m-d.log, eg: 2018-01-01.log +func (l *Logger) SetFile(pattern string) { + l.config.File = pattern +} + +// SetStdoutPrint sets whether output the logging contents to stdout, which is true in default. +func (l *Logger) SetStdoutPrint(enabled bool) { + l.config.StdoutPrint = enabled +} + +// SetHeaderPrint sets whether output header of the logging contents, which is true in default. +func (l *Logger) SetHeaderPrint(enabled bool) { + l.config.HeaderPrint = enabled +} + +// SetLevelPrint sets whether output level string of the logging contents, which is true in default. +func (l *Logger) SetLevelPrint(enabled bool) { + l.config.LevelPrint = enabled +} + +// SetPrefix sets prefix string for every logging content. +// Prefix is part of header, which means if header output is shut, no prefix will be output. +func (l *Logger) SetPrefix(prefix string) { + l.config.Prefix = prefix +} + +// SetHandlers sets the logging handlers for current logger. +func (l *Logger) SetHandlers(handlers ...Handler) { + l.config.Handlers = handlers +} + +// SetWriterColorEnable enables file/writer logging with color. +func (l *Logger) SetWriterColorEnable(enabled bool) { + l.config.WriterColorEnable = enabled +} + +// SetStdoutColorDisabled disables stdout logging with color. +func (l *Logger) SetStdoutColorDisabled(disabled bool) { + l.config.StdoutColorDisabled = disabled +} diff --git a/vendor/github.com/gogf/gf/v2/os/glog/glog_logger_handler.go b/vendor/github.com/gogf/gf/v2/os/glog/glog_logger_handler.go new file mode 100644 index 00000000..4929fb51 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/glog/glog_logger_handler.go @@ -0,0 +1,142 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. 
+ +package glog + +import ( + "bytes" + "context" + "time" +) + +// Handler is function handler for custom logging content outputs. +type Handler func(ctx context.Context, in *HandlerInput) + +// HandlerInput is the input parameter struct for logging Handler. +type HandlerInput struct { + internalHandlerInfo + Logger *Logger // Current Logger object. + Buffer *bytes.Buffer // Buffer for logging content outputs. + Time time.Time // Logging time, which is the time that logging triggers. + TimeFormat string // Formatted time string, like "2016-01-09 12:00:00". + Color int // Using color, like COLOR_RED, COLOR_BLUE, etc. Eg: 34 + Level int // Using level, like LEVEL_INFO, LEVEL_ERRO, etc. Eg: 256 + LevelFormat string // Formatted level string, like "DEBU", "ERRO", etc. Eg: ERRO + CallerFunc string // The source function name that calls logging, only available if F_CALLER_FN set. + CallerPath string // The source file path and its line number that calls logging, only available if F_FILE_SHORT or F_FILE_LONG set. + CtxStr string // The retrieved context value string from context, only available if Config.CtxKeys configured. + TraceId string // Trace id, only available if OpenTelemetry is enabled. + Prefix string // Custom prefix string for logging content. + Content string // Content is the main logging content without error stack string produced by logger. + Stack string // Stack string produced by logger, only available if Config.StStatus configured. + IsAsync bool // IsAsync marks it is in asynchronous logging. +} + +type internalHandlerInfo struct { + index int // Middleware handling index for internal usage. + handlers []Handler // Handler array calling bu index. +} + +// defaultHandler is the default handler for package. +var defaultHandler Handler + +// defaultPrintHandler is a handler for logging content printing. +// This handler outputs logging content to file/stdout/write if any of them configured. 
+func defaultPrintHandler(ctx context.Context, in *HandlerInput) { + buffer := in.Logger.doDefaultPrint(ctx, in) + if in.Buffer.Len() == 0 { + in.Buffer = buffer + } +} + +// SetDefaultHandler sets default handler for package. +func SetDefaultHandler(handler Handler) { + defaultHandler = handler +} + +// GetDefaultHandler returns the default handler of package. +func GetDefaultHandler() Handler { + return defaultHandler +} + +// Next calls the next logging handler in middleware way. +func (in *HandlerInput) Next(ctx context.Context) { + in.index++ + if in.index < len(in.handlers) { + in.handlers[in.index](ctx, in) + } +} + +// String returns the logging content formatted by default logging handler. +func (in *HandlerInput) String(withColor ...bool) string { + formatWithColor := false + if len(withColor) > 0 { + formatWithColor = withColor[0] + } + return in.getDefaultBuffer(formatWithColor).String() +} + +func (in *HandlerInput) getDefaultBuffer(withColor bool) *bytes.Buffer { + buffer := bytes.NewBuffer(nil) + if in.Logger.config.HeaderPrint { + if in.TimeFormat != "" { + buffer.WriteString(in.TimeFormat) + } + if in.Logger.config.LevelPrint && in.LevelFormat != "" { + var levelStr = "[" + in.LevelFormat + "]" + if withColor { + in.addStringToBuffer(buffer, in.Logger.getColoredStr( + in.Logger.getColorByLevel(in.Level), levelStr, + )) + } else { + in.addStringToBuffer(buffer, levelStr) + } + } + } + if in.TraceId != "" { + in.addStringToBuffer(buffer, "{"+in.TraceId+"}") + } + if in.CtxStr != "" { + in.addStringToBuffer(buffer, "{"+in.CtxStr+"}") + } + if in.Logger.config.HeaderPrint { + if in.Prefix != "" { + in.addStringToBuffer(buffer, in.Prefix) + } + if in.CallerFunc != "" { + in.addStringToBuffer(buffer, in.CallerFunc) + } + if in.CallerPath != "" { + in.addStringToBuffer(buffer, in.CallerPath) + } + } + if in.Content != "" { + if in.Stack != "" { + in.addStringToBuffer(buffer, in.Content+"\nStack:\n"+in.Stack) + } else { + in.addStringToBuffer(buffer, 
in.Content) + } + } + // avoid a single space at the end of a line. + buffer.WriteString("\n") + return buffer +} + +func (in *HandlerInput) getRealBuffer(withColor bool) *bytes.Buffer { + if in.Buffer.Len() > 0 { + return in.Buffer + } + return in.getDefaultBuffer(withColor) +} + +func (in *HandlerInput) addStringToBuffer(buffer *bytes.Buffer, strings ...string) { + for _, s := range strings { + if buffer.Len() > 0 { + buffer.WriteByte(' ') + } + buffer.WriteString(s) + } +} diff --git a/vendor/github.com/gogf/gf/v2/os/glog/glog_logger_handler_json.go b/vendor/github.com/gogf/gf/v2/os/glog/glog_logger_handler_json.go new file mode 100644 index 00000000..20b82c74 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/glog/glog_logger_handler_json.go @@ -0,0 +1,48 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package glog + +import ( + "context" + + "github.com/gogf/gf/v2/internal/json" +) + +// HandlerOutputJson is the structure outputting logging content as single json. +type HandlerOutputJson struct { + Time string `json:""` // Formatted time string, like "2016-01-09 12:00:00". + TraceId string `json:",omitempty"` // Trace id, only available if tracing is enabled. + CtxStr string `json:",omitempty"` // The retrieved context value string from context, only available if Config.CtxKeys configured. + Level string `json:""` // Formatted level string, like "DEBU", "ERRO", etc. Eg: ERRO + CallerFunc string `json:",omitempty"` // The source function name that calls logging, only available if F_CALLER_FN set. + CallerPath string `json:",omitempty"` // The source file path and its line number that calls logging, only available if F_FILE_SHORT or F_FILE_LONG set. + Prefix string `json:",omitempty"` // Custom prefix string for logging content. 
+ Content string `json:""` // Content is the main logging content, containing error stack string produced by logger. + Stack string `json:",omitempty"` // Stack string produced by logger, only available if Config.StStatus configured. +} + +// HandlerJson is a handler for output logging content as a single json string. +func HandlerJson(ctx context.Context, in *HandlerInput) { + output := HandlerOutputJson{ + Time: in.TimeFormat, + TraceId: in.TraceId, + CtxStr: in.CtxStr, + Level: in.LevelFormat, + CallerFunc: in.CallerFunc, + CallerPath: in.CallerPath, + Prefix: in.Prefix, + Content: in.Content, + Stack: in.Stack, + } + jsonBytes, err := json.Marshal(output) + if err != nil { + panic(err) + } + in.Buffer.Write(jsonBytes) + in.Buffer.Write([]byte("\n")) + in.Next(ctx) +} diff --git a/vendor/github.com/gogf/gf/v2/os/glog/glog_logger_level.go b/vendor/github.com/gogf/gf/v2/os/glog/glog_logger_level.go new file mode 100644 index 00000000..e1fe6b6a --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/glog/glog_logger_level.go @@ -0,0 +1,111 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package glog + +import ( + "strings" + + "github.com/gogf/gf/v2/errors/gcode" + "github.com/gogf/gf/v2/errors/gerror" +) + +// Note that the LEVEL_PANI and LEVEL_FATA levels are not used for logging output, +// but for prefix configurations. 
+const ( + LEVEL_ALL = LEVEL_DEBU | LEVEL_INFO | LEVEL_NOTI | LEVEL_WARN | LEVEL_ERRO | LEVEL_CRIT + LEVEL_DEV = LEVEL_ALL + LEVEL_PROD = LEVEL_WARN | LEVEL_ERRO | LEVEL_CRIT + LEVEL_NONE = 0 + LEVEL_DEBU = 1 << iota // 8 + LEVEL_INFO // 16 + LEVEL_NOTI // 32 + LEVEL_WARN // 64 + LEVEL_ERRO // 128 + LEVEL_CRIT // 256 + LEVEL_PANI // 512 + LEVEL_FATA // 1024 +) + +// defaultLevelPrefixes defines the default level and its mapping prefix string. +var defaultLevelPrefixes = map[int]string{ + LEVEL_DEBU: "DEBU", + LEVEL_INFO: "INFO", + LEVEL_NOTI: "NOTI", + LEVEL_WARN: "WARN", + LEVEL_ERRO: "ERRO", + LEVEL_CRIT: "CRIT", + LEVEL_PANI: "PANI", + LEVEL_FATA: "FATA", +} + +// levelStringMap defines level string name to its level mapping. +var levelStringMap = map[string]int{ + "ALL": LEVEL_DEBU | LEVEL_INFO | LEVEL_NOTI | LEVEL_WARN | LEVEL_ERRO | LEVEL_CRIT, + "DEV": LEVEL_DEBU | LEVEL_INFO | LEVEL_NOTI | LEVEL_WARN | LEVEL_ERRO | LEVEL_CRIT, + "DEVELOP": LEVEL_DEBU | LEVEL_INFO | LEVEL_NOTI | LEVEL_WARN | LEVEL_ERRO | LEVEL_CRIT, + "PROD": LEVEL_WARN | LEVEL_ERRO | LEVEL_CRIT, + "PRODUCT": LEVEL_WARN | LEVEL_ERRO | LEVEL_CRIT, + "DEBU": LEVEL_DEBU | LEVEL_INFO | LEVEL_NOTI | LEVEL_WARN | LEVEL_ERRO | LEVEL_CRIT, + "DEBUG": LEVEL_DEBU | LEVEL_INFO | LEVEL_NOTI | LEVEL_WARN | LEVEL_ERRO | LEVEL_CRIT, + "INFO": LEVEL_INFO | LEVEL_NOTI | LEVEL_WARN | LEVEL_ERRO | LEVEL_CRIT, + "NOTI": LEVEL_NOTI | LEVEL_WARN | LEVEL_ERRO | LEVEL_CRIT, + "NOTICE": LEVEL_NOTI | LEVEL_WARN | LEVEL_ERRO | LEVEL_CRIT, + "WARN": LEVEL_WARN | LEVEL_ERRO | LEVEL_CRIT, + "WARNING": LEVEL_WARN | LEVEL_ERRO | LEVEL_CRIT, + "ERRO": LEVEL_ERRO | LEVEL_CRIT, + "ERROR": LEVEL_ERRO | LEVEL_CRIT, + "CRIT": LEVEL_CRIT, + "CRITICAL": LEVEL_CRIT, +} + +// SetLevel sets the logging level. +// Note that levels ` LEVEL_CRIT | LEVEL_PANI | LEVEL_FATA ` cannot be removed for logging content, +// which are automatically added to levels. 
+func (l *Logger) SetLevel(level int) { + l.config.Level = level | LEVEL_CRIT | LEVEL_PANI | LEVEL_FATA +} + +// GetLevel returns the logging level value. +func (l *Logger) GetLevel() int { + return l.config.Level +} + +// SetLevelStr sets the logging level by level string. +func (l *Logger) SetLevelStr(levelStr string) error { + if level, ok := levelStringMap[strings.ToUpper(levelStr)]; ok { + l.config.Level = level + } else { + return gerror.NewCodef(gcode.CodeInvalidParameter, `invalid level string: %s`, levelStr) + } + return nil +} + +// SetLevelPrefix sets the prefix string for specified level. +func (l *Logger) SetLevelPrefix(level int, prefix string) { + l.config.LevelPrefixes[level] = prefix +} + +// SetLevelPrefixes sets the level to prefix string mapping for the logger. +func (l *Logger) SetLevelPrefixes(prefixes map[int]string) { + for k, v := range prefixes { + l.config.LevelPrefixes[k] = v + } +} + +// GetLevelPrefix returns the prefix string for specified level. +func (l *Logger) GetLevelPrefix(level int) string { + return l.config.LevelPrefixes[level] +} + +// getLevelPrefixWithBrackets returns the prefix string with brackets for specified level. +func (l *Logger) getLevelPrefixWithBrackets(level int) string { + levelStr := "" + if s, ok := l.config.LevelPrefixes[level]; ok { + levelStr = "[" + s + "]" + } + return levelStr +} diff --git a/vendor/github.com/gogf/gf/v2/os/glog/glog_logger_rotate.go b/vendor/github.com/gogf/gf/v2/os/glog/glog_logger_rotate.go new file mode 100644 index 00000000..b467fb91 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/glog/glog_logger_rotate.go @@ -0,0 +1,302 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. 
+ +package glog + +import ( + "context" + "fmt" + "runtime" + "time" + + "github.com/gogf/gf/v2/container/garray" + "github.com/gogf/gf/v2/encoding/gcompress" + "github.com/gogf/gf/v2/internal/intlog" + "github.com/gogf/gf/v2/os/gfile" + "github.com/gogf/gf/v2/os/gmlock" + "github.com/gogf/gf/v2/os/gtime" + "github.com/gogf/gf/v2/os/gtimer" + "github.com/gogf/gf/v2/text/gregex" +) + +const ( + memoryLockPrefixForRotating = "glog.rotateChecksTimely:" +) + +// rotateFileBySize rotates the current logging file according to the +// configured rotation size. +func (l *Logger) rotateFileBySize(ctx context.Context, now time.Time) { + if l.config.RotateSize <= 0 { + return + } + if err := l.doRotateFile(ctx, l.getFilePath(now)); err != nil { + // panic(err) + intlog.Errorf(ctx, `%+v`, err) + } +} + +// doRotateFile rotates the given logging file. +func (l *Logger) doRotateFile(ctx context.Context, filePath string) error { + memoryLockKey := "glog.doRotateFile:" + filePath + if !gmlock.TryLock(memoryLockKey) { + return nil + } + defer gmlock.Unlock(memoryLockKey) + + intlog.PrintFunc(ctx, func() string { + return fmt.Sprintf(`start rotating file by size: %s, file: %s`, gfile.SizeFormat(filePath), filePath) + }) + defer intlog.PrintFunc(ctx, func() string { + return fmt.Sprintf(`done rotating file by size: %s, size: %s`, gfile.SizeFormat(filePath), filePath) + }) + + // No backups, it then just removes the current logging file. + if l.config.RotateBackupLimit == 0 { + if err := gfile.Remove(filePath); err != nil { + return err + } + intlog.Printf( + ctx, + `%d size exceeds, no backups set, remove original logging file: %s`, + l.config.RotateSize, filePath, + ) + return nil + } + // Else it creates new backup files. 
+ var ( + dirPath = gfile.Dir(filePath) + fileName = gfile.Name(filePath) + fileExtName = gfile.ExtName(filePath) + newFilePath = "" + ) + // Rename the logging file by adding extra datetime information to microseconds, like: + // access.log -> access.20200326101301899002.log + // access.20200326.log -> access.20200326.20200326101301899002.log + for { + var ( + now = gtime.Now() + micro = now.Microsecond() % 1000 + ) + if micro == 0 { + micro = 101 + } else { + for micro < 100 { + micro *= 10 + } + } + newFilePath = gfile.Join( + dirPath, + fmt.Sprintf( + `%s.%s%d.%s`, + fileName, now.Format("YmdHisu"), micro, fileExtName, + ), + ) + if !gfile.Exists(newFilePath) { + break + } else { + intlog.Printf(ctx, `rotation file exists, continue: %s`, newFilePath) + } + } + intlog.Printf(ctx, "rotating file by size from %s to %s", filePath, newFilePath) + if err := gfile.Rename(filePath, newFilePath); err != nil { + return err + } + return nil +} + +// rotateChecksTimely timely checks the backups expiration and the compression. +func (l *Logger) rotateChecksTimely(ctx context.Context) { + defer gtimer.AddOnce(ctx, l.config.RotateCheckInterval, l.rotateChecksTimely) + + // Checks whether file rotation not enabled. + if l.config.RotateSize <= 0 && l.config.RotateExpire == 0 { + intlog.Printf( + ctx, + "logging rotation ignore checks: RotateSize: %d, RotateExpire: %s", + l.config.RotateSize, l.config.RotateExpire.String(), + ) + return + } + + // It here uses memory lock to guarantee the concurrent safety. 
+ memoryLockKey := memoryLockPrefixForRotating + l.config.Path + if !gmlock.TryLock(memoryLockKey) { + return + } + defer gmlock.Unlock(memoryLockKey) + + var ( + now = time.Now() + pattern = "*.log, *.gz" + files, err = gfile.ScanDirFile(l.config.Path, pattern, true) + ) + if err != nil { + intlog.Errorf(ctx, `%+v`, err) + } + intlog.Printf(ctx, "logging rotation start checks: %+v", files) + // ============================================================= + // Rotation of expired file checks. + // ============================================================= + if l.config.RotateExpire > 0 { + var ( + mtime time.Time + subDuration time.Duration + expireRotated bool + ) + for _, file := range files { + if gfile.ExtName(file) == "gz" { + continue + } + mtime = gfile.MTime(file) + subDuration = now.Sub(mtime) + if subDuration > l.config.RotateExpire { + func() { + memoryLockFileKey := memoryLockPrefixForPrintingToFile + file + if !gmlock.TryLock(memoryLockFileKey) { + return + } + defer gmlock.Unlock(memoryLockFileKey) + + fp := l.getOpenedFilePointer(ctx, file) + if fp == nil { + intlog.Errorf(ctx, `got nil file pointer for: %s`, file) + return + } + + if runtime.GOOS == "windows" { + if err := fp.Close(true); err != nil { + intlog.Errorf(ctx, `%+v`, err) + } + } + + expireRotated = true + intlog.Printf( + ctx, + `%v - %v = %v > %v, rotation expire logging file: %s`, + now, mtime, subDuration, l.config.RotateExpire, file, + ) + if err := l.doRotateFile(ctx, file); err != nil { + intlog.Errorf(ctx, `%+v`, err) + } + }() + } + } + if expireRotated { + // Update the files array. + files, err = gfile.ScanDirFile(l.config.Path, pattern, true) + if err != nil { + intlog.Errorf(ctx, `%+v`, err) + } + } + } + + // ============================================================= + // Rotated file compression. 
+ // ============================================================= + needCompressFileArray := garray.NewStrArray() + if l.config.RotateBackupCompress > 0 { + for _, file := range files { + // Eg: access.20200326101301899002.log.gz + if gfile.ExtName(file) == "gz" { + continue + } + // Eg: + // access.20200326101301899002.log + if gregex.IsMatchString(`.+\.\d{20}\.log`, gfile.Basename(file)) { + needCompressFileArray.Append(file) + } + } + if needCompressFileArray.Len() > 0 { + needCompressFileArray.Iterator(func(_ int, path string) bool { + err := gcompress.GzipFile(path, path+".gz") + if err == nil { + intlog.Printf(ctx, `compressed done, remove original logging file: %s`, path) + if err = gfile.Remove(path); err != nil { + intlog.Print(ctx, err) + } + } else { + intlog.Print(ctx, err) + } + return true + }) + // Update the files array. + files, err = gfile.ScanDirFile(l.config.Path, pattern, true) + if err != nil { + intlog.Errorf(ctx, `%+v`, err) + } + } + } + + // ============================================================= + // Backups count limitation and expiration checks. + // ============================================================= + var ( + backupFilesMap = make(map[string]*garray.SortedArray) + originalLoggingFilePath string + ) + if l.config.RotateBackupLimit > 0 || l.config.RotateBackupExpire > 0 { + for _, file := range files { + originalLoggingFilePath, _ = gregex.ReplaceString(`\.\d{20}`, "", file) + if backupFilesMap[originalLoggingFilePath] == nil { + backupFilesMap[originalLoggingFilePath] = garray.NewSortedArray(func(a, b interface{}) int { + // Sorted by rotated/backup file mtime. + // The older rotated/backup file is put in the head of array. + var ( + file1 = a.(string) + file2 = b.(string) + result = gfile.MTimestampMilli(file1) - gfile.MTimestampMilli(file2) + ) + if result <= 0 { + return -1 + } + return 1 + }) + } + // Check if this file a rotated/backup file. 
+ if gregex.IsMatchString(`.+\.\d{20}\.log`, gfile.Basename(file)) { + backupFilesMap[originalLoggingFilePath].Add(file) + } + } + intlog.Printf(ctx, `calculated backup files map: %+v`, backupFilesMap) + for _, array := range backupFilesMap { + diff := array.Len() - l.config.RotateBackupLimit + for i := 0; i < diff; i++ { + path, _ := array.PopLeft() + intlog.Printf(ctx, `remove exceeded backup limit file: %s`, path) + if err := gfile.Remove(path.(string)); err != nil { + intlog.Errorf(ctx, `%+v`, err) + } + } + } + // Backups expiration checking. + if l.config.RotateBackupExpire > 0 { + var ( + mtime time.Time + subDuration time.Duration + ) + for _, array := range backupFilesMap { + array.Iterator(func(_ int, v interface{}) bool { + path := v.(string) + mtime = gfile.MTime(path) + subDuration = now.Sub(mtime) + if subDuration > l.config.RotateBackupExpire { + intlog.Printf( + ctx, + `%v - %v = %v > %v, remove expired backup file: %s`, + now, mtime, subDuration, l.config.RotateBackupExpire, path, + ) + if err := gfile.Remove(path); err != nil { + intlog.Errorf(ctx, `%+v`, err) + } + return true + } else { + return false + } + }) + } + } + } +} diff --git a/vendor/github.com/gogf/gf/v2/os/glog/glog_logger_writer.go b/vendor/github.com/gogf/gf/v2/os/glog/glog_logger_writer.go new file mode 100644 index 00000000..610e5e53 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/glog/glog_logger_writer.go @@ -0,0 +1,19 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package glog + +import ( + "bytes" + "context" +) + +// Write implements the io.Writer interface. +// It just prints the content using Print. 
+func (l *Logger) Write(p []byte) (n int, err error) { + l.Header(false).Print(context.TODO(), string(bytes.TrimRight(p, "\r\n"))) + return len(p), nil +} diff --git a/vendor/github.com/gogf/gf/v2/os/gmlock/gmlock.go b/vendor/github.com/gogf/gf/v2/os/gmlock/gmlock.go new file mode 100644 index 00000000..92be26f4 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/gmlock/gmlock.go @@ -0,0 +1,89 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +// Package gmlock implements a concurrent-safe memory-based locker. +package gmlock + +var ( + // Default locker. + locker = New() +) + +// Lock locks the `key` with writing lock. +// If there's a write/reading lock the `key`, +// it will blocks until the lock is released. +func Lock(key string) { + locker.Lock(key) +} + +// TryLock tries locking the `key` with writing lock, +// it returns true if success, or if there's a write/reading lock the `key`, +// it returns false. +func TryLock(key string) bool { + return locker.TryLock(key) +} + +// Unlock unlocks the writing lock of the `key`. +func Unlock(key string) { + locker.Unlock(key) +} + +// RLock locks the `key` with reading lock. +// If there's a writing lock on `key`, +// it will blocks until the writing lock is released. +func RLock(key string) { + locker.RLock(key) +} + +// TryRLock tries locking the `key` with reading lock. +// It returns true if success, or if there's a writing lock on `key`, it returns false. +func TryRLock(key string) bool { + return locker.TryRLock(key) +} + +// RUnlock unlocks the reading lock of the `key`. +func RUnlock(key string) { + locker.RUnlock(key) +} + +// LockFunc locks the `key` with writing lock and callback function `f`. +// If there's a write/reading lock the `key`, +// it will blocks until the lock is released. 
+// +// It releases the lock after `f` is executed. +func LockFunc(key string, f func()) { + locker.LockFunc(key, f) +} + +// RLockFunc locks the `key` with reading lock and callback function `f`. +// If there's a writing lock the `key`, +// it will blocks until the lock is released. +// +// It releases the lock after `f` is executed. +func RLockFunc(key string, f func()) { + locker.RLockFunc(key, f) +} + +// TryLockFunc locks the `key` with writing lock and callback function `f`. +// It returns true if success, or else if there's a write/reading lock the `key`, it return false. +// +// It releases the lock after `f` is executed. +func TryLockFunc(key string, f func()) bool { + return locker.TryLockFunc(key, f) +} + +// TryRLockFunc locks the `key` with reading lock and callback function `f`. +// It returns true if success, or else if there's a writing lock the `key`, it returns false. +// +// It releases the lock after `f` is executed. +func TryRLockFunc(key string, f func()) bool { + return locker.TryRLockFunc(key, f) +} + +// Remove removes mutex with given `key`. +func Remove(key string) { + locker.Remove(key) +} diff --git a/vendor/github.com/gogf/gf/v2/os/gmlock/gmlock_locker.go b/vendor/github.com/gogf/gf/v2/os/gmlock/gmlock_locker.go new file mode 100644 index 00000000..5336f92d --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/gmlock/gmlock_locker.go @@ -0,0 +1,133 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gmlock + +import ( + "github.com/gogf/gf/v2/container/gmap" + "github.com/gogf/gf/v2/os/gmutex" +) + +// Locker is a memory based locker. +// Note that there's no cache expire mechanism for mutex in locker. +// You need remove certain mutex manually when you do not want use it anymore. 
+type Locker struct { + m *gmap.StrAnyMap +} + +// New creates and returns a new memory locker. +// A memory locker can lock/unlock with dynamic string key. +func New() *Locker { + return &Locker{ + m: gmap.NewStrAnyMap(true), + } +} + +// Lock locks the `key` with writing lock. +// If there's a write/reading lock the `key`, +// it will block until the lock is released. +func (l *Locker) Lock(key string) { + l.getOrNewMutex(key).Lock() +} + +// TryLock tries locking the `key` with writing lock, +// it returns true if success, or it returns false if there's a writing/reading lock the `key`. +func (l *Locker) TryLock(key string) bool { + return l.getOrNewMutex(key).TryLock() +} + +// Unlock unlocks the writing lock of the `key`. +func (l *Locker) Unlock(key string) { + if v := l.m.Get(key); v != nil { + v.(*gmutex.Mutex).Unlock() + } +} + +// RLock locks the `key` with reading lock. +// If there's a writing lock on `key`, +// it will blocks until the writing lock is released. +func (l *Locker) RLock(key string) { + l.getOrNewMutex(key).RLock() +} + +// TryRLock tries locking the `key` with reading lock. +// It returns true if success, or if there's a writing lock on `key`, it returns false. +func (l *Locker) TryRLock(key string) bool { + return l.getOrNewMutex(key).TryRLock() +} + +// RUnlock unlocks the reading lock of the `key`. +func (l *Locker) RUnlock(key string) { + if v := l.m.Get(key); v != nil { + v.(*gmutex.Mutex).RUnlock() + } +} + +// LockFunc locks the `key` with writing lock and callback function `f`. +// If there's a write/reading lock the `key`, +// it will block until the lock is released. +// +// It releases the lock after `f` is executed. +func (l *Locker) LockFunc(key string, f func()) { + l.Lock(key) + defer l.Unlock(key) + f() +} + +// RLockFunc locks the `key` with reading lock and callback function `f`. +// If there's a writing lock the `key`, +// it will block until the lock is released. +// +// It releases the lock after `f` is executed. 
+func (l *Locker) RLockFunc(key string, f func()) { + l.RLock(key) + defer l.RUnlock(key) + f() +} + +// TryLockFunc locks the `key` with writing lock and callback function `f`. +// It returns true if success, or else if there's a write/reading lock the `key`, it return false. +// +// It releases the lock after `f` is executed. +func (l *Locker) TryLockFunc(key string, f func()) bool { + if l.TryLock(key) { + defer l.Unlock(key) + f() + return true + } + return false +} + +// TryRLockFunc locks the `key` with reading lock and callback function `f`. +// It returns true if success, or else if there's a writing lock the `key`, it returns false. +// +// It releases the lock after `f` is executed. +func (l *Locker) TryRLockFunc(key string, f func()) bool { + if l.TryRLock(key) { + defer l.RUnlock(key) + f() + return true + } + return false +} + +// Remove removes mutex with given `key` from locker. +func (l *Locker) Remove(key string) { + l.m.Remove(key) +} + +// Clear removes all mutexes from locker. +func (l *Locker) Clear() { + l.m.Clear() +} + +// getOrNewMutex returns the mutex of given `key` if it exists, +// or else creates and returns a new one. +func (l *Locker) getOrNewMutex(key string) *gmutex.Mutex { + return l.m.GetOrSetFuncLock(key, func() interface{} { + return gmutex.New() + }).(*gmutex.Mutex) +} diff --git a/vendor/github.com/gogf/gf/v2/os/gmutex/gmutex.go b/vendor/github.com/gogf/gf/v2/os/gmutex/gmutex.go new file mode 100644 index 00000000..df5e40c4 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/gmutex/gmutex.go @@ -0,0 +1,224 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +// Package gmutex implements graceful concurrent-safe mutex with more rich features. 
+package gmutex + +import ( + "math" + "runtime" + + "github.com/gogf/gf/v2/container/gtype" +) + +// Mutex is a high level Mutex, which implements more rich features for mutex. +type Mutex struct { + state *gtype.Int32 // Indicates the state of mutex. -1: writing locked; > 1 reading locked. + writer *gtype.Int32 // Pending writer count. + reader *gtype.Int32 // Pending reader count. + writing chan struct{} // Channel for writer blocking. + reading chan struct{} // Channel for reader blocking. +} + +// New creates and returns a new mutex. +func New() *Mutex { + return &Mutex{ + state: gtype.NewInt32(), + writer: gtype.NewInt32(), + reader: gtype.NewInt32(), + writing: make(chan struct{}, 1), + reading: make(chan struct{}, math.MaxInt32), + } +} + +// Lock locks the mutex for writing purpose. +// If the mutex is already locked by another goroutine for reading or writing, +// it blocks until the lock is available. +func (m *Mutex) Lock() { + for { + // Using CAS operation to get the writing lock atomically. + if m.state.Cas(0, -1) { + return + } + // It or else blocks to wait for the next chance. + m.writer.Add(1) + <-m.writing + } +} + +// Unlock unlocks writing lock on the mutex. +// It is safe to be called multiple times even there's no locks. +func (m *Mutex) Unlock() { + if m.state.Cas(-1, 0) { + // Note that there might be more than one goroutines can enter this block. + var n int32 + // Writing lock unlocks, then first check the blocked readers. + // If there are readers blocked, it unlocks them with preemption. + for { + if n = m.reader.Val(); n > 0 { + if m.reader.Cas(n, 0) { + for ; n > 0; n-- { + m.reading <- struct{}{} + } + break + } else { + runtime.Gosched() + } + } else { + break + } + } + + // It then also kindly feeds the pending writers with one chance. + if n = m.writer.Val(); n > 0 { + if m.writer.Cas(n, n-1) { + m.writing <- struct{}{} + } + } + } +} + +// TryLock tries locking the mutex for writing purpose. 
+// It returns true immediately if success, or if there's a write/reading lock on the mutex, +// it returns false immediately. +func (m *Mutex) TryLock() bool { + return m.state.Cas(0, -1) +} + +// RLock locks mutex for reading purpose. +// If the mutex is already locked for writing, +// it blocks until the lock is available. +func (m *Mutex) RLock() { + var n int32 + for { + if n = m.state.Val(); n >= 0 { + // If there's no writing lock currently, then do the reading lock checks. + if m.state.Cas(n, n+1) { + return + } else { + runtime.Gosched() + } + } else { + // It or else pends the reader. + m.reader.Add(1) + <-m.reading + } + } +} + +// RUnlock unlocks the reading lock on the mutex. +// It is safe to be called multiple times even there's no locks. +func (m *Mutex) RUnlock() { + var n int32 + for { + if n = m.state.Val(); n >= 1 { + if m.state.Cas(n, n-1) { + break + } else { + runtime.Gosched() + } + } else { + break + } + } + // Reading lock unlocks, it then only check the blocked writers. + // Note that it is not necessary to check the pending readers here. + // `n == 1` means the state of mutex comes down to zero. + if n == 1 { + if n = m.writer.Val(); n > 0 { + if m.writer.Cas(n, n-1) { + m.writing <- struct{}{} + } + } + } +} + +// TryRLock tries locking the mutex for reading purpose. +// It returns true immediately if success, or if there's a writing lock on the mutex, +// it returns false immediately. +func (m *Mutex) TryRLock() bool { + var n int32 + for { + if n = m.state.Val(); n >= 0 { + if m.state.Cas(n, n+1) { + return true + } else { + runtime.Gosched() + } + } else { + return false + } + } +} + +// IsLocked checks whether the mutex is locked with writing or reading lock. +// Note that the result might be changed after it's called, +// so it cannot be the criterion for atomic operations. +func (m *Mutex) IsLocked() bool { + return m.state.Val() != 0 +} + +// IsWLocked checks whether the mutex is locked by writing lock. 
+// Note that the result might be changed after it's called, +// so it cannot be the criterion for atomic operations. +func (m *Mutex) IsWLocked() bool { + return m.state.Val() < 0 +} + +// IsRLocked checks whether the mutex is locked by reading lock. +// Note that the result might be changed after it's called, +// so it cannot be the criterion for atomic operations. +func (m *Mutex) IsRLocked() bool { + return m.state.Val() > 0 +} + +// LockFunc locks the mutex for writing with given callback function `f`. +// If there's a write/reading lock the mutex, it will blocks until the lock is released. +// +// It releases the lock after `f` is executed. +func (m *Mutex) LockFunc(f func()) { + m.Lock() + defer m.Unlock() + f() +} + +// RLockFunc locks the mutex for reading with given callback function `f`. +// If there's a writing lock the mutex, it will blocks until the lock is released. +// +// It releases the lock after `f` is executed. +func (m *Mutex) RLockFunc(f func()) { + m.RLock() + defer m.RUnlock() + f() +} + +// TryLockFunc tries locking the mutex for writing with given callback function `f`. +// it returns true immediately if success, or if there's a write/reading lock on the mutex, +// it returns false immediately. +// +// It releases the lock after `f` is executed. +func (m *Mutex) TryLockFunc(f func()) (result bool) { + if m.TryLock() { + result = true + defer m.Unlock() + f() + } + return +} + +// TryRLockFunc tries locking the mutex for reading with given callback function `f`. +// It returns true immediately if success, or if there's a writing lock on the mutex, +// it returns false immediately. +// +// It releases the lock after `f` is executed. 
+func (m *Mutex) TryRLockFunc(f func()) (result bool) { + if m.TryRLock() { + result = true + defer m.RUnlock() + f() + } + return +} diff --git a/vendor/github.com/gogf/gf/v2/os/grpool/grpool.go b/vendor/github.com/gogf/gf/v2/os/grpool/grpool.go new file mode 100644 index 00000000..dd9dcf7e --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/grpool/grpool.go @@ -0,0 +1,193 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +// Package grpool implements a goroutine reusable pool. +package grpool + +import ( + "context" + "time" + + "github.com/gogf/gf/v2/container/glist" + "github.com/gogf/gf/v2/container/gtype" + "github.com/gogf/gf/v2/errors/gcode" + "github.com/gogf/gf/v2/errors/gerror" + "github.com/gogf/gf/v2/os/gtimer" + "github.com/gogf/gf/v2/util/grand" +) + +// Func is the pool function which contains context parameter. +type Func func(ctx context.Context) + +// RecoverFunc is the pool runtime panic recover function which contains context parameter. +type RecoverFunc func(ctx context.Context, err error) + +// Pool manages the goroutines using pool. +type Pool struct { + limit int // Max goroutine count limit. + count *gtype.Int // Current running goroutine count. + list *glist.List // List for asynchronous job adding purpose. + closed *gtype.Bool // Is pool closed or not. +} + +type localPoolItem struct { + Ctx context.Context + Func Func +} + +const ( + minTimerDuration = 500 * time.Millisecond + maxTimerDuration = 1500 * time.Millisecond +) + +// Default goroutine pool. +var ( + pool = New() +) + +// New creates and returns a new goroutine pool object. +// The parameter `limit` is used to limit the max goroutine count, +// which is not limited in default. 
+func New(limit ...int) *Pool { + p := &Pool{ + limit: -1, + count: gtype.NewInt(), + list: glist.New(true), + closed: gtype.NewBool(), + } + if len(limit) > 0 && limit[0] > 0 { + p.limit = limit[0] + } + timerDuration := grand.D(minTimerDuration, maxTimerDuration) + gtimer.Add(context.Background(), timerDuration, p.supervisor) + return p +} + +// Add pushes a new job to the pool using default goroutine pool. +// The job will be executed asynchronously. +func Add(ctx context.Context, f Func) error { + return pool.Add(ctx, f) +} + +// AddWithRecover pushes a new job to the pool with specified recover function. +// The optional `recoverFunc` is called when any panic during executing of `userFunc`. +// If `recoverFunc` is not passed or given nil, it ignores the panic from `userFunc`. +// The job will be executed asynchronously. +func AddWithRecover(ctx context.Context, userFunc Func, recoverFunc RecoverFunc) error { + return pool.AddWithRecover(ctx, userFunc, recoverFunc) +} + +// Size returns current goroutine count of default goroutine pool. +func Size() int { + return pool.Size() +} + +// Jobs returns current job count of default goroutine pool. +func Jobs() int { + return pool.Jobs() +} + +// Add pushes a new job to the pool. +// The job will be executed asynchronously. +func (p *Pool) Add(ctx context.Context, f Func) error { + for p.closed.Val() { + return gerror.NewCode( + gcode.CodeInvalidOperation, + "goroutine pool is already closed", + ) + } + p.list.PushFront(&localPoolItem{ + Ctx: ctx, + Func: f, + }) + // Check and fork new worker. + p.checkAndFork() + return nil +} + +// checkAndFork checks and creates a new goroutine worker. +// Note that the worker dies if the job function panics and the job has no recover handling. +func (p *Pool) checkAndFork() { + // Check whether fork new goroutine or not. + var n int + for { + n = p.count.Val() + if p.limit != -1 && n >= p.limit { + // No need fork new goroutine. 
+ return + } + if p.count.Cas(n, n+1) { + // Use CAS to guarantee atomicity. + break + } + } + // Create job function in goroutine. + go func() { + defer p.count.Add(-1) + + var ( + listItem interface{} + poolItem *localPoolItem + ) + for !p.closed.Val() { + listItem = p.list.PopBack() + if listItem == nil { + return + } + poolItem = listItem.(*localPoolItem) + poolItem.Func(poolItem.Ctx) + } + }() +} + +// AddWithRecover pushes a new job to the pool with specified recover function. +// The optional `recoverFunc` is called when any panic during executing of `userFunc`. +// If `recoverFunc` is not passed or given nil, it ignores the panic from `userFunc`. +// The job will be executed asynchronously. +func (p *Pool) AddWithRecover(ctx context.Context, userFunc Func, recoverFunc RecoverFunc) error { + return p.Add(ctx, func(ctx context.Context) { + defer func() { + if exception := recover(); exception != nil { + if recoverFunc != nil { + if v, ok := exception.(error); ok && gerror.HasStack(v) { + recoverFunc(ctx, v) + } else { + recoverFunc(ctx, gerror.Newf(`%+v`, exception)) + } + } + } + }() + userFunc(ctx) + }) +} + +// Cap returns the capacity of the pool. +// This capacity is defined when pool is created. +// It returns -1 if there's no limit. +func (p *Pool) Cap() int { + return p.limit +} + +// Size returns current goroutine count of the pool. +func (p *Pool) Size() int { + return p.count.Val() +} + +// Jobs returns current job count of the pool. +// Note that, it does not return worker/goroutine count but the job/task count. +func (p *Pool) Jobs() int { + return p.list.Size() +} + +// IsClosed returns if pool is closed. +func (p *Pool) IsClosed() bool { + return p.closed.Val() +} + +// Close closes the goroutine pool, which makes all goroutines exit. 
+func (p *Pool) Close() { + p.closed.Set(true) +} diff --git a/vendor/github.com/gogf/gf/v2/os/grpool/grpool_supervisor.go b/vendor/github.com/gogf/gf/v2/os/grpool/grpool_supervisor.go new file mode 100644 index 00000000..d57e9695 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/grpool/grpool_supervisor.go @@ -0,0 +1,30 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package grpool + +import ( + "context" + + "github.com/gogf/gf/v2/os/gtimer" +) + +// supervisor checks the job list and fork new worker goroutine to handle the job +// if there are jobs but no workers in pool. +func (p *Pool) supervisor(ctx context.Context) { + if p.IsClosed() { + gtimer.Exit() + } + if p.list.Size() > 0 && p.count.Val() == 0 { + var number = p.list.Size() + if p.limit > 0 { + number = p.limit + } + for i := 0; i < number; i++ { + p.checkAndFork() + } + } +} diff --git a/vendor/github.com/gogf/gf/v2/os/gstructs/gstructs.go b/vendor/github.com/gogf/gf/v2/os/gstructs/gstructs.go new file mode 100644 index 00000000..09f911f2 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/gstructs/gstructs.go @@ -0,0 +1,62 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +// Package gstructs provides functions for struct information retrieving. +package gstructs + +import ( + "reflect" +) + +// Type wraps reflect.Type for additional features. +type Type struct { + reflect.Type +} + +// Field contains information of a struct field . +type Field struct { + Value reflect.Value // The underlying value of the field. 
+ Field reflect.StructField // The underlying field of the field. + + // Retrieved tag name. It depends TagValue. + TagName string + + // Retrieved tag value. + // There might be more than one tags in the field, but only one can be retrieved according to calling function rules. + TagValue string +} + +// FieldsInput is the input parameter struct type for function Fields. +type FieldsInput struct { + // Pointer should be type of struct/*struct. + Pointer interface{} + + // RecursiveOption specifies the way retrieving the fields recursively if the attribute + // is an embedded struct. It is RecursiveOptionNone in default. + RecursiveOption RecursiveOption +} + +// FieldMapInput is the input parameter struct type for function FieldMap. +type FieldMapInput struct { + // Pointer should be type of struct/*struct. + Pointer interface{} + + // PriorityTagArray specifies the priority tag array for retrieving from high to low. + // If it's given `nil`, it returns map[name]Field, of which the `name` is attribute name. + PriorityTagArray []string + + // RecursiveOption specifies the way retrieving the fields recursively if the attribute + // is an embedded struct. It is RecursiveOptionNone in default. + RecursiveOption RecursiveOption +} + +type RecursiveOption int + +const ( + RecursiveOptionNone RecursiveOption = 0 // No recursively retrieving fields as map if the field is an embedded struct. + RecursiveOptionEmbedded RecursiveOption = 1 // Recursively retrieving fields as map if the field is an embedded struct. + RecursiveOptionEmbeddedNoTag RecursiveOption = 2 // Recursively retrieving fields as map if the field is an embedded struct and the field has no tag. 
+) diff --git a/vendor/github.com/gogf/gf/v2/os/gstructs/gstructs_field.go b/vendor/github.com/gogf/gf/v2/os/gstructs/gstructs_field.go new file mode 100644 index 00000000..e1d68603 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/gstructs/gstructs_field.go @@ -0,0 +1,232 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gstructs + +import ( + "reflect" + + "github.com/gogf/gf/v2/internal/utils" + "github.com/gogf/gf/v2/util/gtag" +) + +// Tag returns the value associated with key in the tag string. If there is no +// such key in the tag, Tag returns the empty string. +func (f *Field) Tag(key string) string { + s := f.Field.Tag.Get(key) + if s != "" { + s = gtag.Parse(s) + } + return s +} + +// TagLookup returns the value associated with key in the tag string. +// If the key is present in the tag the value (which may be empty) +// is returned. Otherwise, the returned value will be the empty string. +// The ok return value reports whether the value was explicitly set in +// the tag string. If the tag does not have the conventional format, +// the value returned by Lookup is unspecified. +func (f *Field) TagLookup(key string) (value string, ok bool) { + value, ok = f.Field.Tag.Lookup(key) + if ok && value != "" { + value = gtag.Parse(value) + } + return +} + +// IsEmbedded returns true if the given field is an anonymous field (embedded) +func (f *Field) IsEmbedded() bool { + return f.Field.Anonymous +} + +// TagStr returns the tag string of the field. +func (f *Field) TagStr() string { + return string(f.Field.Tag) +} + +// TagMap returns all the tag of the field along with its value string as map. 
+func (f *Field) TagMap() map[string]string { + var ( + data = ParseTag(f.TagStr()) + ) + for k, v := range data { + data[k] = utils.StripSlashes(gtag.Parse(v)) + } + return data +} + +// IsExported returns true if the given field is exported. +func (f *Field) IsExported() bool { + return f.Field.PkgPath == "" +} + +// Name returns the name of the given field. +func (f *Field) Name() string { + return f.Field.Name +} + +// Type returns the type of the given field. +// Note that this Type is not reflect.Type. If you need reflect.Type, please use Field.Type().Type. +func (f *Field) Type() Type { + return Type{ + Type: f.Field.Type, + } +} + +// Kind returns the reflect.Kind for Value of Field `f`. +func (f *Field) Kind() reflect.Kind { + return f.Value.Kind() +} + +// OriginalKind retrieves and returns the original reflect.Kind for Value of Field `f`. +func (f *Field) OriginalKind() reflect.Kind { + var ( + reflectType = f.Value.Type() + reflectKind = reflectType.Kind() + ) + for reflectKind == reflect.Ptr { + reflectType = reflectType.Elem() + reflectKind = reflectType.Kind() + } + return reflectKind +} + +// Fields retrieves and returns the fields of `pointer` as slice. 
+func Fields(in FieldsInput) ([]Field, error) { + var ( + ok bool + fieldFilterMap = make(map[string]struct{}) + retrievedFields = make([]Field, 0) + currentLevelFieldMap = make(map[string]Field) + ) + rangeFields, err := getFieldValues(in.Pointer) + if err != nil { + return nil, err + } + + for index := 0; index < len(rangeFields); index++ { + field := rangeFields[index] + currentLevelFieldMap[field.Name()] = field + } + + for index := 0; index < len(rangeFields); index++ { + field := rangeFields[index] + if _, ok = fieldFilterMap[field.Name()]; ok { + continue + } + if field.IsEmbedded() { + if in.RecursiveOption != RecursiveOptionNone { + switch in.RecursiveOption { + case RecursiveOptionEmbeddedNoTag: + if field.TagStr() != "" { + break + } + fallthrough + + case RecursiveOptionEmbedded: + structFields, err := Fields(FieldsInput{ + Pointer: field.Value, + RecursiveOption: in.RecursiveOption, + }) + if err != nil { + return nil, err + } + // The current level fields can overwrite the sub-struct fields with the same name. + for i := 0; i < len(structFields); i++ { + var ( + structField = structFields[i] + fieldName = structField.Name() + ) + if _, ok = fieldFilterMap[fieldName]; ok { + continue + } + fieldFilterMap[fieldName] = struct{}{} + if v, ok := currentLevelFieldMap[fieldName]; !ok { + retrievedFields = append(retrievedFields, structField) + } else { + retrievedFields = append(retrievedFields, v) + } + } + continue + } + } + continue + } + fieldFilterMap[field.Name()] = struct{}{} + retrievedFields = append(retrievedFields, field) + } + return retrievedFields, nil +} + +// FieldMap retrieves and returns struct field as map[name/tag]Field from `pointer`. +// +// The parameter `pointer` should be type of struct/*struct. +// +// The parameter `priority` specifies the priority tag array for retrieving from high to low. +// If it's given `nil`, it returns map[name]Field, of which the `name` is attribute name. 
+// +// The parameter `recursive` specifies the whether retrieving the fields recursively if the attribute +// is an embedded struct. +// +// Note that it only retrieves the exported attributes with first letter upper-case from struct. +func FieldMap(in FieldMapInput) (map[string]Field, error) { + fields, err := getFieldValues(in.Pointer) + if err != nil { + return nil, err + } + var ( + tagValue string + mapField = make(map[string]Field) + ) + for _, field := range fields { + // Only retrieve exported attributes. + if !field.IsExported() { + continue + } + tagValue = "" + for _, p := range in.PriorityTagArray { + tagValue = field.Tag(p) + if tagValue != "" && tagValue != "-" { + break + } + } + tempField := field + tempField.TagValue = tagValue + if tagValue != "" { + mapField[tagValue] = tempField + } else { + if in.RecursiveOption != RecursiveOptionNone && field.IsEmbedded() { + switch in.RecursiveOption { + case RecursiveOptionEmbeddedNoTag: + if field.TagStr() != "" { + mapField[field.Name()] = tempField + break + } + fallthrough + + case RecursiveOptionEmbedded: + m, err := FieldMap(FieldMapInput{ + Pointer: field.Value, + PriorityTagArray: in.PriorityTagArray, + RecursiveOption: in.RecursiveOption, + }) + if err != nil { + return nil, err + } + for k, v := range m { + if _, ok := mapField[k]; !ok { + tempV := v + mapField[k] = tempV + } + } + } + } else { + mapField[field.Name()] = tempField + } + } + } + return mapField, nil +} diff --git a/vendor/github.com/gogf/gf/v2/os/gstructs/gstructs_field_tag.go b/vendor/github.com/gogf/gf/v2/os/gstructs/gstructs_field_tag.go new file mode 100644 index 00000000..2fd03978 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/gstructs/gstructs_field_tag.go @@ -0,0 +1,90 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. 
+// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gstructs + +import ( + "strings" + + "github.com/gogf/gf/v2/util/gtag" +) + +// TagJsonName returns the `json` tag name string of the field. +func (f *Field) TagJsonName() string { + if jsonTag := f.Tag(gtag.Json); jsonTag != "" { + return strings.Split(jsonTag, ",")[0] + } + return "" +} + +// TagDefault returns the most commonly used tag `default/d` value of the field. +func (f *Field) TagDefault() string { + v := f.Tag(gtag.Default) + if v == "" { + v = f.Tag(gtag.DefaultShort) + } + return v +} + +// TagParam returns the most commonly used tag `param/p` value of the field. +func (f *Field) TagParam() string { + v := f.Tag(gtag.Param) + if v == "" { + v = f.Tag(gtag.ParamShort) + } + return v +} + +// TagValid returns the most commonly used tag `valid/v` value of the field. +func (f *Field) TagValid() string { + v := f.Tag(gtag.Valid) + if v == "" { + v = f.Tag(gtag.ValidShort) + } + return v +} + +// TagDescription returns the most commonly used tag `description/des/dc` value of the field. +func (f *Field) TagDescription() string { + v := f.Tag(gtag.Description) + if v == "" { + v = f.Tag(gtag.DescriptionShort) + } + if v == "" { + v = f.Tag(gtag.DescriptionShort2) + } + return v +} + +// TagSummary returns the most commonly used tag `summary/sum/sm` value of the field. +func (f *Field) TagSummary() string { + v := f.Tag(gtag.Summary) + if v == "" { + v = f.Tag(gtag.SummaryShort) + } + if v == "" { + v = f.Tag(gtag.SummaryShort2) + } + return v +} + +// TagAdditional returns the most commonly used tag `additional/ad` value of the field. +func (f *Field) TagAdditional() string { + v := f.Tag(gtag.Additional) + if v == "" { + v = f.Tag(gtag.AdditionalShort) + } + return v +} + +// TagExample returns the most commonly used tag `example/eg` value of the field. 
+func (f *Field) TagExample() string { + v := f.Tag(gtag.Example) + if v == "" { + v = f.Tag(gtag.ExampleShort) + } + return v +} diff --git a/vendor/github.com/gogf/gf/v2/os/gstructs/gstructs_tag.go b/vendor/github.com/gogf/gf/v2/os/gstructs/gstructs_tag.go new file mode 100644 index 00000000..72cbd081 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/gstructs/gstructs_tag.go @@ -0,0 +1,225 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gstructs + +import ( + "reflect" + "strconv" + + "github.com/gogf/gf/v2/errors/gcode" + "github.com/gogf/gf/v2/errors/gerror" + "github.com/gogf/gf/v2/util/gtag" +) + +// ParseTag parses tag string into map. +// For example: +// ParseTag(`v:"required" p:"id" d:"1"`) => map[v:required p:id d:1]. +func ParseTag(tag string) map[string]string { + var ( + key string + data = make(map[string]string) + ) + for tag != "" { + // Skip leading space. + i := 0 + for i < len(tag) && tag[i] == ' ' { + i++ + } + tag = tag[i:] + if tag == "" { + break + } + // Scan to colon. A space, a quote or a control character is a syntax error. + // Strictly speaking, control chars include the range [0x7f, 0x9f], not just + // [0x00, 0x1f], but in practice, we ignore the multi-byte control characters + // as it is simpler to inspect the tag's bytes than the tag's runes. + i = 0 + for i < len(tag) && tag[i] > ' ' && tag[i] != ':' && tag[i] != '"' && tag[i] != 0x7f { + i++ + } + if i == 0 || i+1 >= len(tag) || tag[i] != ':' || tag[i+1] != '"' { + break + } + key = tag[:i] + tag = tag[i+1:] + + // Scan quoted string to find value. 
+ i = 1 + for i < len(tag) && tag[i] != '"' { + if tag[i] == '\\' { + i++ + } + i++ + } + if i >= len(tag) { + break + } + quotedValue := tag[:i+1] + tag = tag[i+1:] + value, err := strconv.Unquote(quotedValue) + if err != nil { + panic(gerror.WrapCodef(gcode.CodeInvalidParameter, err, `error parsing tag "%s"`, tag)) + } + data[key] = gtag.Parse(value) + } + return data +} + +// TagFields retrieves and returns struct tags as []Field from `pointer`. +// +// The parameter `pointer` should be type of struct/*struct. +// +// Note that, +// 1. It only retrieves the exported attributes with first letter upper-case from struct. +// 2. The parameter `priority` should be given, it only retrieves fields that has given tag. +func TagFields(pointer interface{}, priority []string) ([]Field, error) { + return getFieldValuesByTagPriority(pointer, priority, map[string]struct{}{}) +} + +// TagMapName retrieves and returns struct tags as map[tag]attribute from `pointer`. +// +// The parameter `pointer` should be type of struct/*struct. +// +// Note that, +// 1. It only retrieves the exported attributes with first letter upper-case from struct. +// 2. The parameter `priority` should be given, it only retrieves fields that has given tag. +// 3. If one field has no specified tag, it uses its field name as result map key. +func TagMapName(pointer interface{}, priority []string) (map[string]string, error) { + fields, err := TagFields(pointer, priority) + if err != nil { + return nil, err + } + tagMap := make(map[string]string, len(fields)) + for _, field := range fields { + tagMap[field.TagValue] = field.Name() + } + return tagMap, nil +} + +// TagMapField retrieves struct tags as map[tag]Field from `pointer`, and returns it. +// The parameter `object` should be either type of struct/*struct/[]struct/[]*struct. +// +// Note that, +// 1. It only retrieves the exported attributes with first letter upper-case from struct. +// 2. 
The parameter `priority` should be given, it only retrieves fields that has given tag. +// 3. If one field has no specified tag, it uses its field name as result map key. +func TagMapField(object interface{}, priority []string) (map[string]Field, error) { + fields, err := TagFields(object, priority) + if err != nil { + return nil, err + } + tagMap := make(map[string]Field, len(fields)) + for _, field := range fields { + tagField := field + tagMap[field.TagValue] = tagField + } + return tagMap, nil +} + +func getFieldValues(value interface{}) ([]Field, error) { + var ( + reflectValue reflect.Value + reflectKind reflect.Kind + ) + if v, ok := value.(reflect.Value); ok { + reflectValue = v + reflectKind = reflectValue.Kind() + } else { + reflectValue = reflect.ValueOf(value) + reflectKind = reflectValue.Kind() + } + for { + switch reflectKind { + case reflect.Ptr: + if !reflectValue.IsValid() || reflectValue.IsNil() { + // If pointer is type of *struct and nil, then automatically create a temporary struct. 
+ reflectValue = reflect.New(reflectValue.Type().Elem()).Elem() + reflectKind = reflectValue.Kind() + } else { + reflectValue = reflectValue.Elem() + reflectKind = reflectValue.Kind() + } + case reflect.Array, reflect.Slice: + reflectValue = reflect.New(reflectValue.Type().Elem()).Elem() + reflectKind = reflectValue.Kind() + default: + goto exitLoop + } + } + +exitLoop: + for reflectKind == reflect.Ptr { + reflectValue = reflectValue.Elem() + reflectKind = reflectValue.Kind() + } + if reflectKind != reflect.Struct { + return nil, gerror.NewCode( + gcode.CodeInvalidParameter, + "given value should be either type of struct/*struct/[]struct/[]*struct", + ) + } + var ( + structType = reflectValue.Type() + length = reflectValue.NumField() + fields = make([]Field, length) + ) + for i := 0; i < length; i++ { + fields[i] = Field{ + Value: reflectValue.Field(i), + Field: structType.Field(i), + } + } + return fields, nil +} + +func getFieldValuesByTagPriority( + pointer interface{}, priority []string, repeatedTagFilteringMap map[string]struct{}, +) ([]Field, error) { + fields, err := getFieldValues(pointer) + if err != nil { + return nil, err + } + var ( + tagName string + tagValue string + tagFields = make([]Field, 0) + ) + for _, field := range fields { + // Only retrieve exported attributes. + if !field.IsExported() { + continue + } + tagValue = "" + for _, p := range priority { + tagName = p + tagValue = field.Tag(p) + if tagValue != "" && tagValue != "-" { + break + } + } + if tagValue != "" { + // Filter repeated tag. + if _, ok := repeatedTagFilteringMap[tagValue]; ok { + continue + } + tagField := field + tagField.TagName = tagName + tagField.TagValue = tagValue + tagFields = append(tagFields, tagField) + } + // If this is an embedded attribute, it retrieves the tags recursively. 
+ if field.IsEmbedded() && field.OriginalKind() == reflect.Struct { + subTagFields, err := getFieldValuesByTagPriority(field.Value, priority, repeatedTagFilteringMap) + if err != nil { + return nil, err + } else { + tagFields = append(tagFields, subTagFields...) + } + } + } + return tagFields, nil +} diff --git a/vendor/github.com/gogf/gf/v2/os/gstructs/gstructs_type.go b/vendor/github.com/gogf/gf/v2/os/gstructs/gstructs_type.go new file mode 100644 index 00000000..82d24de6 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/gstructs/gstructs_type.go @@ -0,0 +1,75 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gstructs + +import ( + "reflect" + + "github.com/gogf/gf/v2/errors/gerror" +) + +// StructType retrieves and returns the struct Type of specified struct/*struct. +// The parameter `object` should be either type of struct/*struct/[]struct/[]*struct. +func StructType(object interface{}) (*Type, error) { + var ( + reflectValue reflect.Value + reflectKind reflect.Kind + reflectType reflect.Type + ) + if rv, ok := object.(reflect.Value); ok { + reflectValue = rv + } else { + reflectValue = reflect.ValueOf(object) + } + reflectKind = reflectValue.Kind() + for { + switch reflectKind { + case reflect.Ptr: + if !reflectValue.IsValid() || reflectValue.IsNil() { + // If pointer is type of *struct and nil, then automatically create a temporary struct. 
+ reflectValue = reflect.New(reflectValue.Type().Elem()).Elem() + reflectKind = reflectValue.Kind() + } else { + reflectValue = reflectValue.Elem() + reflectKind = reflectValue.Kind() + } + + case reflect.Array, reflect.Slice: + reflectValue = reflect.New(reflectValue.Type().Elem()).Elem() + reflectKind = reflectValue.Kind() + + default: + goto exitLoop + } + } + +exitLoop: + if reflectKind != reflect.Struct { + return nil, gerror.Newf( + `invalid object kind "%s", kind of "struct" is required`, + reflectKind, + ) + } + reflectType = reflectValue.Type() + return &Type{ + Type: reflectType, + }, nil +} + +// Signature returns a unique string as this type. +func (t Type) Signature() string { + return t.PkgPath() + "/" + t.String() +} + +// FieldKeys returns the keys of current struct/map. +func (t Type) FieldKeys() []string { + keys := make([]string, t.NumField()) + for i := 0; i < t.NumField(); i++ { + keys[i] = t.Field(i).Name + } + return keys +} diff --git a/vendor/github.com/gogf/gf/v2/os/gtime/gtime.go b/vendor/github.com/gogf/gf/v2/os/gtime/gtime.go new file mode 100644 index 00000000..5cbe0020 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/gtime/gtime.go @@ -0,0 +1,452 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +// Package gtime provides functionality for measuring and displaying time. +// +// This package should keep much less dependencies with other packages. +package gtime + +import ( + "context" + "fmt" + "regexp" + "strconv" + "strings" + "time" + + "github.com/gogf/gf/v2/errors/gcode" + "github.com/gogf/gf/v2/errors/gerror" + "github.com/gogf/gf/v2/internal/intlog" + "github.com/gogf/gf/v2/internal/utils" + "github.com/gogf/gf/v2/text/gregex" +) + +const ( + // Short writes for common usage durations. 
+ + D = 24 * time.Hour + H = time.Hour + M = time.Minute + S = time.Second + MS = time.Millisecond + US = time.Microsecond + NS = time.Nanosecond + + // Regular expression1(datetime separator supports '-', '/', '.'). + // Eg: + // "2017-12-14 04:51:34 +0805 LMT", + // "2017-12-14 04:51:34 +0805 LMT", + // "2006-01-02T15:04:05Z07:00", + // "2014-01-17T01:19:15+08:00", + // "2018-02-09T20:46:17.897Z", + // "2018-02-09 20:46:17.897", + // "2018-02-09T20:46:17Z", + // "2018-02-09 20:46:17", + // "2018/10/31 - 16:38:46" + // "2018-02-09", + // "2018.02.09", + timeRegexPattern1 = `(\d{4}[-/\.]\d{1,2}[-/\.]\d{1,2})[:\sT-]*(\d{0,2}:{0,1}\d{0,2}:{0,1}\d{0,2}){0,1}\.{0,1}(\d{0,9})([\sZ]{0,1})([\+-]{0,1})([:\d]*)` + + // Regular expression2(datetime separator supports '-', '/', '.'). + // Eg: + // 01-Nov-2018 11:50:28 + // 01/Nov/2018 11:50:28 + // 01.Nov.2018 11:50:28 + // 01.Nov.2018:11:50:28 + timeRegexPattern2 = `(\d{1,2}[-/\.][A-Za-z]{3,}[-/\.]\d{4})[:\sT-]*(\d{0,2}:{0,1}\d{0,2}:{0,1}\d{0,2}){0,1}\.{0,1}(\d{0,9})([\sZ]{0,1})([\+-]{0,1})([:\d]*)` + + // Regular expression3(time). + // Eg: + // 11:50:28 + // 11:50:28.897 + timeRegexPattern3 = `(\d{2}):(\d{2}):(\d{2})\.{0,1}(\d{0,9})` +) + +var ( + // It's more high performance using regular expression + // than time.ParseInLocation to parse the datetime string. + timeRegex1, _ = regexp.Compile(timeRegexPattern1) + timeRegex2, _ = regexp.Compile(timeRegexPattern2) + timeRegex3, _ = regexp.Compile(timeRegexPattern3) + + // Month words to arabic numerals mapping. + monthMap = map[string]int{ + "jan": 1, + "feb": 2, + "mar": 3, + "apr": 4, + "may": 5, + "jun": 6, + "jul": 7, + "aug": 8, + "sep": 9, + "sept": 9, + "oct": 10, + "nov": 11, + "dec": 12, + "january": 1, + "february": 2, + "march": 3, + "april": 4, + "june": 6, + "july": 7, + "august": 8, + "september": 9, + "october": 10, + "november": 11, + "december": 12, + } +) + +// Timestamp retrieves and returns the timestamp in seconds. 
+func Timestamp() int64 { + return Now().Timestamp() +} + +// TimestampMilli retrieves and returns the timestamp in milliseconds. +func TimestampMilli() int64 { + return Now().TimestampMilli() +} + +// TimestampMicro retrieves and returns the timestamp in microseconds. +func TimestampMicro() int64 { + return Now().TimestampMicro() +} + +// TimestampNano retrieves and returns the timestamp in nanoseconds. +func TimestampNano() int64 { + return Now().TimestampNano() +} + +// TimestampStr is a convenience method which retrieves and returns +// the timestamp in seconds as string. +func TimestampStr() string { + return Now().TimestampStr() +} + +// TimestampMilliStr is a convenience method which retrieves and returns +// the timestamp in milliseconds as string. +func TimestampMilliStr() string { + return Now().TimestampMilliStr() +} + +// TimestampMicroStr is a convenience method which retrieves and returns +// the timestamp in microseconds as string. +func TimestampMicroStr() string { + return Now().TimestampMicroStr() +} + +// TimestampNanoStr is a convenience method which retrieves and returns +// the timestamp in nanoseconds as string. +func TimestampNanoStr() string { + return Now().TimestampNanoStr() +} + +// Date returns current date in string like "2006-01-02". +func Date() string { + return time.Now().Format("2006-01-02") +} + +// Datetime returns current datetime in string like "2006-01-02 15:04:05". +func Datetime() string { + return time.Now().Format("2006-01-02 15:04:05") +} + +// ISO8601 returns current datetime in ISO8601 format like "2006-01-02T15:04:05-07:00". +func ISO8601() string { + return time.Now().Format("2006-01-02T15:04:05-07:00") +} + +// RFC822 returns current datetime in RFC822 format like "Mon, 02 Jan 06 15:04 MST". +func RFC822() string { + return time.Now().Format("Mon, 02 Jan 06 15:04 MST") +} + +// parseDateStr parses the string to year, month and day numbers. 
+func parseDateStr(s string) (year, month, day int) { + array := strings.Split(s, "-") + if len(array) < 3 { + array = strings.Split(s, "/") + } + if len(array) < 3 { + array = strings.Split(s, ".") + } + // Parsing failed. + if len(array) < 3 { + return + } + // Checking the year in head or tail. + if utils.IsNumeric(array[1]) { + year, _ = strconv.Atoi(array[0]) + month, _ = strconv.Atoi(array[1]) + day, _ = strconv.Atoi(array[2]) + } else { + if v, ok := monthMap[strings.ToLower(array[1])]; ok { + month = v + } else { + return + } + year, _ = strconv.Atoi(array[2]) + day, _ = strconv.Atoi(array[0]) + } + return +} + +// StrToTime converts string to *Time object. It also supports timestamp string. +// The parameter `format` is unnecessary, which specifies the format for converting like "Y-m-d H:i:s". +// If `format` is given, it acts as same as function StrToTimeFormat. +// If `format` is not given, it converts string as a "standard" datetime string. +// Note that, it fails and returns error if there's no date string in `str`. 
+func StrToTime(str string, format ...string) (*Time, error) { + if str == "" { + return &Time{wrapper{time.Time{}}}, nil + } + if len(format) > 0 { + return StrToTimeFormat(str, format[0]) + } + if isTimestampStr(str) { + timestamp, _ := strconv.ParseInt(str, 10, 64) + return NewFromTimeStamp(timestamp), nil + } + var ( + year, month, day int + hour, min, sec, nsec int + match []string + local = time.Local + ) + if match = timeRegex1.FindStringSubmatch(str); len(match) > 0 && match[1] != "" { + year, month, day = parseDateStr(match[1]) + } else if match = timeRegex2.FindStringSubmatch(str); len(match) > 0 && match[1] != "" { + year, month, day = parseDateStr(match[1]) + } else if match = timeRegex3.FindStringSubmatch(str); len(match) > 0 && match[1] != "" { + s := strings.ReplaceAll(match[2], ":", "") + if len(s) < 6 { + s += strings.Repeat("0", 6-len(s)) + } + hour, _ = strconv.Atoi(match[1]) + min, _ = strconv.Atoi(match[2]) + sec, _ = strconv.Atoi(match[3]) + nsec, _ = strconv.Atoi(match[4]) + for i := 0; i < 9-len(match[4]); i++ { + nsec *= 10 + } + return NewFromTime(time.Date(0, time.Month(1), 1, hour, min, sec, nsec, local)), nil + } else { + return nil, gerror.NewCodef(gcode.CodeInvalidParameter, `unsupported time converting for string "%s"`, str) + } + + // Time + if len(match[2]) > 0 { + s := strings.ReplaceAll(match[2], ":", "") + if len(s) < 6 { + s += strings.Repeat("0", 6-len(s)) + } + hour, _ = strconv.Atoi(s[0:2]) + min, _ = strconv.Atoi(s[2:4]) + sec, _ = strconv.Atoi(s[4:6]) + } + // Nanoseconds, check and perform bits filling + if len(match[3]) > 0 { + nsec, _ = strconv.Atoi(match[3]) + for i := 0; i < 9-len(match[3]); i++ { + nsec *= 10 + } + } + // If there's zone information in the string, + // it then performs time zone conversion, which converts the time zone to UTC. + if match[4] != "" && match[6] == "" { + match[6] = "000000" + } + // If there's offset in the string, it then firstly processes the offset. 
+ if match[6] != "" { + zone := strings.ReplaceAll(match[6], ":", "") + zone = strings.TrimLeft(zone, "+-") + if len(zone) <= 6 { + zone += strings.Repeat("0", 6-len(zone)) + h, _ := strconv.Atoi(zone[0:2]) + m, _ := strconv.Atoi(zone[2:4]) + s, _ := strconv.Atoi(zone[4:6]) + if h > 24 || m > 59 || s > 59 { + return nil, gerror.NewCodef(gcode.CodeInvalidParameter, `invalid zone string "%s"`, match[6]) + } + operation := match[5] + if operation != "+" && operation != "-" { + operation = "-" + } + // Comparing the given time zone whether equals to current time zone, + // it converts it to UTC if they do not equal. + _, localOffset := time.Now().Zone() + // Comparing in seconds. + if (h*3600+m*60+s) != localOffset || + (localOffset > 0 && operation == "-") || + (localOffset < 0 && operation == "+") { + local = time.UTC + // UTC conversion. + switch operation { + case "+": + if h > 0 { + hour -= h + } + if m > 0 { + min -= m + } + if s > 0 { + sec -= s + } + case "-": + if h > 0 { + hour += h + } + if m > 0 { + min += m + } + if s > 0 { + sec += s + } + } + } + } + } + if month <= 0 || day <= 0 { + return nil, gerror.NewCodef(gcode.CodeInvalidParameter, `invalid time string "%s"`, str) + } + return NewFromTime(time.Date(year, time.Month(month), day, hour, min, sec, nsec, local)), nil +} + +// ConvertZone converts time in string `strTime` from `fromZone` to `toZone`. +// The parameter `fromZone` is unnecessary, it is current time zone in default. 
+func ConvertZone(strTime string, toZone string, fromZone ...string) (*Time, error) { + t, err := StrToTime(strTime) + if err != nil { + return nil, err + } + var l *time.Location + if len(fromZone) > 0 { + if l, err = time.LoadLocation(fromZone[0]); err != nil { + err = gerror.WrapCodef(gcode.CodeInvalidParameter, err, `time.LoadLocation failed for name "%s"`, fromZone[0]) + return nil, err + } else { + t.Time = time.Date(t.Year(), time.Month(t.Month()), t.Day(), t.Hour(), t.Minute(), t.Time.Second(), t.Time.Nanosecond(), l) + } + } + if l, err = time.LoadLocation(toZone); err != nil { + err = gerror.WrapCodef(gcode.CodeInvalidParameter, err, `time.LoadLocation failed for name "%s"`, toZone) + return nil, err + } else { + return t.ToLocation(l), nil + } +} + +// StrToTimeFormat parses string `str` to *Time object with given format `format`. +// The parameter `format` is like "Y-m-d H:i:s". +func StrToTimeFormat(str string, format string) (*Time, error) { + return StrToTimeLayout(str, formatToStdLayout(format)) +} + +// StrToTimeLayout parses string `str` to *Time object with given format `layout`. +// The parameter `layout` is in stdlib format like "2006-01-02 15:04:05". +func StrToTimeLayout(str string, layout string) (*Time, error) { + if t, err := time.ParseInLocation(layout, str, time.Local); err == nil { + return NewFromTime(t), nil + } else { + return nil, gerror.WrapCodef( + gcode.CodeInvalidParameter, err, + `time.ParseInLocation failed for layout "%s" and value "%s"`, + layout, str, + ) + } +} + +// ParseTimeFromContent retrieves time information for content string, it then parses and returns it +// as *Time object. +// It returns the first time information if there are more than one time string in the content. +// It only retrieves and parses the time information with given `format` if it's passed. 
+func ParseTimeFromContent(content string, format ...string) *Time { + var ( + err error + match []string + ) + if len(format) > 0 { + match, err = gregex.MatchString(formatToRegexPattern(format[0]), content) + if err != nil { + intlog.Errorf(context.TODO(), `%+v`, err) + } + if len(match) > 0 { + return NewFromStrFormat(match[0], format[0]) + } + } else { + if match = timeRegex1.FindStringSubmatch(content); len(match) >= 1 { + return NewFromStr(strings.Trim(match[0], "./_- \n\r")) + } else if match = timeRegex2.FindStringSubmatch(content); len(match) >= 1 { + return NewFromStr(strings.Trim(match[0], "./_- \n\r")) + } else if match = timeRegex3.FindStringSubmatch(content); len(match) >= 1 { + return NewFromStr(strings.Trim(match[0], "./_- \n\r")) + } + } + return nil +} + +// ParseDuration parses a duration string. +// A duration string is a possibly signed sequence of +// decimal numbers, each with optional fraction and a unit suffix, +// such as "300ms", "-1.5h", "1d" or "2h45m". +// Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h", "d". +// +// Very note that it supports unit "d" more than function time.ParseDuration. 
+func ParseDuration(s string) (duration time.Duration, err error) { + var ( + num int64 + ) + if utils.IsNumeric(s) { + num, err = strconv.ParseInt(s, 10, 64) + if err != nil { + err = gerror.WrapCodef(gcode.CodeInvalidParameter, err, `strconv.ParseInt failed for string "%s"`, s) + return 0, err + } + return time.Duration(num), nil + } + match, err := gregex.MatchString(`^([\-\d]+)[dD](.*)$`, s) + if err != nil { + return 0, err + } + if len(match) == 3 { + num, err = strconv.ParseInt(match[1], 10, 64) + if err != nil { + err = gerror.WrapCodef(gcode.CodeInvalidParameter, err, `strconv.ParseInt failed for string "%s"`, match[1]) + return 0, err + } + s = fmt.Sprintf(`%dh%s`, num*24, match[2]) + duration, err = time.ParseDuration(s) + if err != nil { + err = gerror.WrapCodef(gcode.CodeInvalidParameter, err, `time.ParseDuration failed for string "%s"`, s) + } + return + } + duration, err = time.ParseDuration(s) + err = gerror.WrapCodef(gcode.CodeInvalidParameter, err, `time.ParseDuration failed for string "%s"`, s) + return +} + +// FuncCost calculates the cost time of function `f` in nanoseconds. +func FuncCost(f func()) time.Duration { + t := time.Now() + f() + return time.Since(t) +} + +// isTimestampStr checks and returns whether given string a timestamp string. +func isTimestampStr(s string) bool { + length := len(s) + if length == 0 { + return false + } + for i := 0; i < len(s); i++ { + if s[i] < '0' || s[i] > '9' { + return false + } + } + return true +} diff --git a/vendor/github.com/gogf/gf/v2/os/gtime/gtime_format.go b/vendor/github.com/gogf/gf/v2/os/gtime/gtime_format.go new file mode 100644 index 00000000..6e0898e3 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/gtime/gtime_format.go @@ -0,0 +1,280 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. 
+// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gtime + +import ( + "bytes" + "strconv" + "strings" + + "github.com/gogf/gf/v2/text/gregex" +) + +var ( + // Refer: http://php.net/manual/en/function.date.php + formats = map[byte]string{ + 'd': "02", // Day: Day of the month, 2 digits with leading zeros. Eg: 01 to 31. + 'D': "Mon", // Day: A textual representation of a day, three letters. Eg: Mon through Sun. + 'w': "Monday", // Day: Numeric representation of the day of the week. Eg: 0 (for Sunday) through 6 (for Saturday). + 'N': "Monday", // Day: ISO-8601 numeric representation of the day of the week. Eg: 1 (for Monday) through 7 (for Sunday). + 'j': "=j=02", // Day: Day of the month without leading zeros. Eg: 1 to 31. + 'S': "02", // Day: English ordinal suffix for the day of the month, 2 characters. Eg: st, nd, rd or th. Works well with j. + 'l': "Monday", // Day: A full textual representation of the day of the week. Eg: Sunday through Saturday. + 'z': "", // Day: The day of the year (starting from 0). Eg: 0 through 365. + 'W': "", // Week: ISO-8601 week number of year, weeks starting on Monday. Eg: 42 (the 42nd week in the year). + 'F': "January", // Month: A full textual representation of a month, such as January or March. Eg: January through December. + 'm': "01", // Month: Numeric representation of a month, with leading zeros. Eg: 01 through 12. + 'M': "Jan", // Month: A short textual representation of a month, three letters. Eg: Jan through Dec. + 'n': "1", // Month: Numeric representation of a month, without leading zeros. Eg: 1 through 12. + 't': "", // Month: Number of days in the given month. Eg: 28 through 31. + 'Y': "2006", // Year: A full numeric representation of a year, 4 digits. Eg: 1999 or 2003. + 'y': "06", // Year: A two digit representation of a year. Eg: 99 or 03. + 'a': "pm", // Time: Lowercase Ante meridiem and Post meridiem. Eg: am or pm. 
+ 'A': "PM", // Time: Uppercase Ante meridiem and Post meridiem. Eg: AM or PM. + 'g': "3", // Time: 12-hour format of an hour without leading zeros. Eg: 1 through 12. + 'G': "=G=15", // Time: 24-hour format of an hour without leading zeros. Eg: 0 through 23. + 'h': "03", // Time: 12-hour format of an hour with leading zeros. Eg: 01 through 12. + 'H': "15", // Time: 24-hour format of an hour with leading zeros. Eg: 00 through 23. + 'i': "04", // Time: Minutes with leading zeros. Eg: 00 to 59. + 's': "05", // Time: Seconds with leading zeros. Eg: 00 through 59. + 'u': "=u=.000", // Time: Milliseconds. Eg: 234, 678. + 'U': "", // Time: Seconds since the Unix Epoch (January 1 1970 00:00:00 GMT). + 'O': "-0700", // Zone: Difference to Greenwich time (GMT) in hours. Eg: +0200. + 'P': "-07:00", // Zone: Difference to Greenwich time (GMT) with colon between hours and minutes. Eg: +02:00. + 'T': "MST", // Zone: Timezone abbreviation. Eg: UTC, EST, MDT ... + 'c': "2006-01-02T15:04:05-07:00", // Format: ISO 8601 date. Eg: 2004-02-12T15:19:21+00:00. + 'r': "Mon, 02 Jan 06 15:04 MST", // Format: RFC 2822 formatted date. Eg: Thu, 21 Dec 2000 16:01:07 +0200. + } + + // Week to number mapping. + weekMap = map[string]string{ + "Sunday": "0", + "Monday": "1", + "Tuesday": "2", + "Wednesday": "3", + "Thursday": "4", + "Friday": "5", + "Saturday": "6", + } + + // Day count of each month which is not in leap year. + dayOfMonth = []int{0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334} +) + +// Format formats and returns the formatted result with custom `format`. 
+func (t *Time) Format(format string) string { + if t == nil { + return "" + } + runes := []rune(format) + buffer := bytes.NewBuffer(nil) + for i := 0; i < len(runes); { + switch runes[i] { + case '\\': + if i < len(runes)-1 { + buffer.WriteRune(runes[i+1]) + i += 2 + continue + } else { + return buffer.String() + } + case 'W': + buffer.WriteString(strconv.Itoa(t.WeeksOfYear())) + case 'z': + buffer.WriteString(strconv.Itoa(t.DayOfYear())) + case 't': + buffer.WriteString(strconv.Itoa(t.DaysInMonth())) + case 'U': + buffer.WriteString(strconv.FormatInt(t.Unix(), 10)) + default: + if runes[i] > 255 { + buffer.WriteRune(runes[i]) + break + } + if f, ok := formats[byte(runes[i])]; ok { + result := t.Time.Format(f) + // Particular chars should be handled here. + switch runes[i] { + case 'j': + for _, s := range []string{"=j=0", "=j="} { + result = strings.ReplaceAll(result, s, "") + } + buffer.WriteString(result) + case 'G': + for _, s := range []string{"=G=0", "=G="} { + result = strings.ReplaceAll(result, s, "") + } + buffer.WriteString(result) + case 'u': + buffer.WriteString(strings.ReplaceAll(result, "=u=.", "")) + case 'w': + buffer.WriteString(weekMap[result]) + case 'N': + buffer.WriteString(strings.ReplaceAll(weekMap[result], "0", "7")) + case 'S': + buffer.WriteString(formatMonthDaySuffixMap(result)) + default: + buffer.WriteString(result) + } + } else { + buffer.WriteRune(runes[i]) + } + } + i++ + } + return buffer.String() +} + +// FormatNew formats and returns a new Time object with given custom `format`. +func (t *Time) FormatNew(format string) *Time { + if t == nil { + return nil + } + return NewFromStr(t.Format(format)) +} + +// FormatTo formats `t` with given custom `format`. +func (t *Time) FormatTo(format string) *Time { + if t == nil { + return nil + } + t.Time = NewFromStr(t.Format(format)).Time + return t +} + +// Layout formats the time with stdlib layout and returns the formatted result. 
+func (t *Time) Layout(layout string) string { + if t == nil { + return "" + } + return t.Time.Format(layout) +} + +// LayoutNew formats the time with stdlib layout and returns the new Time object. +func (t *Time) LayoutNew(layout string) *Time { + if t == nil { + return nil + } + return NewFromStr(t.Layout(layout)) +} + +// LayoutTo formats `t` with stdlib layout. +func (t *Time) LayoutTo(layout string) *Time { + if t == nil { + return nil + } + t.Time = NewFromStr(t.Layout(layout)).Time + return t +} + +// IsLeapYear checks whether the time is leap year. +func (t *Time) IsLeapYear() bool { + year := t.Year() + if (year%4 == 0 && year%100 != 0) || year%400 == 0 { + return true + } + return false +} + +// DayOfYear checks and returns the position of the day for the year. +func (t *Time) DayOfYear() int { + var ( + day = t.Day() + month = t.Month() + ) + if t.IsLeapYear() { + if month > 2 { + return dayOfMonth[month-1] + day + } + return dayOfMonth[month-1] + day - 1 + } + return dayOfMonth[month-1] + day - 1 +} + +// DaysInMonth returns the day count of current month. +func (t *Time) DaysInMonth() int { + switch t.Month() { + case 1, 3, 5, 7, 8, 10, 12: + return 31 + case 4, 6, 9, 11: + return 30 + } + if t.IsLeapYear() { + return 29 + } + return 28 +} + +// WeeksOfYear returns the point of current week for the year. +func (t *Time) WeeksOfYear() int { + _, week := t.ISOWeek() + return week +} + +// formatToStdLayout converts custom format to stdlib layout. +func formatToStdLayout(format string) string { + b := bytes.NewBuffer(nil) + for i := 0; i < len(format); { + switch format[i] { + case '\\': + if i < len(format)-1 { + b.WriteByte(format[i+1]) + i += 2 + continue + } else { + return b.String() + } + + default: + if f, ok := formats[format[i]]; ok { + // Handle particular chars. + switch format[i] { + case 'j': + b.WriteString("2") + case 'G': + b.WriteString("15") + case 'u': + if i > 0 && format[i-1] == '.' 
{ + b.WriteString("000") + } else { + b.WriteString(".000") + } + + default: + b.WriteString(f) + } + } else { + b.WriteByte(format[i]) + } + i++ + } + } + return b.String() +} + +// formatToRegexPattern converts the custom format to its corresponding regular expression. +func formatToRegexPattern(format string) string { + s := gregex.Quote(formatToStdLayout(format)) + s, _ = gregex.ReplaceString(`[0-9]`, `[0-9]`, s) + s, _ = gregex.ReplaceString(`[A-Za-z]`, `[A-Za-z]`, s) + s, _ = gregex.ReplaceString(`\s+`, `\s+`, s) + return s +} + +// formatMonthDaySuffixMap returns the short english word for current day. +func formatMonthDaySuffixMap(day string) string { + switch day { + case "01", "21", "31": + return "st" + case "02", "22": + return "nd" + case "03", "23": + return "rd" + default: + return "th" + } +} diff --git a/vendor/github.com/gogf/gf/v2/os/gtime/gtime_sql.go b/vendor/github.com/gogf/gf/v2/os/gtime/gtime_sql.go new file mode 100644 index 00000000..e3c79a81 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/gtime/gtime_sql.go @@ -0,0 +1,28 @@ +package gtime + +import ( + "database/sql/driver" +) + +// Scan implements interface used by Scan in package database/sql for Scanning value +// from database to local golang variable. +func (t *Time) Scan(value interface{}) error { + if t == nil { + return nil + } + newTime := New(value) + *t = *newTime + return nil +} + +// Value is the interface providing the Value method for package database/sql/driver +// for retrieving value from golang variable to database. +func (t *Time) Value() (driver.Value, error) { + if t == nil { + return nil, nil + } + if t.IsZero() { + return nil, nil + } + return t.Time, nil +} diff --git a/vendor/github.com/gogf/gf/v2/os/gtime/gtime_time.go b/vendor/github.com/gogf/gf/v2/os/gtime/gtime_time.go new file mode 100644 index 00000000..e66967bc --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/gtime/gtime_time.go @@ -0,0 +1,518 @@ +// Copyright GoFrame Author(https://goframe.org). 
All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gtime + +import ( + "bytes" + "strconv" + "time" + + "github.com/gogf/gf/v2/errors/gcode" + "github.com/gogf/gf/v2/errors/gerror" +) + +// Time is a wrapper for time.Time for additional features. +type Time struct { + wrapper +} + +// iUnixNano is an interface definition commonly for custom time.Time wrapper. +type iUnixNano interface { + UnixNano() int64 +} + +// New creates and returns a Time object with given parameter. +// The optional parameter can be type of: time.Time/*time.Time, string or integer. +func New(param ...interface{}) *Time { + if len(param) > 0 { + switch r := param[0].(type) { + case time.Time: + return NewFromTime(r) + case *time.Time: + return NewFromTime(*r) + + case Time: + return &r + + case *Time: + return r + + case string: + if len(param) > 1 { + switch t := param[1].(type) { + case string: + return NewFromStrFormat(r, t) + case []byte: + return NewFromStrFormat(r, string(t)) + } + } + return NewFromStr(r) + + case []byte: + if len(param) > 1 { + switch t := param[1].(type) { + case string: + return NewFromStrFormat(string(r), t) + case []byte: + return NewFromStrFormat(string(r), string(t)) + } + } + return NewFromStr(string(r)) + + case int: + return NewFromTimeStamp(int64(r)) + + case int64: + return NewFromTimeStamp(r) + + default: + if v, ok := r.(iUnixNano); ok { + return NewFromTimeStamp(v.UnixNano()) + } + } + } + return &Time{ + wrapper{time.Time{}}, + } +} + +// Now creates and returns a time object of now. +func Now() *Time { + return &Time{ + wrapper{time.Now()}, + } +} + +// NewFromTime creates and returns a Time object with given time.Time object. +func NewFromTime(t time.Time) *Time { + return &Time{ + wrapper{t}, + } +} + +// NewFromStr creates and returns a Time object with given string. 
+// Note that it returns nil if there's error occurs. +func NewFromStr(str string) *Time { + if t, err := StrToTime(str); err == nil { + return t + } + return nil +} + +// NewFromStrFormat creates and returns a Time object with given string and +// custom format like: Y-m-d H:i:s. +// Note that it returns nil if there's error occurs. +func NewFromStrFormat(str string, format string) *Time { + if t, err := StrToTimeFormat(str, format); err == nil { + return t + } + return nil +} + +// NewFromStrLayout creates and returns a Time object with given string and +// stdlib layout like: 2006-01-02 15:04:05. +// Note that it returns nil if there's error occurs. +func NewFromStrLayout(str string, layout string) *Time { + if t, err := StrToTimeLayout(str, layout); err == nil { + return t + } + return nil +} + +// NewFromTimeStamp creates and returns a Time object with given timestamp, +// which can be in seconds to nanoseconds. +// Eg: 1600443866 and 1600443866199266000 are both considered as valid timestamp number. +func NewFromTimeStamp(timestamp int64) *Time { + if timestamp == 0 { + return &Time{} + } + var sec, nano int64 + if timestamp > 1e9 { + for timestamp < 1e18 { + timestamp *= 10 + } + sec = timestamp / 1e9 + nano = timestamp % 1e9 + } else { + sec = timestamp + } + return &Time{ + wrapper{time.Unix(sec, nano)}, + } +} + +// Timestamp returns the timestamp in seconds. +func (t *Time) Timestamp() int64 { + return t.UnixNano() / 1e9 +} + +// TimestampMilli returns the timestamp in milliseconds. +func (t *Time) TimestampMilli() int64 { + return t.UnixNano() / 1e6 +} + +// TimestampMicro returns the timestamp in microseconds. +func (t *Time) TimestampMicro() int64 { + return t.UnixNano() / 1e3 +} + +// TimestampNano returns the timestamp in nanoseconds. +func (t *Time) TimestampNano() int64 { + return t.UnixNano() +} + +// TimestampStr is a convenience method which retrieves and returns +// the timestamp in seconds as string. 
+func (t *Time) TimestampStr() string { + return strconv.FormatInt(t.Timestamp(), 10) +} + +// TimestampMilliStr is a convenience method which retrieves and returns +// the timestamp in milliseconds as string. +func (t *Time) TimestampMilliStr() string { + return strconv.FormatInt(t.TimestampMilli(), 10) +} + +// TimestampMicroStr is a convenience method which retrieves and returns +// the timestamp in microseconds as string. +func (t *Time) TimestampMicroStr() string { + return strconv.FormatInt(t.TimestampMicro(), 10) +} + +// TimestampNanoStr is a convenience method which retrieves and returns +// the timestamp in nanoseconds as string. +func (t *Time) TimestampNanoStr() string { + return strconv.FormatInt(t.TimestampNano(), 10) +} + +// Month returns the month of the year specified by t. +func (t *Time) Month() int { + return int(t.Time.Month()) +} + +// Second returns the second offset within the minute specified by t, +// in the range [0, 59]. +func (t *Time) Second() int { + return t.Time.Second() +} + +// Millisecond returns the millisecond offset within the second specified by t, +// in the range [0, 999]. +func (t *Time) Millisecond() int { + return t.Time.Nanosecond() / 1e6 +} + +// Microsecond returns the microsecond offset within the second specified by t, +// in the range [0, 999999]. +func (t *Time) Microsecond() int { + return t.Time.Nanosecond() / 1e3 +} + +// Nanosecond returns the nanosecond offset within the second specified by t, +// in the range [0, 999999999]. +func (t *Time) Nanosecond() int { + return t.Time.Nanosecond() +} + +// String returns current time object as string. +func (t *Time) String() string { + if t == nil { + return "" + } + if t.IsZero() { + return "" + } + return t.wrapper.String() +} + +// IsZero reports whether t represents the zero time instant, +// January 1, year 1, 00:00:00 UTC. 
+func (t *Time) IsZero() bool { + if t == nil { + return true + } + return t.Time.IsZero() +} + +// Clone returns a new Time object which is a clone of current time object. +func (t *Time) Clone() *Time { + return New(t.Time) +} + +// Add adds the duration to current time. +func (t *Time) Add(d time.Duration) *Time { + newTime := t.Clone() + newTime.Time = newTime.Time.Add(d) + return newTime +} + +// AddStr parses the given duration as string and adds it to current time. +func (t *Time) AddStr(duration string) (*Time, error) { + if d, err := time.ParseDuration(duration); err != nil { + err = gerror.Wrapf(err, `time.ParseDuration failed for string "%s"`, duration) + return nil, err + } else { + return t.Add(d), nil + } +} + +// UTC converts current time to UTC timezone. +func (t *Time) UTC() *Time { + newTime := t.Clone() + newTime.Time = newTime.Time.UTC() + return newTime +} + +// ISO8601 formats the time as ISO8601 and returns it as string. +func (t *Time) ISO8601() string { + return t.Layout("2006-01-02T15:04:05-07:00") +} + +// RFC822 formats the time as RFC822 and returns it as string. +func (t *Time) RFC822() string { + return t.Layout("Mon, 02 Jan 06 15:04 MST") +} + +// AddDate adds year, month and day to the time. +func (t *Time) AddDate(years int, months int, days int) *Time { + newTime := t.Clone() + newTime.Time = newTime.Time.AddDate(years, months, days) + return newTime +} + +// Round returns the result of rounding t to the nearest multiple of d (since the zero time). +// The rounding behavior for halfway values is to round up. +// If d <= 0, Round returns t stripped of any monotonic clock reading but otherwise unchanged. +// +// Round operates on the time as an absolute duration since the +// zero time; it does not operate on the presentation form of the +// time. Thus, Round(Hour) may return a time with a non-zero +// minute, depending on the time's Location. 
+func (t *Time) Round(d time.Duration) *Time { + newTime := t.Clone() + newTime.Time = newTime.Time.Round(d) + return newTime +} + +// Truncate returns the result of rounding t down to a multiple of d (since the zero time). +// If d <= 0, Truncate returns t stripped of any monotonic clock reading but otherwise unchanged. +// +// Truncate operates on the time as an absolute duration since the +// zero time; it does not operate on the presentation form of the +// time. Thus, Truncate(Hour) may return a time with a non-zero +// minute, depending on the time's Location. +func (t *Time) Truncate(d time.Duration) *Time { + newTime := t.Clone() + newTime.Time = newTime.Time.Truncate(d) + return newTime +} + +// Equal reports whether t and u represent the same time instant. +// Two times can be equal even if they are in different locations. +// For example, 6:00 +0200 CEST and 4:00 UTC are Equal. +// See the documentation on the Time type for the pitfalls of using == with +// Time values; most code should use Equal instead. +func (t *Time) Equal(u *Time) bool { + switch { + case t == nil && u != nil: + return false + case t == nil && u == nil: + return true + case t != nil && u == nil: + return false + default: + return t.Time.Equal(u.Time) + } +} + +// Before reports whether the time instant t is before u. +func (t *Time) Before(u *Time) bool { + return t.Time.Before(u.Time) +} + +// After reports whether the time instant t is after u. +func (t *Time) After(u *Time) bool { + switch { + case t == nil: + return false + case t != nil && u == nil: + return true + default: + return t.Time.After(u.Time) + } +} + +// Sub returns the duration t-u. If the result exceeds the maximum (or minimum) +// value that can be stored in a Duration, the maximum (or minimum) duration +// will be returned. +// To compute t-d for a duration d, use t.Add(-d). 
+func (t *Time) Sub(u *Time) time.Duration { + if t == nil || u == nil { + return 0 + } + return t.Time.Sub(u.Time) +} + +// StartOfMinute clones and returns a new time of which the seconds is set to 0. +func (t *Time) StartOfMinute() *Time { + newTime := t.Clone() + newTime.Time = newTime.Time.Truncate(time.Minute) + return newTime +} + +// StartOfHour clones and returns a new time of which the hour, minutes and seconds are set to 0. +func (t *Time) StartOfHour() *Time { + y, m, d := t.Date() + newTime := t.Clone() + newTime.Time = time.Date(y, m, d, newTime.Time.Hour(), 0, 0, 0, newTime.Time.Location()) + return newTime +} + +// StartOfDay clones and returns a new time which is the start of day, its time is set to 00:00:00. +func (t *Time) StartOfDay() *Time { + y, m, d := t.Date() + newTime := t.Clone() + newTime.Time = time.Date(y, m, d, 0, 0, 0, 0, newTime.Time.Location()) + return newTime +} + +// StartOfWeek clones and returns a new time which is the first day of week and its time is set to +// 00:00:00. +func (t *Time) StartOfWeek() *Time { + weekday := int(t.Weekday()) + return t.StartOfDay().AddDate(0, 0, -weekday) +} + +// StartOfMonth clones and returns a new time which is the first day of the month and its is set to +// 00:00:00 +func (t *Time) StartOfMonth() *Time { + y, m, _ := t.Date() + newTime := t.Clone() + newTime.Time = time.Date(y, m, 1, 0, 0, 0, 0, newTime.Time.Location()) + return newTime +} + +// StartOfQuarter clones and returns a new time which is the first day of the quarter and its time is set +// to 00:00:00. +func (t *Time) StartOfQuarter() *Time { + month := t.StartOfMonth() + offset := (int(month.Month()) - 1) % 3 + return month.AddDate(0, -offset, 0) +} + +// StartOfHalf clones and returns a new time which is the first day of the half year and its time is set +// to 00:00:00. 
+func (t *Time) StartOfHalf() *Time { + month := t.StartOfMonth() + offset := (int(month.Month()) - 1) % 6 + return month.AddDate(0, -offset, 0) +} + +// StartOfYear clones and returns a new time which is the first day of the year and its time is set to +// 00:00:00. +func (t *Time) StartOfYear() *Time { + y, _, _ := t.Date() + newTime := t.Clone() + newTime.Time = time.Date(y, time.January, 1, 0, 0, 0, 0, newTime.Time.Location()) + return newTime +} + +// getPrecisionDelta returns the precision parameter for time calculation depending on `withNanoPrecision` option. +func getPrecisionDelta(withNanoPrecision ...bool) time.Duration { + if len(withNanoPrecision) > 0 && withNanoPrecision[0] { + return time.Nanosecond + } + return time.Second +} + +// EndOfMinute clones and returns a new time of which the seconds is set to 59. +func (t *Time) EndOfMinute(withNanoPrecision ...bool) *Time { + return t.StartOfMinute().Add(time.Minute - getPrecisionDelta(withNanoPrecision...)) +} + +// EndOfHour clones and returns a new time of which the minutes and seconds are both set to 59. +func (t *Time) EndOfHour(withNanoPrecision ...bool) *Time { + return t.StartOfHour().Add(time.Hour - getPrecisionDelta(withNanoPrecision...)) +} + +// EndOfDay clones and returns a new time which is the end of day the and its time is set to 23:59:59. +func (t *Time) EndOfDay(withNanoPrecision ...bool) *Time { + y, m, d := t.Date() + newTime := t.Clone() + newTime.Time = time.Date( + y, m, d, 23, 59, 59, int(time.Second-getPrecisionDelta(withNanoPrecision...)), newTime.Time.Location(), + ) + return newTime +} + +// EndOfWeek clones and returns a new time which is the end of week and its time is set to 23:59:59. +func (t *Time) EndOfWeek(withNanoPrecision ...bool) *Time { + return t.StartOfWeek().AddDate(0, 0, 7).Add(-getPrecisionDelta(withNanoPrecision...)) +} + +// EndOfMonth clones and returns a new time which is the end of the month and its time is set to 23:59:59. 
+func (t *Time) EndOfMonth(withNanoPrecision ...bool) *Time { + return t.StartOfMonth().AddDate(0, 1, 0).Add(-getPrecisionDelta(withNanoPrecision...)) +} + +// EndOfQuarter clones and returns a new time which is end of the quarter and its time is set to 23:59:59. +func (t *Time) EndOfQuarter(withNanoPrecision ...bool) *Time { + return t.StartOfQuarter().AddDate(0, 3, 0).Add(-getPrecisionDelta(withNanoPrecision...)) +} + +// EndOfHalf clones and returns a new time which is the end of the half year and its time is set to 23:59:59. +func (t *Time) EndOfHalf(withNanoPrecision ...bool) *Time { + return t.StartOfHalf().AddDate(0, 6, 0).Add(-getPrecisionDelta(withNanoPrecision...)) +} + +// EndOfYear clones and returns a new time which is the end of the year and its time is set to 23:59:59. +func (t *Time) EndOfYear(withNanoPrecision ...bool) *Time { + return t.StartOfYear().AddDate(1, 0, 0).Add(-getPrecisionDelta(withNanoPrecision...)) +} + +// MarshalJSON implements the interface MarshalJSON for json.Marshal. +// Note that, DO NOT use `(t *Time) MarshalJSON() ([]byte, error)` as it looses interface +// implement of `MarshalJSON` for struct of Time. +func (t Time) MarshalJSON() ([]byte, error) { + return []byte(`"` + t.String() + `"`), nil +} + +// UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal. +func (t *Time) UnmarshalJSON(b []byte) error { + if len(b) == 0 { + t.Time = time.Time{} + return nil + } + newTime, err := StrToTime(string(bytes.Trim(b, `"`))) + if err != nil { + return err + } + t.Time = newTime.Time + return nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +// Note that it overwrites the same implementer of `time.Time`. 
+func (t *Time) UnmarshalText(data []byte) error { + vTime := New(data) + if vTime != nil { + *t = *vTime + return nil + } + return gerror.NewCodef(gcode.CodeInvalidParameter, `invalid time value: %s`, data) +} + +// NoValidation marks this struct object will not be validated by package gvalid. +func (t *Time) NoValidation() {} + +// DeepCopy implements interface for deep copy of current type. +func (t *Time) DeepCopy() interface{} { + if t == nil { + return nil + } + return New(t.Time) +} diff --git a/vendor/github.com/gogf/gf/v2/os/gtime/gtime_time_wrapper.go b/vendor/github.com/gogf/gf/v2/os/gtime/gtime_time_wrapper.go new file mode 100644 index 00000000..28f8b9bf --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/gtime/gtime_time_wrapper.go @@ -0,0 +1,29 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gtime + +import ( + "time" +) + +// wrapper is a wrapper for stdlib struct time.Time. +// It's used for overwriting some functions of time.Time, for example: String. +type wrapper struct { + time.Time +} + +// String overwrites the String function of time.Time. +func (t wrapper) String() string { + if t.IsZero() { + return "" + } + if t.Year() == 0 { + // Only time. + return t.Format("15:04:05") + } + return t.Format("2006-01-02 15:04:05") +} diff --git a/vendor/github.com/gogf/gf/v2/os/gtime/gtime_time_zone.go b/vendor/github.com/gogf/gf/v2/os/gtime/gtime_time_zone.go new file mode 100644 index 00000000..ce6a8c03 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/gtime/gtime_time_zone.go @@ -0,0 +1,120 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. 
+// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gtime + +import ( + "os" + "strings" + "sync" + "time" + + "github.com/gogf/gf/v2/errors/gcode" + "github.com/gogf/gf/v2/errors/gerror" +) + +var ( + setTimeZoneMu sync.Mutex + setTimeZoneName string + zoneMap = make(map[string]*time.Location) + zoneMu sync.RWMutex +) + +// SetTimeZone sets the time zone for current whole process. +// The parameter `zone` is an area string specifying corresponding time zone, +// eg: Asia/Shanghai. +// +// PLEASE VERY NOTE THAT: +// 1. This should be called before package "time" import. +// 2. This function should be called once. +// 3. Please refer to issue: https://github.com/golang/go/issues/34814 +func SetTimeZone(zone string) (err error) { + setTimeZoneMu.Lock() + defer setTimeZoneMu.Unlock() + if setTimeZoneName != "" && !strings.EqualFold(zone, setTimeZoneName) { + return gerror.NewCodef( + gcode.CodeInvalidOperation, + `process timezone already set using "%s"`, + setTimeZoneName, + ) + } + defer func() { + if err == nil { + setTimeZoneName = zone + } + }() + + // It is already set to time.Local. + if strings.EqualFold(zone, time.Local.String()) { + return + } + + // Load zone info from specified name. + location, err := time.LoadLocation(zone) + if err != nil { + err = gerror.WrapCodef(gcode.CodeInvalidParameter, err, `time.LoadLocation failed for zone "%s"`, zone) + return err + } + + // Update the time.Local for once. + time.Local = location + + // Update the timezone environment for *nix systems. + var ( + envKey = "TZ" + envValue = location.String() + ) + if err = os.Setenv(envKey, envValue); err != nil { + err = gerror.WrapCodef( + gcode.CodeUnknown, + err, + `set environment failed with key "%s", value "%s"`, + envKey, envValue, + ) + } + return +} + +// ToLocation converts current time to specified location. 
+func (t *Time) ToLocation(location *time.Location) *Time { + newTime := t.Clone() + newTime.Time = newTime.Time.In(location) + return newTime +} + +// ToZone converts current time to specified zone like: Asia/Shanghai. +func (t *Time) ToZone(zone string) (*Time, error) { + if location, err := t.getLocationByZoneName(zone); err == nil { + return t.ToLocation(location), nil + } else { + return nil, err + } +} + +func (t *Time) getLocationByZoneName(name string) (location *time.Location, err error) { + zoneMu.RLock() + location = zoneMap[name] + zoneMu.RUnlock() + if location == nil { + location, err = time.LoadLocation(name) + if err != nil { + err = gerror.Wrapf(err, `time.LoadLocation failed for name "%s"`, name) + } + if location != nil { + zoneMu.Lock() + zoneMap[name] = location + zoneMu.Unlock() + } + } + return +} + +// Local converts the time to local timezone. +func (t *Time) Local() *Time { + newTime := t.Clone() + newTime.Time = newTime.Time.Local() + return newTime +} diff --git a/vendor/github.com/gogf/gf/v2/os/gtimer/gtimer.go b/vendor/github.com/gogf/gf/v2/os/gtimer/gtimer.go new file mode 100644 index 00000000..89e201f6 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/gtimer/gtimer.go @@ -0,0 +1,160 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +// Package gtimer implements timer for interval/delayed jobs running and management. +// +// This package is designed for management for millions of timing jobs. The differences +// between gtimer and gcron are as follows: +// 1. package gcron is implemented based on package gtimer. +// 2. gtimer is designed for high performance and for millions of timing jobs. +// 3. gcron supports configuration pattern grammar like linux crontab, which is more manually +// readable. +// 4. 
gtimer's benchmark OP is measured in nanoseconds, and gcron's benchmark OP is measured +// in microseconds. +// +// ALSO VERY NOTE the common delay of the timer: https://github.com/golang/go/issues/14410 +package gtimer + +import ( + "context" + "strconv" + "sync" + "time" + + "github.com/gogf/gf/v2/container/gtype" + "github.com/gogf/gf/v2/errors/gcode" + "github.com/gogf/gf/v2/errors/gerror" + "github.com/gogf/gf/v2/internal/command" +) + +// Timer is the timer manager, which uses ticks to calculate the timing interval. +type Timer struct { + mu sync.RWMutex + queue *priorityQueue // queue is a priority queue based on heap structure. + status *gtype.Int // status is the current timer status. + ticks *gtype.Int64 // ticks is the proceeded interval number by the timer. + options TimerOptions // timer options is used for timer configuration. +} + +// TimerOptions is the configuration object for Timer. +type TimerOptions struct { + Interval time.Duration // Interval is the interval escaped of the timer. +} + +// internalPanic is the custom panic for internal usage. +type internalPanic string + +const ( + StatusReady = 0 // Job or Timer is ready for running. + StatusRunning = 1 // Job or Timer is already running. + StatusStopped = 2 // Job or Timer is stopped. + StatusClosed = -1 // Job or Timer is closed and waiting to be deleted. + panicExit internalPanic = "exit" // panicExit is used for custom job exit with panic. + defaultTimerInterval = "100" // defaultTimerInterval is the default timer interval in milliseconds. + // commandEnvKeyForInterval is the key for command argument or environment configuring default interval duration for timer. 
+ commandEnvKeyForInterval = "gf.gtimer.interval" +) + +var ( + defaultInterval = getDefaultInterval() + defaultTimer = New() +) + +func getDefaultInterval() time.Duration { + interval := command.GetOptWithEnv(commandEnvKeyForInterval, defaultTimerInterval) + n, err := strconv.Atoi(interval) + if err != nil { + panic(gerror.WrapCodef( + gcode.CodeInvalidConfiguration, err, `error converting string "%s" to int number`, + interval, + )) + } + return time.Duration(n) * time.Millisecond +} + +// DefaultOptions creates and returns a default options object for Timer creation. +func DefaultOptions() TimerOptions { + return TimerOptions{ + Interval: defaultInterval, + } +} + +// SetTimeout runs the job once after duration of `delay`. +// It is like the one in javascript. +func SetTimeout(ctx context.Context, delay time.Duration, job JobFunc) { + AddOnce(ctx, delay, job) +} + +// SetInterval runs the job every duration of `delay`. +// It is like the one in javascript. +func SetInterval(ctx context.Context, interval time.Duration, job JobFunc) { + Add(ctx, interval, job) +} + +// Add adds a timing job to the default timer, which runs in interval of `interval`. +func Add(ctx context.Context, interval time.Duration, job JobFunc) *Entry { + return defaultTimer.Add(ctx, interval, job) +} + +// AddEntry adds a timing job to the default timer with detailed parameters. +// +// The parameter `interval` specifies the running interval of the job. +// +// The parameter `singleton` specifies whether the job running in singleton mode. +// There's only one of the same job is allowed running when its a singleton mode job. +// +// The parameter `times` specifies limit for the job running times, which means the job +// exits if its run times exceeds the `times`. +// +// The parameter `status` specifies the job status when it's firstly added to the timer. 
+func AddEntry(ctx context.Context, interval time.Duration, job JobFunc, isSingleton bool, times int, status int) *Entry { + return defaultTimer.AddEntry(ctx, interval, job, isSingleton, times, status) +} + +// AddSingleton is a convenience function for add singleton mode job. +func AddSingleton(ctx context.Context, interval time.Duration, job JobFunc) *Entry { + return defaultTimer.AddSingleton(ctx, interval, job) +} + +// AddOnce is a convenience function for adding a job which only runs once and then exits. +func AddOnce(ctx context.Context, interval time.Duration, job JobFunc) *Entry { + return defaultTimer.AddOnce(ctx, interval, job) +} + +// AddTimes is a convenience function for adding a job which is limited running times. +func AddTimes(ctx context.Context, interval time.Duration, times int, job JobFunc) *Entry { + return defaultTimer.AddTimes(ctx, interval, times, job) +} + +// DelayAdd adds a timing job after delay of `interval` duration. +// Also see Add. +func DelayAdd(ctx context.Context, delay time.Duration, interval time.Duration, job JobFunc) { + defaultTimer.DelayAdd(ctx, delay, interval, job) +} + +// DelayAddEntry adds a timing job after delay of `interval` duration. +// Also see AddEntry. +func DelayAddEntry(ctx context.Context, delay time.Duration, interval time.Duration, job JobFunc, isSingleton bool, times int, status int) { + defaultTimer.DelayAddEntry(ctx, delay, interval, job, isSingleton, times, status) +} + +// DelayAddSingleton adds a timing job after delay of `interval` duration. +// Also see AddSingleton. +func DelayAddSingleton(ctx context.Context, delay time.Duration, interval time.Duration, job JobFunc) { + defaultTimer.DelayAddSingleton(ctx, delay, interval, job) +} + +// DelayAddOnce adds a timing job after delay of `interval` duration. +// Also see AddOnce. 
+func DelayAddOnce(ctx context.Context, delay time.Duration, interval time.Duration, job JobFunc) { + defaultTimer.DelayAddOnce(ctx, delay, interval, job) +} + +// DelayAddTimes adds a timing job after delay of `interval` duration. +// Also see AddTimes. +func DelayAddTimes(ctx context.Context, delay time.Duration, interval time.Duration, times int, job JobFunc) { + defaultTimer.DelayAddTimes(ctx, delay, interval, times, job) +} diff --git a/vendor/github.com/gogf/gf/v2/os/gtimer/gtimer_entry.go b/vendor/github.com/gogf/gf/v2/os/gtimer/gtimer_entry.go new file mode 100644 index 00000000..dc389d08 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/gtimer/gtimer_entry.go @@ -0,0 +1,146 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gtimer + +import ( + "context" + + "github.com/gogf/gf/v2/container/gtype" + "github.com/gogf/gf/v2/errors/gerror" +) + +// Entry is the timing job. +type Entry struct { + job JobFunc // The job function. + ctx context.Context // The context for the job, for READ ONLY. + timer *Timer // Belonged timer. + ticks int64 // The job runs every tick. + times *gtype.Int // Limit running times. + status *gtype.Int // Job status. + isSingleton *gtype.Bool // Singleton mode. + nextTicks *gtype.Int64 // Next run ticks of the job. + infinite *gtype.Bool // No times limit. +} + +// JobFunc is the timing called job function in timer. +type JobFunc = func(ctx context.Context) + +// Status returns the status of the job. +func (entry *Entry) Status() int { + return entry.status.Val() +} + +// Run runs the timer job asynchronously. +func (entry *Entry) Run() { + if !entry.infinite.Val() { + leftRunningTimes := entry.times.Add(-1) + // It checks its running times exceeding. 
+ if leftRunningTimes < 0 { + entry.status.Set(StatusClosed) + return + } + } + go func() { + defer func() { + if exception := recover(); exception != nil { + if exception != panicExit { + if v, ok := exception.(error); ok && gerror.HasStack(v) { + panic(v) + } else { + panic(gerror.Newf(`exception recovered: %+v`, exception)) + } + } else { + entry.Close() + return + } + } + if entry.Status() == StatusRunning { + entry.SetStatus(StatusReady) + } + }() + entry.job(entry.ctx) + }() +} + +// doCheckAndRunByTicks checks the if job can run in given timer ticks, +// it runs asynchronously if the given `currentTimerTicks` meets or else +// it increments its ticks and waits for next running check. +func (entry *Entry) doCheckAndRunByTicks(currentTimerTicks int64) { + // Ticks check. + if currentTimerTicks < entry.nextTicks.Val() { + return + } + entry.nextTicks.Set(currentTimerTicks + entry.ticks) + // Perform job checking. + switch entry.status.Val() { + case StatusRunning: + if entry.IsSingleton() { + return + } + case StatusReady: + if !entry.status.Cas(StatusReady, StatusRunning) { + return + } + case StatusStopped: + return + case StatusClosed: + return + } + // Perform job running. + entry.Run() +} + +// SetStatus custom sets the status for the job. +func (entry *Entry) SetStatus(status int) int { + return entry.status.Set(status) +} + +// Start starts the job. +func (entry *Entry) Start() { + entry.status.Set(StatusReady) +} + +// Stop stops the job. +func (entry *Entry) Stop() { + entry.status.Set(StatusStopped) +} + +// Close closes the job, and then it will be removed from the timer. +func (entry *Entry) Close() { + entry.status.Set(StatusClosed) +} + +// Reset resets the job, which resets its ticks for next running. +func (entry *Entry) Reset() { + entry.nextTicks.Set(entry.timer.ticks.Val() + entry.ticks) +} + +// IsSingleton checks and returns whether the job in singleton mode. 
+func (entry *Entry) IsSingleton() bool { + return entry.isSingleton.Val() +} + +// SetSingleton sets the job singleton mode. +func (entry *Entry) SetSingleton(enabled bool) { + entry.isSingleton.Set(enabled) +} + +// Job returns the job function of this job. +func (entry *Entry) Job() JobFunc { + return entry.job +} + +// Ctx returns the initialized context of this job. +func (entry *Entry) Ctx() context.Context { + return entry.ctx +} + +// SetTimes sets the limit running times for the job. +func (entry *Entry) SetTimes(times int) { + entry.times.Set(times) + entry.infinite.Set(false) +} diff --git a/vendor/github.com/gogf/gf/v2/os/gtimer/gtimer_exit.go b/vendor/github.com/gogf/gf/v2/os/gtimer/gtimer_exit.go new file mode 100644 index 00000000..2ff3e698 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/gtimer/gtimer_exit.go @@ -0,0 +1,15 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gtimer + +// Exit is used in timing job internally, which exits and marks it closed from timer. +// The timing job will be automatically removed from timer later. It uses "panic-recover" +// mechanism internally implementing this feature, which is designed for simplification +// and convenience. +func Exit() { + panic(panicExit) +} diff --git a/vendor/github.com/gogf/gf/v2/os/gtimer/gtimer_queue.go b/vendor/github.com/gogf/gf/v2/os/gtimer/gtimer_queue.go new file mode 100644 index 00000000..92b57523 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/gtimer/gtimer_queue.go @@ -0,0 +1,84 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. 
+ +package gtimer + +import ( + "container/heap" + "math" + "sync" + + "github.com/gogf/gf/v2/container/gtype" +) + +// priorityQueue is an abstract data type similar to a regular queue or stack data structure in which +// each element additionally has a "priority" associated with it. In a priority queue, an element with +// high priority is served before an element with low priority. +// priorityQueue is based on heap structure. +type priorityQueue struct { + mu sync.Mutex + heap *priorityQueueHeap // the underlying queue items manager using heap. + nextPriority *gtype.Int64 // nextPriority stores the next priority value of the heap, which is used to check if necessary to call the Pop of heap by Timer. +} + +// priorityQueueHeap is a heap manager, of which the underlying `array` is an array implementing a heap structure. +type priorityQueueHeap struct { + array []priorityQueueItem +} + +// priorityQueueItem stores the queue item which has a `priority` attribute to sort itself in heap. +type priorityQueueItem struct { + value interface{} + priority int64 +} + +// newPriorityQueue creates and returns a priority queue. +func newPriorityQueue() *priorityQueue { + queue := &priorityQueue{ + heap: &priorityQueueHeap{array: make([]priorityQueueItem, 0)}, + nextPriority: gtype.NewInt64(math.MaxInt64), + } + heap.Init(queue.heap) + return queue +} + +// NextPriority retrieves and returns the minimum and the most priority value of the queue. +func (q *priorityQueue) NextPriority() int64 { + return q.nextPriority.Val() +} + +// Push pushes a value to the queue. +// The `priority` specifies the priority of the value. +// The lesser the `priority` value the higher priority of the `value`. +func (q *priorityQueue) Push(value interface{}, priority int64) { + q.mu.Lock() + defer q.mu.Unlock() + heap.Push(q.heap, priorityQueueItem{ + value: value, + priority: priority, + }) + // Update the minimum priority using atomic operation. 
+ nextPriority := q.nextPriority.Val() + if priority >= nextPriority { + return + } + q.nextPriority.Set(priority) +} + +// Pop retrieves, removes and returns the most high priority value from the queue. +func (q *priorityQueue) Pop() interface{} { + q.mu.Lock() + defer q.mu.Unlock() + if v := heap.Pop(q.heap); v != nil { + var nextPriority int64 = math.MaxInt64 + if len(q.heap.array) > 0 { + nextPriority = q.heap.array[0].priority + } + q.nextPriority.Set(nextPriority) + return v.(priorityQueueItem).value + } + return nil +} diff --git a/vendor/github.com/gogf/gf/v2/os/gtimer/gtimer_queue_heap.go b/vendor/github.com/gogf/gf/v2/os/gtimer/gtimer_queue_heap.go new file mode 100644 index 00000000..c4b2f5db --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/gtimer/gtimer_queue_heap.go @@ -0,0 +1,42 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gtimer + +// Len is used to implement the interface of sort.Interface. +func (h *priorityQueueHeap) Len() int { + return len(h.array) +} + +// Less is used to implement the interface of sort.Interface. +// The least one is placed to the top of the heap. +func (h *priorityQueueHeap) Less(i, j int) bool { + return h.array[i].priority < h.array[j].priority +} + +// Swap is used to implement the interface of sort.Interface. +func (h *priorityQueueHeap) Swap(i, j int) { + if len(h.array) == 0 { + return + } + h.array[i], h.array[j] = h.array[j], h.array[i] +} + +// Push pushes an item to the heap. +func (h *priorityQueueHeap) Push(x interface{}) { + h.array = append(h.array, x.(priorityQueueItem)) +} + +// Pop retrieves, removes and returns the most high priority item from the heap. 
+func (h *priorityQueueHeap) Pop() interface{} { + length := len(h.array) + if length == 0 { + return nil + } + item := h.array[length-1] + h.array = h.array[0 : length-1] + return item +} diff --git a/vendor/github.com/gogf/gf/v2/os/gtimer/gtimer_timer.go b/vendor/github.com/gogf/gf/v2/os/gtimer/gtimer_timer.go new file mode 100644 index 00000000..7badd0c2 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/gtimer/gtimer_timer.go @@ -0,0 +1,198 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gtimer + +import ( + "context" + "time" + + "github.com/gogf/gf/v2/container/gtype" +) + +// New creates and returns a Timer. +func New(options ...TimerOptions) *Timer { + t := &Timer{ + queue: newPriorityQueue(), + status: gtype.NewInt(StatusRunning), + ticks: gtype.NewInt64(), + } + if len(options) > 0 { + t.options = options[0] + } else { + t.options = DefaultOptions() + } + go t.loop() + return t +} + +// Add adds a timing job to the timer, which runs in interval of `interval`. +func (t *Timer) Add(ctx context.Context, interval time.Duration, job JobFunc) *Entry { + return t.createEntry(createEntryInput{ + Ctx: ctx, + Interval: interval, + Job: job, + IsSingleton: false, + Times: -1, + Status: StatusReady, + }) +} + +// AddEntry adds a timing job to the timer with detailed parameters. +// +// The parameter `interval` specifies the running interval of the job. +// +// The parameter `singleton` specifies whether the job running in singleton mode. +// There's only one of the same job is allowed running when it's a singleton mode job. +// +// The parameter `times` specifies limit for the job running times, which means the job +// exits if its run times exceeds the `times`. 
+// +// The parameter `status` specifies the job status when it's firstly added to the timer. +func (t *Timer) AddEntry(ctx context.Context, interval time.Duration, job JobFunc, isSingleton bool, times int, status int) *Entry { + return t.createEntry(createEntryInput{ + Ctx: ctx, + Interval: interval, + Job: job, + IsSingleton: isSingleton, + Times: times, + Status: status, + }) +} + +// AddSingleton is a convenience function for add singleton mode job. +func (t *Timer) AddSingleton(ctx context.Context, interval time.Duration, job JobFunc) *Entry { + return t.createEntry(createEntryInput{ + Ctx: ctx, + Interval: interval, + Job: job, + IsSingleton: true, + Times: -1, + Status: StatusReady, + }) +} + +// AddOnce is a convenience function for adding a job which only runs once and then exits. +func (t *Timer) AddOnce(ctx context.Context, interval time.Duration, job JobFunc) *Entry { + return t.createEntry(createEntryInput{ + Ctx: ctx, + Interval: interval, + Job: job, + IsSingleton: true, + Times: 1, + Status: StatusReady, + }) +} + +// AddTimes is a convenience function for adding a job which is limited running times. +func (t *Timer) AddTimes(ctx context.Context, interval time.Duration, times int, job JobFunc) *Entry { + return t.createEntry(createEntryInput{ + Ctx: ctx, + Interval: interval, + Job: job, + IsSingleton: true, + Times: times, + Status: StatusReady, + }) +} + +// DelayAdd adds a timing job after delay of `delay` duration. +// Also see Add. +func (t *Timer) DelayAdd(ctx context.Context, delay time.Duration, interval time.Duration, job JobFunc) { + t.AddOnce(ctx, delay, func(ctx context.Context) { + t.Add(ctx, interval, job) + }) +} + +// DelayAddEntry adds a timing job after delay of `delay` duration. +// Also see AddEntry. 
+func (t *Timer) DelayAddEntry(ctx context.Context, delay time.Duration, interval time.Duration, job JobFunc, isSingleton bool, times int, status int) { + t.AddOnce(ctx, delay, func(ctx context.Context) { + t.AddEntry(ctx, interval, job, isSingleton, times, status) + }) +} + +// DelayAddSingleton adds a timing job after delay of `delay` duration. +// Also see AddSingleton. +func (t *Timer) DelayAddSingleton(ctx context.Context, delay time.Duration, interval time.Duration, job JobFunc) { + t.AddOnce(ctx, delay, func(ctx context.Context) { + t.AddSingleton(ctx, interval, job) + }) +} + +// DelayAddOnce adds a timing job after delay of `delay` duration. +// Also see AddOnce. +func (t *Timer) DelayAddOnce(ctx context.Context, delay time.Duration, interval time.Duration, job JobFunc) { + t.AddOnce(ctx, delay, func(ctx context.Context) { + t.AddOnce(ctx, interval, job) + }) +} + +// DelayAddTimes adds a timing job after delay of `delay` duration. +// Also see AddTimes. +func (t *Timer) DelayAddTimes(ctx context.Context, delay time.Duration, interval time.Duration, times int, job JobFunc) { + t.AddOnce(ctx, delay, func(ctx context.Context) { + t.AddTimes(ctx, interval, times, job) + }) +} + +// Start starts the timer. +func (t *Timer) Start() { + t.status.Set(StatusRunning) +} + +// Stop stops the timer. +func (t *Timer) Stop() { + t.status.Set(StatusStopped) +} + +// Close closes the timer. +func (t *Timer) Close() { + t.status.Set(StatusClosed) +} + +type createEntryInput struct { + Ctx context.Context + Interval time.Duration + Job JobFunc + IsSingleton bool + Times int + Status int +} + +// createEntry creates and adds a timing job to the timer. 
+func (t *Timer) createEntry(in createEntryInput) *Entry { + var ( + infinite = false + ) + if in.Times <= 0 { + infinite = true + } + var ( + intervalTicksOfJob = int64(in.Interval / t.options.Interval) + ) + if intervalTicksOfJob == 0 { + // If the given interval is lesser than the one of the wheel, + // then sets it to one tick, which means it will be run in one interval. + intervalTicksOfJob = 1 + } + var ( + nextTicks = t.ticks.Val() + intervalTicksOfJob + entry = &Entry{ + job: in.Job, + ctx: in.Ctx, + timer: t, + ticks: intervalTicksOfJob, + times: gtype.NewInt(in.Times), + status: gtype.NewInt(in.Status), + isSingleton: gtype.NewBool(in.IsSingleton), + nextTicks: gtype.NewInt64(nextTicks), + infinite: gtype.NewBool(infinite), + } + ) + t.queue.Push(entry, nextTicks) + return entry +} diff --git a/vendor/github.com/gogf/gf/v2/os/gtimer/gtimer_timer_loop.go b/vendor/github.com/gogf/gf/v2/os/gtimer/gtimer_timer_loop.go new file mode 100644 index 00000000..ae94bd31 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/os/gtimer/gtimer_timer_loop.go @@ -0,0 +1,67 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gtimer + +import "time" + +// loop starts the ticker using a standalone goroutine. +func (t *Timer) loop() { + go func() { + var ( + currentTimerTicks int64 + timerIntervalTicker = time.NewTicker(t.options.Interval) + ) + defer timerIntervalTicker.Stop() + for { + select { + case <-timerIntervalTicker.C: + // Check the timer status. + switch t.status.Val() { + case StatusRunning: + // Timer proceeding. + if currentTimerTicks = t.ticks.Add(1); currentTimerTicks >= t.queue.NextPriority() { + t.proceed(currentTimerTicks) + } + + case StatusStopped: + // Do nothing. + + case StatusClosed: + // Timer exits. 
+ return + } + } + } + }() +} + +// proceed function proceeds the timer job checking and running logic. +func (t *Timer) proceed(currentTimerTicks int64) { + var ( + value interface{} + ) + for { + value = t.queue.Pop() + if value == nil { + break + } + entry := value.(*Entry) + // It checks if it meets the ticks' requirement. + if jobNextTicks := entry.nextTicks.Val(); currentTimerTicks < jobNextTicks { + // It pushes the job back if current ticks does not meet its running ticks requirement. + t.queue.Push(entry, entry.nextTicks.Val()) + break + } + // It checks the job running requirements and then does asynchronous running. + entry.doCheckAndRunByTicks(currentTimerTicks) + // Status check: push back or ignore it. + if entry.Status() != StatusClosed { + // It pushes the job back to queue for next running. + t.queue.Push(entry, entry.nextTicks.Val()) + } + } +} diff --git a/vendor/github.com/gogf/gf/v2/text/gregex/gregex.go b/vendor/github.com/gogf/gf/v2/text/gregex/gregex.go new file mode 100644 index 00000000..a1dfc968 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/text/gregex/gregex.go @@ -0,0 +1,149 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +// Package gregex provides high performance API for regular expression functionality. +package gregex + +import ( + "regexp" +) + +// Quote quotes `s` by replacing special chars in `s` +// to match the rules of regular expression pattern. +// And returns the copy. +// +// Eg: Quote(`[foo]`) returns `\[foo\]`. +func Quote(s string) string { + return regexp.QuoteMeta(s) +} + +// Validate checks whether given regular expression pattern `pattern` valid. +func Validate(pattern string) error { + _, err := getRegexp(pattern) + return err +} + +// IsMatch checks whether given bytes `src` matches `pattern`. 
+func IsMatch(pattern string, src []byte) bool { + if r, err := getRegexp(pattern); err == nil { + return r.Match(src) + } + return false +} + +// IsMatchString checks whether given string `src` matches `pattern`. +func IsMatchString(pattern string, src string) bool { + return IsMatch(pattern, []byte(src)) +} + +// Match return bytes slice that matched `pattern`. +func Match(pattern string, src []byte) ([][]byte, error) { + if r, err := getRegexp(pattern); err == nil { + return r.FindSubmatch(src), nil + } else { + return nil, err + } +} + +// MatchString return strings that matched `pattern`. +func MatchString(pattern string, src string) ([]string, error) { + if r, err := getRegexp(pattern); err == nil { + return r.FindStringSubmatch(src), nil + } else { + return nil, err + } +} + +// MatchAll return all bytes slices that matched `pattern`. +func MatchAll(pattern string, src []byte) ([][][]byte, error) { + if r, err := getRegexp(pattern); err == nil { + return r.FindAllSubmatch(src, -1), nil + } else { + return nil, err + } +} + +// MatchAllString return all strings that matched `pattern`. +func MatchAllString(pattern string, src string) ([][]string, error) { + if r, err := getRegexp(pattern); err == nil { + return r.FindAllStringSubmatch(src, -1), nil + } else { + return nil, err + } +} + +// Replace replaces all matched `pattern` in bytes `src` with bytes `replace`. +func Replace(pattern string, replace, src []byte) ([]byte, error) { + if r, err := getRegexp(pattern); err == nil { + return r.ReplaceAll(src, replace), nil + } else { + return nil, err + } +} + +// ReplaceString replace all matched `pattern` in string `src` with string `replace`. +func ReplaceString(pattern, replace, src string) (string, error) { + r, e := Replace(pattern, []byte(replace), []byte(src)) + return string(r), e +} + +// ReplaceFunc replace all matched `pattern` in bytes `src` +// with custom replacement function `replaceFunc`. 
+func ReplaceFunc(pattern string, src []byte, replaceFunc func(b []byte) []byte) ([]byte, error) { + if r, err := getRegexp(pattern); err == nil { + return r.ReplaceAllFunc(src, replaceFunc), nil + } else { + return nil, err + } +} + +// ReplaceFuncMatch replace all matched `pattern` in bytes `src` +// with custom replacement function `replaceFunc`. +// The parameter `match` type for `replaceFunc` is [][]byte, +// which is the result contains all sub-patterns of `pattern` using Match function. +func ReplaceFuncMatch(pattern string, src []byte, replaceFunc func(match [][]byte) []byte) ([]byte, error) { + if r, err := getRegexp(pattern); err == nil { + return r.ReplaceAllFunc(src, func(bytes []byte) []byte { + match, _ := Match(pattern, bytes) + return replaceFunc(match) + }), nil + } else { + return nil, err + } +} + +// ReplaceStringFunc replace all matched `pattern` in string `src` +// with custom replacement function `replaceFunc`. +func ReplaceStringFunc(pattern string, src string, replaceFunc func(s string) string) (string, error) { + bytes, err := ReplaceFunc(pattern, []byte(src), func(bytes []byte) []byte { + return []byte(replaceFunc(string(bytes))) + }) + return string(bytes), err +} + +// ReplaceStringFuncMatch replace all matched `pattern` in string `src` +// with custom replacement function `replaceFunc`. +// The parameter `match` type for `replaceFunc` is []string, +// which is the result contains all sub-patterns of `pattern` using MatchString function. 
+func ReplaceStringFuncMatch(pattern string, src string, replaceFunc func(match []string) string) (string, error) { + if r, err := getRegexp(pattern); err == nil { + return string(r.ReplaceAllFunc([]byte(src), func(bytes []byte) []byte { + match, _ := MatchString(pattern, string(bytes)) + return []byte(replaceFunc(match)) + })), nil + } else { + return "", err + } +} + +// Split slices `src` into substrings separated by the expression and returns a slice of +// the substrings between those expression matches. +func Split(pattern string, src string) []string { + if r, err := getRegexp(pattern); err == nil { + return r.Split(src, -1) + } + return nil +} diff --git a/vendor/github.com/gogf/gf/v2/text/gregex/gregex_cache.go b/vendor/github.com/gogf/gf/v2/text/gregex/gregex_cache.go new file mode 100644 index 00000000..1cc9099c --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/text/gregex/gregex_cache.go @@ -0,0 +1,50 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gregex + +import ( + "regexp" + "sync" + + "github.com/gogf/gf/v2/errors/gerror" +) + +var ( + regexMu = sync.RWMutex{} + // Cache for regex object. + // Note that: + // 1. It uses sync.RWMutex ensuring the concurrent safety. + // 2. There's no expiring logic for this map. + regexMap = make(map[string]*regexp.Regexp) +) + +// getRegexp returns *regexp.Regexp object with given `pattern`. +// It uses cache to enhance the performance for compiling regular expression pattern, +// which means, it will return the same *regexp.Regexp object with the same regular +// expression pattern. +// +// It is concurrent-safe for multiple goroutines. +func getRegexp(pattern string) (regex *regexp.Regexp, err error) { + // Retrieve the regular expression object using reading lock. 
+ regexMu.RLock() + regex = regexMap[pattern] + regexMu.RUnlock() + if regex != nil { + return + } + // If it does not exist in the cache, + // it compiles the pattern and creates one. + if regex, err = regexp.Compile(pattern); err != nil { + err = gerror.Wrapf(err, `regexp.Compile failed for pattern "%s"`, pattern) + return + } + // Cache the result object using writing lock. + regexMu.Lock() + regexMap[pattern] = regex + regexMu.Unlock() + return +} diff --git a/vendor/github.com/gogf/gf/v2/text/gstr/gstr.go b/vendor/github.com/gogf/gf/v2/text/gstr/gstr.go new file mode 100644 index 00000000..9932c590 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/text/gstr/gstr.go @@ -0,0 +1,17 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +// Package gstr provides functions for string handling. +package gstr + +const ( + // NotFoundIndex is the position index for string not found in searching functions. + NotFoundIndex = -1 +) + +const ( + defaultSuffixForStrLimit = "..." +) diff --git a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_array.go b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_array.go new file mode 100644 index 00000000..1e467023 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_array.go @@ -0,0 +1,31 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gstr + +// SearchArray searches string `s` in string slice `a` case-sensitively, +// returns its index in `a`. +// If `s` is not found in `a`, it returns -1. 
+func SearchArray(a []string, s string) int { + for i, v := range a { + if s == v { + return i + } + } + return NotFoundIndex +} + +// InArray checks whether string `s` in slice `a`. +func InArray(a []string, s string) bool { + return SearchArray(a, s) != NotFoundIndex +} + +// PrefixArray adds `prefix` string for each item of `array`. +func PrefixArray(array []string, prefix string) { + for k, v := range array { + array[k] = prefix + v + } +} diff --git a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_case.go b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_case.go new file mode 100644 index 00000000..c07fff72 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_case.go @@ -0,0 +1,184 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. +// +// | Function | Result | +// |-----------------------------------|--------------------| +// | CaseSnake(s) | any_kind_of_string | +// | CaseSnakeScreaming(s) | ANY_KIND_OF_STRING | +// | CaseSnakeFirstUpper("RGBCodeMd5") | rgb_code_md5 | +// | CaseKebab(s) | any-kind-of-string | +// | CaseKebabScreaming(s) | ANY-KIND-OF-STRING | +// | CaseDelimited(s, '.') | any.kind.of.string | +// | CaseDelimitedScreaming(s, '.') | ANY.KIND.OF.STRING | +// | CaseCamel(s) | AnyKindOfString | +// | CaseCamelLower(s) | anyKindOfString | + +package gstr + +import ( + "regexp" + "strings" +) + +var ( + numberSequence = regexp.MustCompile(`([a-zA-Z]{0,1})(\d+)([a-zA-Z]{0,1})`) + firstCamelCaseStart = regexp.MustCompile(`([A-Z]+)([A-Z]?[_a-z\d]+)|$`) + firstCamelCaseEnd = regexp.MustCompile(`([\w\W]*?)([_]?[A-Z]+)$`) +) + +// CaseCamel converts a string to CamelCase. +func CaseCamel(s string) string { + return toCamelInitCase(s, true) +} + +// CaseCamelLower converts a string to lowerCamelCase. 
+func CaseCamelLower(s string) string { + if s == "" { + return s + } + if r := rune(s[0]); r >= 'A' && r <= 'Z' { + s = strings.ToLower(string(r)) + s[1:] + } + return toCamelInitCase(s, false) +} + +// CaseSnake converts a string to snake_case. +func CaseSnake(s string) string { + return CaseDelimited(s, '_') +} + +// CaseSnakeScreaming converts a string to SNAKE_CASE_SCREAMING. +func CaseSnakeScreaming(s string) string { + return CaseDelimitedScreaming(s, '_', true) +} + +// CaseSnakeFirstUpper converts a string like "RGBCodeMd5" to "rgb_code_md5". +// TODO for efficiency should change regexp to traversing string in future. +func CaseSnakeFirstUpper(word string, underscore ...string) string { + replace := "_" + if len(underscore) > 0 { + replace = underscore[0] + } + + m := firstCamelCaseEnd.FindAllStringSubmatch(word, 1) + if len(m) > 0 { + word = m[0][1] + replace + TrimLeft(ToLower(m[0][2]), replace) + } + + for { + m = firstCamelCaseStart.FindAllStringSubmatch(word, 1) + if len(m) > 0 && m[0][1] != "" { + w := strings.ToLower(m[0][1]) + w = w[:len(w)-1] + replace + string(w[len(w)-1]) + + word = strings.Replace(word, m[0][1], w, 1) + } else { + break + } + } + + return TrimLeft(word, replace) +} + +// CaseKebab converts a string to kebab-case +func CaseKebab(s string) string { + return CaseDelimited(s, '-') +} + +// CaseKebabScreaming converts a string to KEBAB-CASE-SCREAMING. +func CaseKebabScreaming(s string) string { + return CaseDelimitedScreaming(s, '-', true) +} + +// CaseDelimited converts a string to snake.case.delimited. +func CaseDelimited(s string, del byte) string { + return CaseDelimitedScreaming(s, del, false) +} + +// CaseDelimitedScreaming converts a string to DELIMITED.SCREAMING.CASE or delimited.screaming.case. 
+func CaseDelimitedScreaming(s string, del uint8, screaming bool) string { + s = addWordBoundariesToNumbers(s) + s = strings.Trim(s, " ") + n := "" + for i, v := range s { + // treat acronyms as words, eg for JSONData -> JSON is a whole word + nextCaseIsChanged := false + if i+1 < len(s) { + next := s[i+1] + if (v >= 'A' && v <= 'Z' && next >= 'a' && next <= 'z') || (v >= 'a' && v <= 'z' && next >= 'A' && next <= 'Z') { + nextCaseIsChanged = true + } + } + + if i > 0 && n[len(n)-1] != del && nextCaseIsChanged { + // add underscore if next letter case type is changed + if v >= 'A' && v <= 'Z' { + n += string(del) + string(v) + } else if v >= 'a' && v <= 'z' { + n += string(v) + string(del) + } + } else if v == ' ' || v == '_' || v == '-' || v == '.' { + // replace spaces/underscores with delimiters + n += string(del) + } else { + n = n + string(v) + } + } + + if screaming { + n = strings.ToUpper(n) + } else { + n = strings.ToLower(n) + } + return n +} + +func addWordBoundariesToNumbers(s string) string { + r := numberSequence.ReplaceAllFunc([]byte(s), func(bytes []byte) []byte { + var result []byte + match := numberSequence.FindSubmatch(bytes) + if len(match[1]) > 0 { + result = append(result, match[1]...) + result = append(result, []byte(" ")...) + } + result = append(result, match[2]...) + if len(match[3]) > 0 { + result = append(result, []byte(" ")...) + result = append(result, match[3]...) + } + return result + }) + return string(r) +} + +// Converts a string to CamelCase +func toCamelInitCase(s string, initCase bool) string { + s = addWordBoundariesToNumbers(s) + s = strings.Trim(s, " ") + n := "" + capNext := initCase + for _, v := range s { + if v >= 'A' && v <= 'Z' { + n += string(v) + } + if v >= '0' && v <= '9' { + n += string(v) + } + if v >= 'a' && v <= 'z' { + if capNext { + n += strings.ToUpper(string(v)) + } else { + n += string(v) + } + } + if v == '_' || v == ' ' || v == '-' || v == '.' 
{ + capNext = true + } else { + capNext = false + } + } + return n +} diff --git a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_compare.go b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_compare.go new file mode 100644 index 00000000..ae877f67 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_compare.go @@ -0,0 +1,21 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gstr + +import "strings" + +// Compare returns an integer comparing two strings lexicographically. +// The result will be 0 if a==b, -1 if a < b, and +1 if a > b. +func Compare(a, b string) int { + return strings.Compare(a, b) +} + +// Equal reports whether `a` and `b`, interpreted as UTF-8 strings, +// are equal under Unicode case-folding, case-insensitively. +func Equal(a, b string) bool { + return strings.EqualFold(a, b) +} diff --git a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_contain.go b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_contain.go new file mode 100644 index 00000000..82994580 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_contain.go @@ -0,0 +1,24 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gstr + +import "strings" + +// Contains reports whether `substr` is within `str`, case-sensitively. +func Contains(str, substr string) bool { + return strings.Contains(str, substr) +} + +// ContainsI reports whether substr is within str, case-insensitively. +func ContainsI(str, substr string) bool { + return PosI(str, substr) != -1 +} + +// ContainsAny reports whether any Unicode code points in `chars` are within `s`. 
+func ContainsAny(s, chars string) bool { + return strings.ContainsAny(s, chars) +} diff --git a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_convert.go b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_convert.go new file mode 100644 index 00000000..fc29f11c --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_convert.go @@ -0,0 +1,265 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gstr + +import ( + "bytes" + "fmt" + "math" + "regexp" + "strconv" + "strings" + "unicode" + + "github.com/gogf/gf/v2/util/grand" +) + +var ( + // octReg is the regular expression object for checks octal string. + octReg = regexp.MustCompile(`\\[0-7]{3}`) +) + +// Chr return the ascii string of a number(0-255). +func Chr(ascii int) string { + return string([]byte{byte(ascii % 256)}) +} + +// Ord converts the first byte of a string to a value between 0 and 255. +func Ord(char string) int { + return int(char[0]) +} + +// OctStr converts string container octal string to its original string, +// for example, to Chinese string. +// Eg: `\346\200\241` -> 怡 +func OctStr(str string) string { + return octReg.ReplaceAllStringFunc( + str, + func(s string) string { + i, _ := strconv.ParseInt(s[1:], 8, 0) + return string([]byte{byte(i)}) + }, + ) +} + +// Reverse returns a string which is the reverse of `str`. +func Reverse(str string) string { + runes := []rune(str) + for i, j := 0, len(runes)-1; i < j; i, j = i+1, j-1 { + runes[i], runes[j] = runes[j], runes[i] + } + return string(runes) +} + +// NumberFormat formats a number with grouped thousands. +// `decimals`: Sets the number of decimal points. +// `decPoint`: Sets the separator for the decimal point. +// `thousandsSep`: Sets the thousands' separator. +// See http://php.net/manual/en/function.number-format.php. 
+func NumberFormat(number float64, decimals int, decPoint, thousandsSep string) string { + neg := false + if number < 0 { + number = -number + neg = true + } + // Will round off + str := fmt.Sprintf("%."+strconv.Itoa(decimals)+"F", number) + prefix, suffix := "", "" + if decimals > 0 { + prefix = str[:len(str)-(decimals+1)] + suffix = str[len(str)-decimals:] + } else { + prefix = str + } + sep := []byte(thousandsSep) + n, l1, l2 := 0, len(prefix), len(sep) + // thousands sep num + c := (l1 - 1) / 3 + tmp := make([]byte, l2*c+l1) + pos := len(tmp) - 1 + for i := l1 - 1; i >= 0; i, n, pos = i-1, n+1, pos-1 { + if l2 > 0 && n > 0 && n%3 == 0 { + for j := range sep { + tmp[pos] = sep[l2-j-1] + pos-- + } + } + tmp[pos] = prefix[i] + } + s := string(tmp) + if decimals > 0 { + s += decPoint + suffix + } + if neg { + s = "-" + s + } + + return s +} + +// Shuffle randomly shuffles a string. +// It considers parameter `str` as unicode string. +func Shuffle(str string) string { + runes := []rune(str) + s := make([]rune, len(runes)) + for i, v := range grand.Perm(len(runes)) { + s[i] = runes[v] + } + return string(s) +} + +// HideStr replaces part of the string `str` to `hide` by `percentage` from the `middle`. +// It considers parameter `str` as unicode string. +func HideStr(str string, percent int, hide string) string { + array := strings.Split(str, "@") + if len(array) > 1 { + str = array[0] + } + var ( + rs = []rune(str) + length = len(rs) + mid = math.Floor(float64(length / 2)) + hideLen = int(math.Floor(float64(length) * (float64(percent) / 100))) + start = int(mid - math.Floor(float64(hideLen)/2)) + hideStr = []rune("") + hideRune = []rune(hide) + ) + for i := 0; i < hideLen; i++ { + hideStr = append(hideStr, hideRune...) 
+ } + buffer := bytes.NewBuffer(nil) + buffer.WriteString(string(rs[0:start])) + buffer.WriteString(string(hideStr)) + buffer.WriteString(string(rs[start+hideLen:])) + if len(array) > 1 { + buffer.WriteString("@" + array[1]) + } + return buffer.String() +} + +// Nl2Br inserts HTML line breaks(`br`|
) before all newlines in a string: +// \n\r, \r\n, \r, \n. +// It considers parameter `str` as unicode string. +func Nl2Br(str string, isXhtml ...bool) string { + r, n, runes := '\r', '\n', []rune(str) + var br []byte + if len(isXhtml) > 0 && isXhtml[0] { + br = []byte("
") + } else { + br = []byte("
") + } + skip := false + length := len(runes) + var buf bytes.Buffer + for i, v := range runes { + if skip { + skip = false + continue + } + switch v { + case n, r: + if (i+1 < length) && ((v == r && runes[i+1] == n) || (v == n && runes[i+1] == r)) { + buf.Write(br) + skip = true + continue + } + buf.Write(br) + default: + buf.WriteRune(v) + } + } + return buf.String() +} + +// WordWrap wraps a string to a given number of characters. +// This function supports cut parameters of both english and chinese punctuations. +// TODO: Enable custom cut parameter, see http://php.net/manual/en/function.wordwrap.php. +func WordWrap(str string, width int, br string) string { + if br == "" { + br = "\n" + } + var ( + current int + wordBuf, spaceBuf bytes.Buffer + init = make([]byte, 0, len(str)) + buf = bytes.NewBuffer(init) + strRunes = []rune(str) + ) + for _, char := range strRunes { + switch { + case char == '\n': + if wordBuf.Len() == 0 { + if current+spaceBuf.Len() > width { + current = 0 + } else { + current += spaceBuf.Len() + _, _ = spaceBuf.WriteTo(buf) + } + spaceBuf.Reset() + } else { + current += spaceBuf.Len() + wordBuf.Len() + _, _ = spaceBuf.WriteTo(buf) + spaceBuf.Reset() + _, _ = wordBuf.WriteTo(buf) + wordBuf.Reset() + } + buf.WriteRune(char) + current = 0 + + case unicode.IsSpace(char): + if spaceBuf.Len() == 0 || wordBuf.Len() > 0 { + current += spaceBuf.Len() + wordBuf.Len() + _, _ = spaceBuf.WriteTo(buf) + spaceBuf.Reset() + _, _ = wordBuf.WriteTo(buf) + wordBuf.Reset() + } + spaceBuf.WriteRune(char) + + case isPunctuation(char): + wordBuf.WriteRune(char) + if spaceBuf.Len() == 0 || wordBuf.Len() > 0 { + current += spaceBuf.Len() + wordBuf.Len() + _, _ = spaceBuf.WriteTo(buf) + spaceBuf.Reset() + _, _ = wordBuf.WriteTo(buf) + wordBuf.Reset() + } + + default: + wordBuf.WriteRune(char) + if current+spaceBuf.Len()+wordBuf.Len() > width && wordBuf.Len() < width { + buf.WriteString(br) + current = 0 + spaceBuf.Reset() + } + } + } + + if wordBuf.Len() == 0 { + 
if current+spaceBuf.Len() <= width { + _, _ = spaceBuf.WriteTo(buf) + } + } else { + _, _ = spaceBuf.WriteTo(buf) + _, _ = wordBuf.WriteTo(buf) + } + return buf.String() +} + +func isPunctuation(char int32) bool { + switch char { + // English Punctuations. + case ';', '.', ',', ':', '~': + return true + // Chinese Punctuations. + case ';', ',', '。', ':', '?', '!', '…', '、': + return true + default: + return false + } +} diff --git a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_count.go b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_count.go new file mode 100644 index 00000000..cd0f0a19 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_count.go @@ -0,0 +1,63 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gstr + +import ( + "bytes" + "strings" + "unicode" +) + +// Count counts the number of `substr` appears in `s`. +// It returns 0 if no `substr` found in `s`. +func Count(s, substr string) int { + return strings.Count(s, substr) +} + +// CountI counts the number of `substr` appears in `s`, case-insensitively. +// It returns 0 if no `substr` found in `s`. +func CountI(s, substr string) int { + return strings.Count(ToLower(s), ToLower(substr)) +} + +// CountWords returns information about words' count used in a string. +// It considers parameter `str` as unicode string. +func CountWords(str string) map[string]int { + m := make(map[string]int) + buffer := bytes.NewBuffer(nil) + for _, r := range []rune(str) { + if unicode.IsSpace(r) { + if buffer.Len() > 0 { + m[buffer.String()]++ + buffer.Reset() + } + } else { + buffer.WriteRune(r) + } + } + if buffer.Len() > 0 { + m[buffer.String()]++ + } + return m +} + +// CountChars returns information about chars' count used in a string. +// It considers parameter `str` as unicode string. 
+func CountChars(str string, noSpace ...bool) map[string]int { + m := make(map[string]int) + countSpace := true + if len(noSpace) > 0 && noSpace[0] { + countSpace = false + } + for _, r := range []rune(str) { + if !countSpace && unicode.IsSpace(r) { + continue + } + m[string(r)]++ + } + return m +} diff --git a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_create.go b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_create.go new file mode 100644 index 00000000..8e5ff312 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_create.go @@ -0,0 +1,14 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gstr + +import "strings" + +// Repeat returns a new string consisting of multiplier copies of the string input. +func Repeat(input string, multiplier int) string { + return strings.Repeat(input, multiplier) +} diff --git a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_domain.go b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_domain.go new file mode 100644 index 00000000..35b78d48 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_domain.go @@ -0,0 +1,56 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gstr + +import "strings" + +// IsSubDomain checks whether `subDomain` is sub-domain of mainDomain. +// It supports '*' in `mainDomain`. 
+func IsSubDomain(subDomain string, mainDomain string) bool { + if p := strings.IndexByte(subDomain, ':'); p != -1 { + subDomain = subDomain[0:p] + } + if p := strings.IndexByte(mainDomain, ':'); p != -1 { + mainDomain = mainDomain[0:p] + } + var ( + subArray = strings.Split(subDomain, ".") + mainArray = strings.Split(mainDomain, ".") + subLength = len(subArray) + mainLength = len(mainArray) + ) + // Eg: + // "goframe.org" is not sub-domain of "s.goframe.org". + if mainLength > subLength { + for i := range mainArray[0 : mainLength-subLength] { + if mainArray[i] != "*" { + return false + } + } + } + + // Eg: + // "s.s.goframe.org" is not sub-domain of "*.goframe.org" + // but + // "s.s.goframe.org" is sub-domain of "goframe.org" + if mainLength > 2 && subLength > mainLength { + return false + } + minLength := subLength + if mainLength < minLength { + minLength = mainLength + } + for i := minLength; i > 0; i-- { + if mainArray[mainLength-i] == "*" { + continue + } + if mainArray[mainLength-i] != subArray[subLength-i] { + return false + } + } + return true +} diff --git a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_is.go b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_is.go new file mode 100644 index 00000000..2f52e949 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_is.go @@ -0,0 +1,14 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gstr + +import "github.com/gogf/gf/v2/internal/utils" + +// IsNumeric tests whether the given string s is numeric. 
+func IsNumeric(s string) bool { + return utils.IsNumeric(s) +} diff --git a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_length.go b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_length.go new file mode 100644 index 00000000..9e5b915b --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_length.go @@ -0,0 +1,14 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gstr + +import "unicode/utf8" + +// LenRune returns string length of unicode. +func LenRune(str string) int { + return utf8.RuneCountInString(str) +} diff --git a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_parse.go b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_parse.go new file mode 100644 index 00000000..52878bc7 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_parse.go @@ -0,0 +1,181 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gstr + +import ( + "net/url" + "strings" + + "github.com/gogf/gf/v2/errors/gcode" + "github.com/gogf/gf/v2/errors/gerror" +) + +// Parse parses the string into map[string]interface{}. +// +// v1=m&v2=n -> map[v1:m v2:n] +// v[a]=m&v[b]=n -> map[v:map[a:m b:n]] +// v[a][a]=m&v[a][b]=n -> map[v:map[a:map[a:m b:n]]] +// v[]=m&v[]=n -> map[v:[m n]] +// v[a][]=m&v[a][]=n -> map[v:map[a:[m n]]] +// v[][]=m&v[][]=n -> map[v:[map[]]] // Currently does not support nested slice. 
+// v=m&v[a]=n -> error +// a .[[b=c -> map[a___[b:c] +func Parse(s string) (result map[string]interface{}, err error) { + if s == "" { + return nil, nil + } + result = make(map[string]interface{}) + parts := strings.Split(s, "&") + for _, part := range parts { + pos := strings.Index(part, "=") + if pos <= 0 { + continue + } + key, err := url.QueryUnescape(part[:pos]) + if err != nil { + err = gerror.Wrapf(err, `url.QueryUnescape failed for string "%s"`, part[:pos]) + return nil, err + } + + for len(key) > 0 && key[0] == ' ' { + key = key[1:] + } + + if key == "" || key[0] == '[' { + continue + } + value, err := url.QueryUnescape(part[pos+1:]) + if err != nil { + err = gerror.Wrapf(err, `url.QueryUnescape failed for string "%s"`, part[pos+1:]) + return nil, err + } + // split into multiple keys + var keys []string + left := 0 + for i, k := range key { + if k == '[' && left == 0 { + left = i + } else if k == ']' { + if left > 0 { + if len(keys) == 0 { + keys = append(keys, key[:left]) + } + keys = append(keys, key[left+1:i]) + left = 0 + if i+1 < len(key) && key[i+1] != '[' { + break + } + } + } + } + if len(keys) == 0 { + keys = append(keys, key) + } + // first key + first := "" + for i, chr := range keys[0] { + if chr == ' ' || chr == '.' || chr == '[' { + first += "_" + } else { + first += string(chr) + } + if chr == '[' { + first += keys[0][i+1:] + break + } + } + keys[0] = first + + // build nested map + if err = build(result, keys, value); err != nil { + return nil, err + } + } + return result, nil +} + +// build nested map. +func build(result map[string]interface{}, keys []string, value interface{}) error { + var ( + length = len(keys) + key = strings.Trim(keys[0], "'\"") + ) + if length == 1 { + result[key] = value + return nil + } + + // The end is slice. 
like f[], f[a][] + if keys[1] == "" && length == 2 { + // TODO nested slice + if key == "" { + return nil + } + val, ok := result[key] + if !ok { + result[key] = []interface{}{value} + return nil + } + children, ok := val.([]interface{}) + if !ok { + return gerror.NewCodef( + gcode.CodeInvalidParameter, + "expected type '[]interface{}' for key '%s', but got '%T'", + key, val, + ) + } + result[key] = append(children, value) + return nil + } + // The end is slice + map. like v[][a] + if keys[1] == "" && length > 2 && keys[2] != "" { + val, ok := result[key] + if !ok { + result[key] = []interface{}{} + val = result[key] + } + children, ok := val.([]interface{}) + if !ok { + return gerror.NewCodef( + gcode.CodeInvalidParameter, + "expected type '[]interface{}' for key '%s', but got '%T'", + key, val, + ) + } + if l := len(children); l > 0 { + if child, ok := children[l-1].(map[string]interface{}); ok { + if _, ok := child[keys[2]]; !ok { + _ = build(child, keys[2:], value) + return nil + } + } + } + child := map[string]interface{}{} + _ = build(child, keys[2:], value) + result[key] = append(children, child) + return nil + } + + // map, like v[a], v[a][b] + val, ok := result[key] + if !ok { + result[key] = map[string]interface{}{} + val = result[key] + } + children, ok := val.(map[string]interface{}) + if !ok { + return gerror.NewCodef( + gcode.CodeInvalidParameter, + "expected type 'map[string]interface{}' for key '%s', but got '%T'", + key, val, + ) + } + if err := build(children, keys[1:], value); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_pos.go b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_pos.go new file mode 100644 index 00000000..bf76a629 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_pos.go @@ -0,0 +1,140 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. 
+// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gstr + +import "strings" + +// Pos returns the position of the first occurrence of `needle` +// in `haystack` from `startOffset`, case-sensitively. +// It returns -1, if not found. +func Pos(haystack, needle string, startOffset ...int) int { + length := len(haystack) + offset := 0 + if len(startOffset) > 0 { + offset = startOffset[0] + } + if length == 0 || offset > length || -offset > length { + return -1 + } + if offset < 0 { + offset += length + } + pos := strings.Index(haystack[offset:], needle) + if pos == NotFoundIndex { + return NotFoundIndex + } + return pos + offset +} + +// PosRune acts like function Pos but considers `haystack` and `needle` as unicode string. +func PosRune(haystack, needle string, startOffset ...int) int { + pos := Pos(haystack, needle, startOffset...) + if pos < 3 { + return pos + } + return len([]rune(haystack[:pos])) +} + +// PosI returns the position of the first occurrence of `needle` +// in `haystack` from `startOffset`, case-insensitively. +// It returns -1, if not found. +func PosI(haystack, needle string, startOffset ...int) int { + length := len(haystack) + offset := 0 + if len(startOffset) > 0 { + offset = startOffset[0] + } + if length == 0 || offset > length || -offset > length { + return -1 + } + + if offset < 0 { + offset += length + } + pos := strings.Index(strings.ToLower(haystack[offset:]), strings.ToLower(needle)) + if pos == -1 { + return -1 + } + return pos + offset +} + +// PosIRune acts like function PosI but considers `haystack` and `needle` as unicode string. +func PosIRune(haystack, needle string, startOffset ...int) int { + pos := PosI(haystack, needle, startOffset...) + if pos < 3 { + return pos + } + return len([]rune(haystack[:pos])) +} + +// PosR returns the position of the last occurrence of `needle` +// in `haystack` from `startOffset`, case-sensitively. 
+// It returns -1, if not found. +func PosR(haystack, needle string, startOffset ...int) int { + offset := 0 + if len(startOffset) > 0 { + offset = startOffset[0] + } + pos, length := 0, len(haystack) + if length == 0 || offset > length || -offset > length { + return -1 + } + + if offset < 0 { + haystack = haystack[:offset+length+1] + } else { + haystack = haystack[offset:] + } + pos = strings.LastIndex(haystack, needle) + if offset > 0 && pos != -1 { + pos += offset + } + return pos +} + +// PosRRune acts like function PosR but considers `haystack` and `needle` as unicode string. +func PosRRune(haystack, needle string, startOffset ...int) int { + pos := PosR(haystack, needle, startOffset...) + if pos < 3 { + return pos + } + return len([]rune(haystack[:pos])) +} + +// PosRI returns the position of the last occurrence of `needle` +// in `haystack` from `startOffset`, case-insensitively. +// It returns -1, if not found. +func PosRI(haystack, needle string, startOffset ...int) int { + offset := 0 + if len(startOffset) > 0 { + offset = startOffset[0] + } + pos, length := 0, len(haystack) + if length == 0 || offset > length || -offset > length { + return -1 + } + + if offset < 0 { + haystack = haystack[:offset+length+1] + } else { + haystack = haystack[offset:] + } + pos = strings.LastIndex(strings.ToLower(haystack), strings.ToLower(needle)) + if offset > 0 && pos != -1 { + pos += offset + } + return pos +} + +// PosRIRune acts like function PosRI but considers `haystack` and `needle` as unicode string. +func PosRIRune(haystack, needle string, startOffset ...int) int { + pos := PosRI(haystack, needle, startOffset...) 
+ if pos < 3 { + return pos + } + return len([]rune(haystack[:pos])) +} diff --git a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_replace.go b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_replace.go new file mode 100644 index 00000000..1449f9be --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_replace.go @@ -0,0 +1,94 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gstr + +import ( + "strings" + + "github.com/gogf/gf/v2/internal/utils" +) + +// Replace returns a copy of the string `origin` +// in which string `search` replaced by `replace` case-sensitively. +func Replace(origin, search, replace string, count ...int) string { + n := -1 + if len(count) > 0 { + n = count[0] + } + return strings.Replace(origin, search, replace, n) +} + +// ReplaceI returns a copy of the string `origin` +// in which string `search` replaced by `replace` case-insensitively. +func ReplaceI(origin, search, replace string, count ...int) string { + n := -1 + if len(count) > 0 { + n = count[0] + } + if n == 0 { + return origin + } + var ( + searchLength = len(search) + replaceLength = len(replace) + searchLower = strings.ToLower(search) + originLower string + pos int + ) + for { + originLower = strings.ToLower(origin) + if pos = Pos(originLower, searchLower, pos); pos != -1 { + origin = origin[:pos] + replace + origin[pos+searchLength:] + pos += replaceLength + if n--; n == 0 { + break + } + } else { + break + } + } + return origin +} + +// ReplaceByArray returns a copy of `origin`, +// which is replaced by a slice in order, case-sensitively. 
+func ReplaceByArray(origin string, array []string) string { + for i := 0; i < len(array); i += 2 { + if i+1 >= len(array) { + break + } + origin = Replace(origin, array[i], array[i+1]) + } + return origin +} + +// ReplaceIByArray returns a copy of `origin`, +// which is replaced by a slice in order, case-insensitively. +func ReplaceIByArray(origin string, array []string) string { + for i := 0; i < len(array); i += 2 { + if i+1 >= len(array) { + break + } + origin = ReplaceI(origin, array[i], array[i+1]) + } + return origin +} + +// ReplaceByMap returns a copy of `origin`, +// which is replaced by a map in unordered way, case-sensitively. +func ReplaceByMap(origin string, replaces map[string]string) string { + return utils.ReplaceByMap(origin, replaces) +} + +// ReplaceIByMap returns a copy of `origin`, +// which is replaced by a map in unordered way, case-insensitively. +func ReplaceIByMap(origin string, replaces map[string]string) string { + for k, v := range replaces { + origin = ReplaceI(origin, k, v) + } + return origin +} diff --git a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_similar.go b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_similar.go new file mode 100644 index 00000000..7c19618f --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_similar.go @@ -0,0 +1,158 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gstr + +// Levenshtein calculates Levenshtein distance between two strings. +// costIns: Defines the cost of insertion. +// costRep: Defines the cost of replacement. +// costDel: Defines the cost of deletion. +// See http://php.net/manual/en/function.levenshtein.php. 
+func Levenshtein(str1, str2 string, costIns, costRep, costDel int) int { + var maxLen = 255 + l1 := len(str1) + l2 := len(str2) + if l1 == 0 { + return l2 * costIns + } + if l2 == 0 { + return l1 * costDel + } + if l1 > maxLen || l2 > maxLen { + return -1 + } + + tmp := make([]int, l2+1) + p1 := make([]int, l2+1) + p2 := make([]int, l2+1) + var c0, c1, c2 int + var i1, i2 int + for i2 := 0; i2 <= l2; i2++ { + p1[i2] = i2 * costIns + } + for i1 = 0; i1 < l1; i1++ { + p2[0] = p1[0] + costDel + for i2 = 0; i2 < l2; i2++ { + if str1[i1] == str2[i2] { + c0 = p1[i2] + } else { + c0 = p1[i2] + costRep + } + c1 = p1[i2+1] + costDel + if c1 < c0 { + c0 = c1 + } + c2 = p2[i2] + costIns + if c2 < c0 { + c0 = c2 + } + p2[i2+1] = c0 + } + tmp = p1 + p1 = p2 + p2 = tmp + } + c0 = p1[l2] + + return c0 +} + +// SimilarText calculates the similarity between two strings. +// See http://php.net/manual/en/function.similar-text.php. +func SimilarText(first, second string, percent *float64) int { + var similarText func(string, string, int, int) int + similarText = func(str1, str2 string, len1, len2 int) int { + var sum, max int + pos1, pos2 := 0, 0 + + // Find the longest segment of the same section in two strings + for i := 0; i < len1; i++ { + for j := 0; j < len2; j++ { + for l := 0; (i+l < len1) && (j+l < len2) && (str1[i+l] == str2[j+l]); l++ { + if l+1 > max { + max = l + 1 + pos1 = i + pos2 = j + } + } + } + } + + if sum = max; sum > 0 { + if pos1 > 0 && pos2 > 0 { + sum += similarText(str1, str2, pos1, pos2) + } + if (pos1+max < len1) && (pos2+max < len2) { + s1 := []byte(str1) + s2 := []byte(str2) + sum += similarText(string(s1[pos1+max:]), string(s2[pos2+max:]), len1-pos1-max, len2-pos2-max) + } + } + + return sum + } + + l1, l2 := len(first), len(second) + if l1+l2 == 0 { + return 0 + } + sim := similarText(first, second, l1, l2) + if percent != nil { + *percent = float64(sim*200) / float64(l1+l2) + } + return sim +} + +// Soundex calculates the soundex key of a string. 
+// See http://php.net/manual/en/function.soundex.php. +func Soundex(str string) string { + if str == "" { + panic("str: cannot be an empty string") + } + table := [26]rune{ + '0', '1', '2', '3', // A, B, C, D + '0', '1', '2', // E, F, G + '0', // H + '0', '2', '2', '4', '5', '5', // I, J, K, L, M, N + '0', '1', '2', '6', '2', '3', // O, P, Q, R, S, T + '0', '1', // U, V + '0', '2', // W, X + '0', '2', // Y, Z + } + last, code, small := -1, 0, 0 + sd := make([]rune, 4) + // build soundex string + for i := 0; i < len(str) && small < 4; i++ { + // ToUpper + char := str[i] + if char < '\u007F' && 'a' <= char && char <= 'z' { + code = int(char - 'a' + 'A') + } else { + code = int(char) + } + if code >= 'A' && code <= 'Z' { + if small == 0 { + sd[small] = rune(code) + small++ + last = int(table[code-'A']) + } else { + code = int(table[code-'A']) + if code != last { + if code != 0 { + sd[small] = rune(code) + small++ + } + last = code + } + } + } + } + // pad with "0" + for ; small < 4; small++ { + sd[small] = '0' + } + return string(sd) +} diff --git a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_slashes.go b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_slashes.go new file mode 100644 index 00000000..2fed1814 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_slashes.go @@ -0,0 +1,54 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gstr + +import ( + "bytes" + + "github.com/gogf/gf/v2/internal/utils" +) + +// AddSlashes quotes chars('"\) with slashes. +func AddSlashes(str string) string { + var buf bytes.Buffer + for _, char := range str { + switch char { + case '\'', '"', '\\': + buf.WriteRune('\\') + } + buf.WriteRune(char) + } + return buf.String() +} + +// StripSlashes un-quotes a quoted string by AddSlashes. 
+func StripSlashes(str string) string { + return utils.StripSlashes(str) +} + +// QuoteMeta returns a version of str with a backslash character (\) +// before every character that is among: .\+*?[^]($) +func QuoteMeta(str string, chars ...string) string { + var buf bytes.Buffer + for _, char := range str { + if len(chars) > 0 { + for _, c := range chars[0] { + if c == char { + buf.WriteRune('\\') + break + } + } + } else { + switch char { + case '.', '+', '\\', '(', '$', ')', '[', '^', ']', '*', '?': + buf.WriteRune('\\') + } + } + buf.WriteRune(char) + } + return buf.String() +} diff --git a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_split_join.go b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_split_join.go new file mode 100644 index 00000000..8858cacf --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_split_join.go @@ -0,0 +1,83 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gstr + +import ( + "strings" + + "github.com/gogf/gf/v2/internal/utils" + "github.com/gogf/gf/v2/util/gconv" +) + +// Split splits string `str` by a string `delimiter`, to an array. +func Split(str, delimiter string) []string { + return strings.Split(str, delimiter) +} + +// SplitAndTrim splits string `str` by a string `delimiter` to an array, +// and calls Trim to every element of this array. It ignores the elements +// which are empty after Trim. +func SplitAndTrim(str, delimiter string, characterMask ...string) []string { + return utils.SplitAndTrim(str, delimiter, characterMask...) +} + +// Join concatenates the elements of `array` to create a single string. The separator string +// `sep` is placed between elements in the resulting string. 
+func Join(array []string, sep string) string { + return strings.Join(array, sep) +} + +// JoinAny concatenates the elements of `array` to create a single string. The separator string +// `sep` is placed between elements in the resulting string. +// +// The parameter `array` can be any type of slice, which be converted to string array. +func JoinAny(array interface{}, sep string) string { + return strings.Join(gconv.Strings(array), sep) +} + +// Explode splits string `str` by a string `delimiter`, to an array. +// See http://php.net/manual/en/function.explode.php. +func Explode(delimiter, str string) []string { + return Split(str, delimiter) +} + +// Implode joins array elements `pieces` with a string `glue`. +// http://php.net/manual/en/function.implode.php +func Implode(glue string, pieces []string) string { + return strings.Join(pieces, glue) +} + +// ChunkSplit splits a string into smaller chunks. +// Can be used to split a string into smaller chunks which is useful for +// e.g. converting BASE64 string output to match RFC 2045 semantics. +// It inserts end every chunkLen characters. +// It considers parameter `body` and `end` as unicode string. +func ChunkSplit(body string, chunkLen int, end string) string { + if end == "" { + end = "\r\n" + } + runes, endRunes := []rune(body), []rune(end) + l := len(runes) + if l <= 1 || l < chunkLen { + return body + end + } + ns := make([]rune, 0, len(runes)+len(endRunes)) + for i := 0; i < l; i += chunkLen { + if i+chunkLen > l { + ns = append(ns, runes[i:]...) + } else { + ns = append(ns, runes[i:i+chunkLen]...) + } + ns = append(ns, endRunes...) + } + return string(ns) +} + +// Fields returns the words used in a string as slice. 
+func Fields(str string) []string { + return strings.Fields(str) +} diff --git a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_sub.go b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_sub.go new file mode 100644 index 00000000..e237cee3 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_sub.go @@ -0,0 +1,199 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gstr + +import "strings" + +// Str returns part of `haystack` string starting from and including +// the first occurrence of `needle` to the end of `haystack`. +// See http://php.net/manual/en/function.strstr.php. +func Str(haystack string, needle string) string { + if needle == "" { + return "" + } + pos := strings.Index(haystack, needle) + if pos == NotFoundIndex { + return "" + } + return haystack[pos+len([]byte(needle))-1:] +} + +// StrEx returns part of `haystack` string starting from and excluding +// the first occurrence of `needle` to the end of `haystack`. +func StrEx(haystack string, needle string) string { + if s := Str(haystack, needle); s != "" { + return s[1:] + } + return "" +} + +// StrTill returns part of `haystack` string ending to and including +// the first occurrence of `needle` from the start of `haystack`. +func StrTill(haystack string, needle string) string { + pos := strings.Index(haystack, needle) + if pos == NotFoundIndex || pos == 0 { + return "" + } + return haystack[:pos+1] +} + +// StrTillEx returns part of `haystack` string ending to and excluding +// the first occurrence of `needle` from the start of `haystack`. 
+func StrTillEx(haystack string, needle string) string { + pos := strings.Index(haystack, needle) + if pos == NotFoundIndex || pos == 0 { + return "" + } + return haystack[:pos] +} + +// SubStr returns a portion of string `str` specified by the `start` and `length` parameters. +// The parameter `length` is optional, it uses the length of `str` in default. +func SubStr(str string, start int, length ...int) (substr string) { + strLength := len(str) + if start < 0 { + if -start > strLength { + start = 0 + } else { + start = strLength + start + } + } else if start > strLength { + return "" + } + realLength := 0 + if len(length) > 0 { + realLength = length[0] + if realLength < 0 { + if -realLength > strLength-start { + realLength = 0 + } else { + realLength = strLength - start + realLength + } + } else if realLength > strLength-start { + realLength = strLength - start + } + } else { + realLength = strLength - start + } + + if realLength == strLength { + return str + } else { + end := start + realLength + return str[start:end] + } +} + +// SubStrRune returns a portion of string `str` specified by the `start` and `length` parameters. +// SubStrRune considers parameter `str` as unicode string. +// The parameter `length` is optional, it uses the length of `str` in default. +func SubStrRune(str string, start int, length ...int) (substr string) { + // Converting to []rune to support unicode. 
+ var ( + runes = []rune(str) + runesLength = len(runes) + ) + + strLength := runesLength + if start < 0 { + if -start > strLength { + start = 0 + } else { + start = strLength + start + } + } else if start > strLength { + return "" + } + realLength := 0 + if len(length) > 0 { + realLength = length[0] + if realLength < 0 { + if -realLength > strLength-start { + realLength = 0 + } else { + realLength = strLength - start + realLength + } + } else if realLength > strLength-start { + realLength = strLength - start + } + } else { + realLength = strLength - start + } + end := start + realLength + if end > runesLength { + end = runesLength + } + return string(runes[start:end]) +} + +// StrLimit returns a portion of string `str` specified by `length` parameters, if the length +// of `str` is greater than `length`, then the `suffix` will be appended to the result string. +func StrLimit(str string, length int, suffix ...string) string { + if len(str) < length { + return str + } + suffixStr := defaultSuffixForStrLimit + if len(suffix) > 0 { + suffixStr = suffix[0] + } + return str[0:length] + suffixStr +} + +// StrLimitRune returns a portion of string `str` specified by `length` parameters, if the length +// of `str` is greater than `length`, then the `suffix` will be appended to the result string. +// StrLimitRune considers parameter `str` as unicode string. +func StrLimitRune(str string, length int, suffix ...string) string { + runes := []rune(str) + if len(runes) < length { + return str + } + suffixStr := defaultSuffixForStrLimit + if len(suffix) > 0 { + suffixStr = suffix[0] + } + return string(runes[0:length]) + suffixStr +} + +// SubStrFrom returns a portion of string `str` starting from first occurrence of and including `need` +// to the end of `str`. 
+func SubStrFrom(str string, need string) (substr string) { + pos := Pos(str, need) + if pos < 0 { + return "" + } + return str[pos:] +} + +// SubStrFromEx returns a portion of string `str` starting from first occurrence of and excluding `need` +// to the end of `str`. +func SubStrFromEx(str string, need string) (substr string) { + pos := Pos(str, need) + if pos < 0 { + return "" + } + return str[pos+len(need):] +} + +// SubStrFromR returns a portion of string `str` starting from last occurrence of and including `need` +// to the end of `str`. +func SubStrFromR(str string, need string) (substr string) { + pos := PosR(str, need) + if pos < 0 { + return "" + } + return str[pos:] +} + +// SubStrFromREx returns a portion of string `str` starting from last occurrence of and excluding `need` +// to the end of `str`. +func SubStrFromREx(str string, need string) (substr string) { + pos := PosR(str, need) + if pos < 0 { + return "" + } + return str[pos+len(need):] +} diff --git a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_trim.go b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_trim.go new file mode 100644 index 00000000..f7701505 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_trim.go @@ -0,0 +1,114 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gstr + +import ( + "strings" + + "github.com/gogf/gf/v2/internal/utils" +) + +// Trim strips whitespace (or other characters) from the beginning and end of a string. +// The optional parameter `characterMask` specifies the additional stripped characters. +func Trim(str string, characterMask ...string) string { + return utils.Trim(str, characterMask...) +} + +// TrimStr strips all the given `cut` string from the beginning and end of a string. 
+// Note that it does not strip the whitespaces of its beginning or end. +func TrimStr(str string, cut string, count ...int) string { + return TrimLeftStr(TrimRightStr(str, cut, count...), cut, count...) +} + +// TrimLeft strips whitespace (or other characters) from the beginning of a string. +func TrimLeft(str string, characterMask ...string) string { + trimChars := utils.DefaultTrimChars + if len(characterMask) > 0 { + trimChars += characterMask[0] + } + return strings.TrimLeft(str, trimChars) +} + +// TrimLeftStr strips all the given `cut` string from the beginning of a string. +// Note that it does not strip the whitespaces of its beginning. +func TrimLeftStr(str string, cut string, count ...int) string { + var ( + lenCut = len(cut) + cutCount = 0 + ) + for len(str) >= lenCut && str[0:lenCut] == cut { + str = str[lenCut:] + cutCount++ + if len(count) > 0 && count[0] != -1 && cutCount >= count[0] { + break + } + } + return str +} + +// TrimRight strips whitespace (or other characters) from the end of a string. +func TrimRight(str string, characterMask ...string) string { + trimChars := utils.DefaultTrimChars + if len(characterMask) > 0 { + trimChars += characterMask[0] + } + return strings.TrimRight(str, trimChars) +} + +// TrimRightStr strips all the given `cut` string from the end of a string. +// Note that it does not strip the whitespaces of its end. +func TrimRightStr(str string, cut string, count ...int) string { + var ( + lenStr = len(str) + lenCut = len(cut) + cutCount = 0 + ) + for lenStr >= lenCut && str[lenStr-lenCut:lenStr] == cut { + lenStr = lenStr - lenCut + str = str[:lenStr] + cutCount++ + if len(count) > 0 && count[0] != -1 && cutCount >= count[0] { + break + } + } + return str +} + +// TrimAll trims all characters in string `str`. 
+func TrimAll(str string, characterMask ...string) string { + trimChars := utils.DefaultTrimChars + if len(characterMask) > 0 { + trimChars += characterMask[0] + } + var ( + filtered bool + slice = make([]rune, 0, len(str)) + ) + for _, char := range str { + filtered = false + for _, trimChar := range trimChars { + if char == trimChar { + filtered = true + break + } + } + if !filtered { + slice = append(slice, char) + } + } + return string(slice) +} + +// HasPrefix tests whether the string s begins with prefix. +func HasPrefix(s, prefix string) bool { + return strings.HasPrefix(s, prefix) +} + +// HasSuffix tests whether the string s ends with suffix. +func HasSuffix(s, suffix string) bool { + return strings.HasSuffix(s, suffix) +} diff --git a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_upper_lower.go b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_upper_lower.go new file mode 100644 index 00000000..69ad78c1 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_upper_lower.go @@ -0,0 +1,54 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gstr + +import ( + "strings" + + "github.com/gogf/gf/v2/internal/utils" +) + +// ToLower returns a copy of the string s with all Unicode letters mapped to their lower case. +func ToLower(s string) string { + return strings.ToLower(s) +} + +// ToUpper returns a copy of the string s with all Unicode letters mapped to their upper case. +func ToUpper(s string) string { + return strings.ToUpper(s) +} + +// UcFirst returns a copy of the string s with the first letter mapped to its upper case. +func UcFirst(s string) string { + return utils.UcFirst(s) +} + +// LcFirst returns a copy of the string s with the first letter mapped to its lower case. 
+func LcFirst(s string) string { + if len(s) == 0 { + return s + } + if IsLetterUpper(s[0]) { + return string(s[0]+32) + s[1:] + } + return s +} + +// UcWords uppercase the first character of each word in a string. +func UcWords(str string) string { + return strings.Title(str) +} + +// IsLetterLower tests whether the given byte b is in lower case. +func IsLetterLower(b byte) bool { + return utils.IsLetterLower(b) +} + +// IsLetterUpper tests whether the given byte b is in upper case. +func IsLetterUpper(b byte) bool { + return utils.IsLetterUpper(b) +} diff --git a/vendor/github.com/gogf/gf/v2/text/gstr/gstr_version.go b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_version.go new file mode 100644 index 00000000..f931b2c9 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/text/gstr/gstr_version.go @@ -0,0 +1,189 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gstr + +import ( + "strings" + + "github.com/gogf/gf/v2/util/gconv" +) + +// IsGNUVersion checks and returns whether given `version` is valid GNU version string. +func IsGNUVersion(version string) bool { + if version != "" && (version[0] == 'v' || version[0] == 'V') { + version = version[1:] + } + if version == "" { + return false + } + var array = strings.Split(version, ".") + if len(array) > 3 { + return false + } + for _, v := range array { + if v == "" { + return false + } + if !IsNumeric(v) { + return false + } + if v[0] == '-' || v[0] == '+' { + return false + } + } + return true +} + +// CompareVersion compares `a` and `b` as standard GNU version. +// +// It returns 1 if `a` > `b`. +// +// It returns -1 if `a` < `b`. +// +// It returns 0 if `a` = `b`. +// +// GNU standard version is like: +// v1.0 +// 1 +// 1.0.0 +// v1.0.1 +// v2.10.8 +// 10.2.0 +// etc. 
+func CompareVersion(a, b string) int { + if a != "" && a[0] == 'v' { + a = a[1:] + } + if b != "" && b[0] == 'v' { + b = b[1:] + } + var ( + array1 = strings.Split(a, ".") + array2 = strings.Split(b, ".") + diff int + ) + diff = len(array2) - len(array1) + for i := 0; i < diff; i++ { + array1 = append(array1, "0") + } + diff = len(array1) - len(array2) + for i := 0; i < diff; i++ { + array2 = append(array2, "0") + } + v1 := 0 + v2 := 0 + for i := 0; i < len(array1); i++ { + v1 = gconv.Int(array1[i]) + v2 = gconv.Int(array2[i]) + if v1 > v2 { + return 1 + } + if v1 < v2 { + return -1 + } + } + return 0 +} + +// CompareVersionGo compares `a` and `b` as standard Golang version. +// +// It returns 1 if `a` > `b`. +// +// It returns -1 if `a` < `b`. +// +// It returns 0 if `a` = `b`. +// +// Golang standard version is like: +// 1.0.0 +// v1.0.1 +// v2.10.8 +// 10.2.0 +// v0.0.0-20190626092158-b2ccc519800e +// v1.12.2-0.20200413154443-b17e3a6804fa +// v4.20.0+incompatible +// etc. +// +// Docs: https://go.dev/doc/modules/version-numbers +func CompareVersionGo(a, b string) int { + a = Trim(a) + b = Trim(b) + if a != "" && a[0] == 'v' { + a = a[1:] + } + if b != "" && b[0] == 'v' { + b = b[1:] + } + var ( + rawA = a + rawB = b + ) + if Count(a, "-") > 1 { + if i := PosR(a, "-"); i > 0 { + a = a[:i] + } + } + if Count(b, "-") > 1 { + if i := PosR(b, "-"); i > 0 { + b = b[:i] + } + } + if i := Pos(a, "+"); i > 0 { + a = a[:i] + } + if i := Pos(b, "+"); i > 0 { + b = b[:i] + } + a = Replace(a, "-", ".") + b = Replace(b, "-", ".") + var ( + array1 = strings.Split(a, ".") + array2 = strings.Split(b, ".") + diff = len(array1) - len(array2) + ) + + for i := diff; i < 0; i++ { + array1 = append(array1, "0") + } + for i := 0; i < diff; i++ { + array2 = append(array2, "0") + } + + // check Major.Minor.Patch first + v1, v2 := 0, 0 + for i := 0; i < len(array1); i++ { + v1, v2 = gconv.Int(array1[i]), gconv.Int(array2[i]) + // Specially in Golang: + // 
"v1.12.2-0.20200413154443-b17e3a6804fa" < "v1.12.2" + // "v1.12.3-0.20200413154443-b17e3a6804fa" > "v1.12.2" + if i == 4 && v1 != v2 && (v1 == 0 || v2 == 0) { + if v1 > v2 { + return -1 + } else { + return 1 + } + } + + if v1 > v2 { + return 1 + } + if v1 < v2 { + return -1 + } + } + + // Specially in Golang: + // "v4.20.1+incompatible" < "v4.20.1" + inA, inB := Contains(rawA, "+incompatible"), Contains(rawB, "+incompatible") + if inA && !inB { + return -1 + } + if !inA && inB { + return 1 + } + + return 0 +} diff --git a/vendor/github.com/gogf/gf/v2/util/gconv/gconv.go b/vendor/github.com/gogf/gf/v2/util/gconv/gconv.go new file mode 100644 index 00000000..85ea1e42 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/util/gconv/gconv.go @@ -0,0 +1,286 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +// Package gconv implements powerful and convenient converting functionality for any types of variables. +// +// This package should keep much less dependencies with other packages. +package gconv + +import ( + "context" + "fmt" + "math" + "reflect" + "strconv" + "strings" + "time" + + "github.com/gogf/gf/v2/encoding/gbinary" + "github.com/gogf/gf/v2/internal/intlog" + "github.com/gogf/gf/v2/internal/json" + "github.com/gogf/gf/v2/internal/reflection" + "github.com/gogf/gf/v2/os/gtime" + "github.com/gogf/gf/v2/util/gtag" +) + +var ( + // Empty strings. + emptyStringMap = map[string]struct{}{ + "": {}, + "0": {}, + "no": {}, + "off": {}, + "false": {}, + } + + // StructTagPriority defines the default priority tags for Map*/Struct* functions. + // Note that, the `gconv/param` tags are used by old version of package. + // It is strongly recommended using short tag `c/p` instead in the future. 
+ StructTagPriority = []string{ + gtag.GConv, gtag.Param, gtag.GConvShort, gtag.ParamShort, gtag.Json, + } +) + +// Byte converts `any` to byte. +func Byte(any interface{}) byte { + if v, ok := any.(byte); ok { + return v + } + return Uint8(any) +} + +// Bytes converts `any` to []byte. +func Bytes(any interface{}) []byte { + if any == nil { + return nil + } + switch value := any.(type) { + case string: + return []byte(value) + + case []byte: + return value + + default: + if f, ok := value.(iBytes); ok { + return f.Bytes() + } + originValueAndKind := reflection.OriginValueAndKind(any) + switch originValueAndKind.OriginKind { + case reflect.Map: + bytes, err := json.Marshal(any) + if err != nil { + intlog.Errorf(context.TODO(), `%+v`, err) + } + return bytes + + case reflect.Array, reflect.Slice: + var ( + ok = true + bytes = make([]byte, originValueAndKind.OriginValue.Len()) + ) + for i := range bytes { + int32Value := Int32(originValueAndKind.OriginValue.Index(i).Interface()) + if int32Value < 0 || int32Value > math.MaxUint8 { + ok = false + break + } + bytes[i] = byte(int32Value) + } + if ok { + return bytes + } + } + return gbinary.Encode(any) + } +} + +// Rune converts `any` to rune. +func Rune(any interface{}) rune { + if v, ok := any.(rune); ok { + return v + } + return Int32(any) +} + +// Runes converts `any` to []rune. +func Runes(any interface{}) []rune { + if v, ok := any.([]rune); ok { + return v + } + return []rune(String(any)) +} + +// String converts `any` to string. +// It's most commonly used converting function. 
+func String(any interface{}) string { + if any == nil { + return "" + } + switch value := any.(type) { + case int: + return strconv.Itoa(value) + case int8: + return strconv.Itoa(int(value)) + case int16: + return strconv.Itoa(int(value)) + case int32: + return strconv.Itoa(int(value)) + case int64: + return strconv.FormatInt(value, 10) + case uint: + return strconv.FormatUint(uint64(value), 10) + case uint8: + return strconv.FormatUint(uint64(value), 10) + case uint16: + return strconv.FormatUint(uint64(value), 10) + case uint32: + return strconv.FormatUint(uint64(value), 10) + case uint64: + return strconv.FormatUint(value, 10) + case float32: + return strconv.FormatFloat(float64(value), 'f', -1, 32) + case float64: + return strconv.FormatFloat(value, 'f', -1, 64) + case bool: + return strconv.FormatBool(value) + case string: + return value + case []byte: + return string(value) + case time.Time: + if value.IsZero() { + return "" + } + return value.String() + case *time.Time: + if value == nil { + return "" + } + return value.String() + case gtime.Time: + if value.IsZero() { + return "" + } + return value.String() + case *gtime.Time: + if value == nil { + return "" + } + return value.String() + default: + // Empty checks. + if value == nil { + return "" + } + if f, ok := value.(iString); ok { + // If the variable implements the String() interface, + // then use that interface to perform the conversion + return f.String() + } + if f, ok := value.(iError); ok { + // If the variable implements the Error() interface, + // then use that interface to perform the conversion + return f.Error() + } + // Reflect checks. 
+ var ( + rv = reflect.ValueOf(value) + kind = rv.Kind() + ) + switch kind { + case reflect.Chan, + reflect.Map, + reflect.Slice, + reflect.Func, + reflect.Ptr, + reflect.Interface, + reflect.UnsafePointer: + if rv.IsNil() { + return "" + } + case reflect.String: + return rv.String() + } + if kind == reflect.Ptr { + return String(rv.Elem().Interface()) + } + // Finally, we use json.Marshal to convert. + if jsonContent, err := json.Marshal(value); err != nil { + return fmt.Sprint(value) + } else { + return string(jsonContent) + } + } +} + +// Bool converts `any` to bool. +// It returns false if `any` is: false, "", 0, "false", "off", "no", empty slice/map. +func Bool(any interface{}) bool { + if any == nil { + return false + } + switch value := any.(type) { + case bool: + return value + case []byte: + if _, ok := emptyStringMap[strings.ToLower(string(value))]; ok { + return false + } + return true + case string: + if _, ok := emptyStringMap[strings.ToLower(value)]; ok { + return false + } + return true + default: + if f, ok := value.(iBool); ok { + return f.Bool() + } + rv := reflect.ValueOf(any) + switch rv.Kind() { + case reflect.Ptr: + return !rv.IsNil() + case reflect.Map: + fallthrough + case reflect.Array: + fallthrough + case reflect.Slice: + return rv.Len() != 0 + case reflect.Struct: + return true + default: + s := strings.ToLower(String(any)) + if _, ok := emptyStringMap[s]; ok { + return false + } + return true + } + } +} + +// checkJsonAndUnmarshalUseNumber checks if given `any` is JSON formatted string value and does converting using `json.UnmarshalUseNumber`. 
+func checkJsonAndUnmarshalUseNumber(any interface{}, target interface{}) bool { + switch r := any.(type) { + case []byte: + if json.Valid(r) { + if err := json.UnmarshalUseNumber(r, &target); err != nil { + return false + } + return true + } + + case string: + anyAsBytes := []byte(r) + if json.Valid(anyAsBytes) { + if err := json.UnmarshalUseNumber(anyAsBytes, &target); err != nil { + return false + } + return true + } + } + return false +} diff --git a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_convert.go b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_convert.go new file mode 100644 index 00000000..baf48b48 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_convert.go @@ -0,0 +1,284 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gconv + +import ( + "reflect" + "time" + + "github.com/gogf/gf/v2/os/gtime" +) + +// Convert converts the variable `fromValue` to the type `toTypeName`, the type `toTypeName` is specified by string. +// The optional parameter `extraParams` is used for additional necessary parameter for this conversion. +// It supports common types conversion as its conversion based on type name string. +func Convert(fromValue interface{}, toTypeName string, extraParams ...interface{}) interface{} { + return doConvert(doConvertInput{ + FromValue: fromValue, + ToTypeName: toTypeName, + ReferValue: nil, + Extra: extraParams, + }) +} + +type doConvertInput struct { + FromValue interface{} // Value that is converted from. + ToTypeName string // Target value type name in string. + ReferValue interface{} // Referred value, a value in type `ToTypeName`. + Extra []interface{} // Extra values for implementing the converting. + // Marks that the value is already converted and set to `ReferValue`. 
Caller can ignore the returned result. + // It is an attribute for internal usage purpose. + alreadySetToReferValue bool +} + +// doConvert does commonly use types converting. +func doConvert(in doConvertInput) (convertedValue interface{}) { + switch in.ToTypeName { + case "int": + return Int(in.FromValue) + case "*int": + if _, ok := in.FromValue.(*int); ok { + return in.FromValue + } + v := Int(in.FromValue) + return &v + + case "int8": + return Int8(in.FromValue) + case "*int8": + if _, ok := in.FromValue.(*int8); ok { + return in.FromValue + } + v := Int8(in.FromValue) + return &v + + case "int16": + return Int16(in.FromValue) + case "*int16": + if _, ok := in.FromValue.(*int16); ok { + return in.FromValue + } + v := Int16(in.FromValue) + return &v + + case "int32": + return Int32(in.FromValue) + case "*int32": + if _, ok := in.FromValue.(*int32); ok { + return in.FromValue + } + v := Int32(in.FromValue) + return &v + + case "int64": + return Int64(in.FromValue) + case "*int64": + if _, ok := in.FromValue.(*int64); ok { + return in.FromValue + } + v := Int64(in.FromValue) + return &v + + case "uint": + return Uint(in.FromValue) + case "*uint": + if _, ok := in.FromValue.(*uint); ok { + return in.FromValue + } + v := Uint(in.FromValue) + return &v + + case "uint8": + return Uint8(in.FromValue) + case "*uint8": + if _, ok := in.FromValue.(*uint8); ok { + return in.FromValue + } + v := Uint8(in.FromValue) + return &v + + case "uint16": + return Uint16(in.FromValue) + case "*uint16": + if _, ok := in.FromValue.(*uint16); ok { + return in.FromValue + } + v := Uint16(in.FromValue) + return &v + + case "uint32": + return Uint32(in.FromValue) + case "*uint32": + if _, ok := in.FromValue.(*uint32); ok { + return in.FromValue + } + v := Uint32(in.FromValue) + return &v + + case "uint64": + return Uint64(in.FromValue) + case "*uint64": + if _, ok := in.FromValue.(*uint64); ok { + return in.FromValue + } + v := Uint64(in.FromValue) + return &v + + case "float32": + return 
Float32(in.FromValue) + case "*float32": + if _, ok := in.FromValue.(*float32); ok { + return in.FromValue + } + v := Float32(in.FromValue) + return &v + + case "float64": + return Float64(in.FromValue) + case "*float64": + if _, ok := in.FromValue.(*float64); ok { + return in.FromValue + } + v := Float64(in.FromValue) + return &v + + case "bool": + return Bool(in.FromValue) + case "*bool": + if _, ok := in.FromValue.(*bool); ok { + return in.FromValue + } + v := Bool(in.FromValue) + return &v + + case "string": + return String(in.FromValue) + case "*string": + if _, ok := in.FromValue.(*string); ok { + return in.FromValue + } + v := String(in.FromValue) + return &v + + case "[]byte": + return Bytes(in.FromValue) + case "[]int": + return Ints(in.FromValue) + case "[]int32": + return Int32s(in.FromValue) + case "[]int64": + return Int64s(in.FromValue) + case "[]uint": + return Uints(in.FromValue) + case "[]uint8": + return Bytes(in.FromValue) + case "[]uint32": + return Uint32s(in.FromValue) + case "[]uint64": + return Uint64s(in.FromValue) + case "[]float32": + return Float32s(in.FromValue) + case "[]float64": + return Float64s(in.FromValue) + case "[]string": + return Strings(in.FromValue) + + case "Time", "time.Time": + if len(in.Extra) > 0 { + return Time(in.FromValue, String(in.Extra[0])) + } + return Time(in.FromValue) + case "*time.Time": + var v interface{} + if len(in.Extra) > 0 { + v = Time(in.FromValue, String(in.Extra[0])) + } else { + if _, ok := in.FromValue.(*time.Time); ok { + return in.FromValue + } + v = Time(in.FromValue) + } + return &v + + case "GTime", "gtime.Time": + if len(in.Extra) > 0 { + if v := GTime(in.FromValue, String(in.Extra[0])); v != nil { + return *v + } else { + return *gtime.New() + } + } + if v := GTime(in.FromValue); v != nil { + return *v + } else { + return *gtime.New() + } + case "*gtime.Time": + if len(in.Extra) > 0 { + if v := GTime(in.FromValue, String(in.Extra[0])); v != nil { + return v + } else { + return gtime.New() 
+ } + } + if v := GTime(in.FromValue); v != nil { + return v + } else { + return gtime.New() + } + + case "Duration", "time.Duration": + return Duration(in.FromValue) + case "*time.Duration": + if _, ok := in.FromValue.(*time.Duration); ok { + return in.FromValue + } + v := Duration(in.FromValue) + return &v + + case "map[string]string": + return MapStrStr(in.FromValue) + + case "map[string]interface{}": + return Map(in.FromValue) + + case "[]map[string]interface{}": + return Maps(in.FromValue) + + case "json.RawMessage": + return Bytes(in.FromValue) + + default: + if in.ReferValue != nil { + var referReflectValue reflect.Value + if v, ok := in.ReferValue.(reflect.Value); ok { + referReflectValue = v + } else { + referReflectValue = reflect.ValueOf(in.ReferValue) + } + defer func() { + if recover() != nil { + if err := bindVarToReflectValue(referReflectValue, in.FromValue, nil); err == nil { + in.alreadySetToReferValue = true + convertedValue = referReflectValue.Interface() + } + } + }() + in.ToTypeName = referReflectValue.Kind().String() + in.ReferValue = nil + return reflect.ValueOf(doConvert(in)).Convert(referReflectValue.Type()).Interface() + } + return in.FromValue + } +} + +func doConvertWithReflectValueSet(reflectValue reflect.Value, in doConvertInput) { + convertedValue := doConvert(in) + if !in.alreadySetToReferValue { + reflectValue.Set(reflect.ValueOf(convertedValue)) + } +} diff --git a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_float.go b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_float.go new file mode 100644 index 00000000..41cfb1cc --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_float.go @@ -0,0 +1,55 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. 
+ +package gconv + +import ( + "strconv" + + "github.com/gogf/gf/v2/encoding/gbinary" +) + +// Float32 converts `any` to float32. +func Float32(any interface{}) float32 { + if any == nil { + return 0 + } + switch value := any.(type) { + case float32: + return value + case float64: + return float32(value) + case []byte: + return gbinary.DecodeToFloat32(value) + default: + if f, ok := value.(iFloat32); ok { + return f.Float32() + } + v, _ := strconv.ParseFloat(String(any), 64) + return float32(v) + } +} + +// Float64 converts `any` to float64. +func Float64(any interface{}) float64 { + if any == nil { + return 0 + } + switch value := any.(type) { + case float32: + return float64(value) + case float64: + return value + case []byte: + return gbinary.DecodeToFloat64(value) + default: + if f, ok := value.(iFloat64); ok { + return f.Float64() + } + v, _ := strconv.ParseFloat(String(any), 64) + return v + } +} diff --git a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_int.go b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_int.go new file mode 100644 index 00000000..48e26fc2 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_int.go @@ -0,0 +1,136 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gconv + +import ( + "math" + "strconv" + + "github.com/gogf/gf/v2/encoding/gbinary" +) + +// Int converts `any` to int. +func Int(any interface{}) int { + if any == nil { + return 0 + } + if v, ok := any.(int); ok { + return v + } + return int(Int64(any)) +} + +// Int8 converts `any` to int8. +func Int8(any interface{}) int8 { + if any == nil { + return 0 + } + if v, ok := any.(int8); ok { + return v + } + return int8(Int64(any)) +} + +// Int16 converts `any` to int16. 
+func Int16(any interface{}) int16 { + if any == nil { + return 0 + } + if v, ok := any.(int16); ok { + return v + } + return int16(Int64(any)) +} + +// Int32 converts `any` to int32. +func Int32(any interface{}) int32 { + if any == nil { + return 0 + } + if v, ok := any.(int32); ok { + return v + } + return int32(Int64(any)) +} + +// Int64 converts `any` to int64. +func Int64(any interface{}) int64 { + if any == nil { + return 0 + } + switch value := any.(type) { + case int: + return int64(value) + case int8: + return int64(value) + case int16: + return int64(value) + case int32: + return int64(value) + case int64: + return value + case uint: + return int64(value) + case uint8: + return int64(value) + case uint16: + return int64(value) + case uint32: + return int64(value) + case uint64: + return int64(value) + case float32: + return int64(value) + case float64: + return int64(value) + case bool: + if value { + return 1 + } + return 0 + case []byte: + return gbinary.DecodeToInt64(value) + default: + if f, ok := value.(iInt64); ok { + return f.Int64() + } + var ( + s = String(value) + isMinus = false + ) + if len(s) > 0 { + if s[0] == '-' { + isMinus = true + s = s[1:] + } else if s[0] == '+' { + s = s[1:] + } + } + // Hexadecimal + if len(s) > 2 && s[0] == '0' && (s[1] == 'x' || s[1] == 'X') { + if v, e := strconv.ParseInt(s[2:], 16, 64); e == nil { + if isMinus { + return -v + } + return v + } + } + // Decimal + if v, e := strconv.ParseInt(s, 10, 64); e == nil { + if isMinus { + return -v + } + return v + } + // Float64 + if valueInt64 := Float64(value); math.IsNaN(valueInt64) { + return 0 + } else { + return int64(valueInt64) + } + } +} diff --git a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_interface.go b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_interface.go new file mode 100644 index 00000000..9440e978 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_interface.go @@ -0,0 +1,112 @@ +// Copyright GoFrame Author(https://goframe.org). 
All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gconv + +import "github.com/gogf/gf/v2/os/gtime" + +// iString is used for type assert api for String(). +type iString interface { + String() string +} + +// iBool is used for type assert api for Bool(). +type iBool interface { + Bool() bool +} + +// iInt64 is used for type assert api for Int64(). +type iInt64 interface { + Int64() int64 +} + +// iUint64 is used for type assert api for Uint64(). +type iUint64 interface { + Uint64() uint64 +} + +// iFloat32 is used for type assert api for Float32(). +type iFloat32 interface { + Float32() float32 +} + +// iFloat64 is used for type assert api for Float64(). +type iFloat64 interface { + Float64() float64 +} + +// iError is used for type assert api for Error(). +type iError interface { + Error() string +} + +// iBytes is used for type assert api for Bytes(). +type iBytes interface { + Bytes() []byte +} + +// iInterface is used for type assert api for Interface(). +type iInterface interface { + Interface() interface{} +} + +// iInterfaces is used for type assert api for Interfaces(). +type iInterfaces interface { + Interfaces() []interface{} +} + +// iFloats is used for type assert api for Floats(). +type iFloats interface { + Floats() []float64 +} + +// iInts is used for type assert api for Ints(). +type iInts interface { + Ints() []int +} + +// iStrings is used for type assert api for Strings(). +type iStrings interface { + Strings() []string +} + +// iUints is used for type assert api for Uints(). +type iUints interface { + Uints() []uint +} + +// iMapStrAny is the interface support for converting struct parameter to map. +type iMapStrAny interface { + MapStrAny() map[string]interface{} +} + +// iUnmarshalValue is the interface for custom defined types customizing value assignment. 
+// Note that only pointer can implement interface iUnmarshalValue. +type iUnmarshalValue interface { + UnmarshalValue(interface{}) error +} + +// iUnmarshalText is the interface for custom defined types customizing value assignment. +// Note that only pointer can implement interface iUnmarshalText. +type iUnmarshalText interface { + UnmarshalText(text []byte) error +} + +// iUnmarshalText is the interface for custom defined types customizing value assignment. +// Note that only pointer can implement interface iUnmarshalJSON. +type iUnmarshalJSON interface { + UnmarshalJSON(b []byte) error +} + +// iSet is the interface for custom value assignment. +type iSet interface { + Set(value interface{}) (old interface{}) +} + +// iGTime is the interface for gtime.Time converting. +type iGTime interface { + GTime(format ...string) *gtime.Time +} diff --git a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_map.go b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_map.go new file mode 100644 index 00000000..89bf8fc0 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_map.go @@ -0,0 +1,512 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gconv + +import ( + "reflect" + "strings" + + "github.com/gogf/gf/v2/internal/empty" + "github.com/gogf/gf/v2/internal/json" + "github.com/gogf/gf/v2/internal/utils" +) + +type recursiveType string + +const ( + recursiveTypeAuto recursiveType = "auto" + recursiveTypeTrue recursiveType = "true" +) + +// Map converts any variable `value` to map[string]interface{}. If the parameter `value` is not a +// map/struct/*struct type, then the conversion will fail and returns nil. 
+// +// If `value` is a struct/*struct object, the second parameter `tags` specifies the most priority +// tags that will be detected, otherwise it detects the tags in order of: +// gconv, json, field name. +func Map(value interface{}, tags ...string) map[string]interface{} { + return doMapConvert(value, recursiveTypeAuto, tags...) +} + +// MapDeep does Map function recursively, which means if the attribute of `value` +// is also a struct/*struct, calls Map function on this attribute converting it to +// a map[string]interface{} type variable. +// Also see Map. +func MapDeep(value interface{}, tags ...string) map[string]interface{} { + return doMapConvert(value, recursiveTypeTrue, tags...) +} + +// doMapConvert implements the map converting. +// It automatically checks and converts json string to map if `value` is string/[]byte. +// +// TODO completely implement the recursive converting for all types, especially the map. +func doMapConvert(value interface{}, recursive recursiveType, tags ...string) map[string]interface{} { + if value == nil { + return nil + } + newTags := StructTagPriority + switch len(tags) { + case 0: + // No need handling. + case 1: + newTags = append(strings.Split(tags[0], ","), StructTagPriority...) + default: + newTags = append(tags, StructTagPriority...) + } + // Assert the common combination of types, and finally it uses reflection. + dataMap := make(map[string]interface{}) + switch r := value.(type) { + case string: + // If it is a JSON string, automatically unmarshal it! + if len(r) > 0 && r[0] == '{' && r[len(r)-1] == '}' { + if err := json.UnmarshalUseNumber([]byte(r), &dataMap); err != nil { + return nil + } + } else { + return nil + } + case []byte: + // If it is a JSON string, automatically unmarshal it! 
+ if len(r) > 0 && r[0] == '{' && r[len(r)-1] == '}' { + if err := json.UnmarshalUseNumber(r, &dataMap); err != nil { + return nil + } + } else { + return nil + } + case map[interface{}]interface{}: + for k, v := range r { + dataMap[String(k)] = doMapConvertForMapOrStructValue( + doMapConvertForMapOrStructValueInput{ + IsRoot: false, + Value: v, + RecursiveType: recursive, + RecursiveOption: recursive == recursiveTypeTrue, + Tags: newTags, + }, + ) + } + case map[interface{}]string: + for k, v := range r { + dataMap[String(k)] = v + } + case map[interface{}]int: + for k, v := range r { + dataMap[String(k)] = v + } + case map[interface{}]uint: + for k, v := range r { + dataMap[String(k)] = v + } + case map[interface{}]float32: + for k, v := range r { + dataMap[String(k)] = v + } + case map[interface{}]float64: + for k, v := range r { + dataMap[String(k)] = v + } + case map[string]bool: + for k, v := range r { + dataMap[k] = v + } + case map[string]int: + for k, v := range r { + dataMap[k] = v + } + case map[string]uint: + for k, v := range r { + dataMap[k] = v + } + case map[string]float32: + for k, v := range r { + dataMap[k] = v + } + case map[string]float64: + for k, v := range r { + dataMap[k] = v + } + case map[string]string: + for k, v := range r { + dataMap[k] = v + } + case map[string]interface{}: + if recursive == recursiveTypeTrue { + // A copy of current map. + for k, v := range r { + dataMap[k] = doMapConvertForMapOrStructValue( + doMapConvertForMapOrStructValueInput{ + IsRoot: false, + Value: v, + RecursiveType: recursive, + RecursiveOption: recursive == recursiveTypeTrue, + Tags: newTags, + }, + ) + } + } else { + // It returns the map directly without any changing. 
+ return r + } + case map[int]interface{}: + for k, v := range r { + dataMap[String(k)] = doMapConvertForMapOrStructValue( + doMapConvertForMapOrStructValueInput{ + IsRoot: false, + Value: v, + RecursiveType: recursive, + RecursiveOption: recursive == recursiveTypeTrue, + Tags: newTags, + }, + ) + } + case map[int]string: + for k, v := range r { + dataMap[String(k)] = v + } + case map[uint]string: + for k, v := range r { + dataMap[String(k)] = v + } + + default: + // Not a common type, it then uses reflection for conversion. + var reflectValue reflect.Value + if v, ok := value.(reflect.Value); ok { + reflectValue = v + } else { + reflectValue = reflect.ValueOf(value) + } + reflectKind := reflectValue.Kind() + // If it is a pointer, we should find its real data type. + for reflectKind == reflect.Ptr { + reflectValue = reflectValue.Elem() + reflectKind = reflectValue.Kind() + } + switch reflectKind { + // If `value` is type of array, it converts the value of even number index as its key and + // the value of odd number index as its corresponding value, for example: + // []string{"k1","v1","k2","v2"} => map[string]interface{}{"k1":"v1", "k2":"v2"} + // []string{"k1","v1","k2"} => map[string]interface{}{"k1":"v1", "k2":nil} + case reflect.Slice, reflect.Array: + length := reflectValue.Len() + for i := 0; i < length; i += 2 { + if i+1 < length { + dataMap[String(reflectValue.Index(i).Interface())] = reflectValue.Index(i + 1).Interface() + } else { + dataMap[String(reflectValue.Index(i).Interface())] = nil + } + } + case reflect.Map, reflect.Struct, reflect.Interface: + convertedValue := doMapConvertForMapOrStructValue( + doMapConvertForMapOrStructValueInput{ + IsRoot: true, + Value: value, + RecursiveType: recursive, + RecursiveOption: recursive == recursiveTypeTrue, + Tags: newTags, + }, + ) + if m, ok := convertedValue.(map[string]interface{}); ok { + return m + } + return nil + default: + return nil + } + } + return dataMap +} + +type 
doMapConvertForMapOrStructValueInput struct { + IsRoot bool // It returns directly if it is not root and with no recursive converting. + Value interface{} // Current operation value. + RecursiveType recursiveType // The type from top function entry. + RecursiveOption bool // Whether convert recursively for `current` operation. + Tags []string // Map key mapping. +} + +func doMapConvertForMapOrStructValue(in doMapConvertForMapOrStructValueInput) interface{} { + if in.IsRoot == false && in.RecursiveOption == false { + return in.Value + } + + var reflectValue reflect.Value + if v, ok := in.Value.(reflect.Value); ok { + reflectValue = v + in.Value = v.Interface() + } else { + reflectValue = reflect.ValueOf(in.Value) + } + reflectKind := reflectValue.Kind() + // If it is a pointer, we should find its real data type. + for reflectKind == reflect.Ptr { + reflectValue = reflectValue.Elem() + reflectKind = reflectValue.Kind() + } + switch reflectKind { + case reflect.Map: + var ( + mapKeys = reflectValue.MapKeys() + dataMap = make(map[string]interface{}) + ) + for _, k := range mapKeys { + dataMap[String(k.Interface())] = doMapConvertForMapOrStructValue( + doMapConvertForMapOrStructValueInput{ + IsRoot: false, + Value: reflectValue.MapIndex(k).Interface(), + RecursiveType: in.RecursiveType, + RecursiveOption: in.RecursiveType == recursiveTypeTrue, + Tags: in.Tags, + }, + ) + } + return dataMap + + case reflect.Struct: + var dataMap = make(map[string]interface{}) + // Map converting interface check. + if v, ok := in.Value.(iMapStrAny); ok { + // Value copy, in case of concurrent safety. 
+ for mapK, mapV := range v.MapStrAny() { + if in.RecursiveOption { + dataMap[mapK] = doMapConvertForMapOrStructValue( + doMapConvertForMapOrStructValueInput{ + IsRoot: false, + Value: mapV, + RecursiveType: in.RecursiveType, + RecursiveOption: in.RecursiveType == recursiveTypeTrue, + Tags: in.Tags, + }, + ) + } else { + dataMap[mapK] = mapV + } + } + return dataMap + } + // Using reflect for converting. + var ( + rtField reflect.StructField + rvField reflect.Value + reflectType = reflectValue.Type() // attribute value type. + mapKey = "" // mapKey may be the tag name or the struct attribute name. + ) + for i := 0; i < reflectValue.NumField(); i++ { + rtField = reflectType.Field(i) + rvField = reflectValue.Field(i) + // Only convert the public attributes. + fieldName := rtField.Name + if !utils.IsLetterUpper(fieldName[0]) { + continue + } + mapKey = "" + fieldTag := rtField.Tag + for _, tag := range in.Tags { + if mapKey = fieldTag.Get(tag); mapKey != "" { + break + } + } + if mapKey == "" { + mapKey = fieldName + } else { + // Support json tag feature: -, omitempty + mapKey = strings.TrimSpace(mapKey) + if mapKey == "-" { + continue + } + array := strings.Split(mapKey, ",") + if len(array) > 1 { + switch strings.TrimSpace(array[1]) { + case "omitempty": + if empty.IsEmpty(rvField.Interface()) { + continue + } else { + mapKey = strings.TrimSpace(array[0]) + } + default: + mapKey = strings.TrimSpace(array[0]) + } + } + if mapKey == "" { + mapKey = fieldName + } + } + if in.RecursiveOption || rtField.Anonymous { + // Do map converting recursively. + var ( + rvAttrField = rvField + rvAttrKind = rvField.Kind() + ) + if rvAttrKind == reflect.Ptr { + rvAttrField = rvField.Elem() + rvAttrKind = rvAttrField.Kind() + } + switch rvAttrKind { + case reflect.Struct: + // Embedded struct and has no fields, just ignores it. 
+ // Eg: gmeta.Meta + if rvAttrField.Type().NumField() == 0 { + continue + } + var ( + hasNoTag = mapKey == fieldName + // DO NOT use rvAttrField.Interface() here, + // as it might be changed from pointer to struct. + rvInterface = rvField.Interface() + ) + switch { + case hasNoTag && rtField.Anonymous: + // It means this attribute field has no tag. + // Overwrite the attribute with sub-struct attribute fields. + anonymousValue := doMapConvertForMapOrStructValue(doMapConvertForMapOrStructValueInput{ + IsRoot: false, + Value: rvInterface, + RecursiveType: in.RecursiveType, + RecursiveOption: true, + Tags: in.Tags, + }) + if m, ok := anonymousValue.(map[string]interface{}); ok { + for k, v := range m { + dataMap[k] = v + } + } else { + dataMap[mapKey] = rvInterface + } + + // It means this attribute field has desired tag. + case !hasNoTag && rtField.Anonymous: + dataMap[mapKey] = doMapConvertForMapOrStructValue(doMapConvertForMapOrStructValueInput{ + IsRoot: false, + Value: rvInterface, + RecursiveType: in.RecursiveType, + RecursiveOption: true, + Tags: in.Tags, + }) + + default: + dataMap[mapKey] = doMapConvertForMapOrStructValue(doMapConvertForMapOrStructValueInput{ + IsRoot: false, + Value: rvInterface, + RecursiveType: in.RecursiveType, + RecursiveOption: in.RecursiveType == recursiveTypeTrue, + Tags: in.Tags, + }) + } + + // The struct attribute is type of slice. 
+ case reflect.Array, reflect.Slice: + length := rvAttrField.Len() + if length == 0 { + dataMap[mapKey] = rvAttrField.Interface() + break + } + array := make([]interface{}, length) + for arrayIndex := 0; arrayIndex < length; arrayIndex++ { + array[arrayIndex] = doMapConvertForMapOrStructValue( + doMapConvertForMapOrStructValueInput{ + IsRoot: false, + Value: rvAttrField.Index(arrayIndex).Interface(), + RecursiveType: in.RecursiveType, + RecursiveOption: in.RecursiveType == recursiveTypeTrue, + Tags: in.Tags, + }, + ) + } + dataMap[mapKey] = array + case reflect.Map: + var ( + mapKeys = rvAttrField.MapKeys() + nestedMap = make(map[string]interface{}) + ) + for _, k := range mapKeys { + nestedMap[String(k.Interface())] = doMapConvertForMapOrStructValue( + doMapConvertForMapOrStructValueInput{ + IsRoot: false, + Value: rvAttrField.MapIndex(k).Interface(), + RecursiveType: in.RecursiveType, + RecursiveOption: in.RecursiveType == recursiveTypeTrue, + Tags: in.Tags, + }, + ) + } + dataMap[mapKey] = nestedMap + default: + if rvField.IsValid() { + dataMap[mapKey] = reflectValue.Field(i).Interface() + } else { + dataMap[mapKey] = nil + } + } + } else { + // No recursive map value converting + if rvField.IsValid() { + dataMap[mapKey] = reflectValue.Field(i).Interface() + } else { + dataMap[mapKey] = nil + } + } + } + if len(dataMap) == 0 { + return in.Value + } + return dataMap + + // The given value is type of slice. + case reflect.Array, reflect.Slice: + length := reflectValue.Len() + if length == 0 { + break + } + array := make([]interface{}, reflectValue.Len()) + for i := 0; i < length; i++ { + array[i] = doMapConvertForMapOrStructValue(doMapConvertForMapOrStructValueInput{ + IsRoot: false, + Value: reflectValue.Index(i).Interface(), + RecursiveType: in.RecursiveType, + RecursiveOption: in.RecursiveType == recursiveTypeTrue, + Tags: in.Tags, + }) + } + return array + } + return in.Value +} + +// MapStrStr converts `value` to map[string]string. 
+// Note that there might be data copy for this map type converting. +func MapStrStr(value interface{}, tags ...string) map[string]string { + if r, ok := value.(map[string]string); ok { + return r + } + m := Map(value, tags...) + if len(m) > 0 { + vMap := make(map[string]string, len(m)) + for k, v := range m { + vMap[k] = String(v) + } + return vMap + } + return nil +} + +// MapStrStrDeep converts `value` to map[string]string recursively. +// Note that there might be data copy for this map type converting. +func MapStrStrDeep(value interface{}, tags ...string) map[string]string { + if r, ok := value.(map[string]string); ok { + return r + } + m := MapDeep(value, tags...) + if len(m) > 0 { + vMap := make(map[string]string, len(m)) + for k, v := range m { + vMap[k] = String(v) + } + return vMap + } + return nil +} diff --git a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_maps.go b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_maps.go new file mode 100644 index 00000000..3efe2386 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_maps.go @@ -0,0 +1,119 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gconv + +import "github.com/gogf/gf/v2/internal/json" + +// SliceMap is alias of Maps. +func SliceMap(any interface{}) []map[string]interface{} { + return Maps(any) +} + +// SliceMapDeep is alias of MapsDeep. +func SliceMapDeep(any interface{}) []map[string]interface{} { + return MapsDeep(any) +} + +// SliceStruct is alias of Structs. +func SliceStruct(params interface{}, pointer interface{}, mapping ...map[string]string) (err error) { + return Structs(params, pointer, mapping...) +} + +// Maps converts `value` to []map[string]interface{}. 
+// Note that it automatically checks and converts json string to []map if `value` is string/[]byte. +func Maps(value interface{}, tags ...string) []map[string]interface{} { + if value == nil { + return nil + } + switch r := value.(type) { + case string: + list := make([]map[string]interface{}, 0) + if len(r) > 0 && r[0] == '[' && r[len(r)-1] == ']' { + if err := json.UnmarshalUseNumber([]byte(r), &list); err != nil { + return nil + } + return list + } else { + return nil + } + + case []byte: + list := make([]map[string]interface{}, 0) + if len(r) > 0 && r[0] == '[' && r[len(r)-1] == ']' { + if err := json.UnmarshalUseNumber(r, &list); err != nil { + return nil + } + return list + } else { + return nil + } + + case []map[string]interface{}: + return r + + default: + array := Interfaces(value) + if len(array) == 0 { + return nil + } + list := make([]map[string]interface{}, len(array)) + for k, v := range array { + list[k] = Map(v, tags...) + } + return list + } +} + +// MapsDeep converts `value` to []map[string]interface{} recursively. +// +// TODO completely implement the recursive converting for all types. +func MapsDeep(value interface{}, tags ...string) []map[string]interface{} { + if value == nil { + return nil + } + switch r := value.(type) { + case string: + list := make([]map[string]interface{}, 0) + if len(r) > 0 && r[0] == '[' && r[len(r)-1] == ']' { + if err := json.UnmarshalUseNumber([]byte(r), &list); err != nil { + return nil + } + return list + } else { + return nil + } + + case []byte: + list := make([]map[string]interface{}, 0) + if len(r) > 0 && r[0] == '[' && r[len(r)-1] == ']' { + if err := json.UnmarshalUseNumber(r, &list); err != nil { + return nil + } + return list + } else { + return nil + } + + case []map[string]interface{}: + list := make([]map[string]interface{}, len(r)) + for k, v := range r { + list[k] = MapDeep(v, tags...) 
+ } + return list + + default: + array := Interfaces(value) + if len(array) == 0 { + return nil + } + list := make([]map[string]interface{}, len(array)) + for k, v := range array { + list[k] = MapDeep(v, tags...) + } + return list + } +} diff --git a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_maptomap.go b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_maptomap.go new file mode 100644 index 00000000..51eb0efd --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_maptomap.go @@ -0,0 +1,147 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gconv + +import ( + "reflect" + + "github.com/gogf/gf/v2/errors/gcode" + "github.com/gogf/gf/v2/errors/gerror" + "github.com/gogf/gf/v2/internal/json" +) + +// MapToMap converts any map type variable `params` to another map type variable `pointer` +// using reflect. +// See doMapToMap. +func MapToMap(params interface{}, pointer interface{}, mapping ...map[string]string) error { + return doMapToMap(params, pointer, mapping...) +} + +// doMapToMap converts any map type variable `params` to another map type variable `pointer`. +// +// The parameter `params` can be any type of map, like: +// map[string]string, map[string]struct, map[string]*struct, etc. +// +// The parameter `pointer` should be type of *map, like: +// map[int]string, map[string]struct, map[string]*struct, etc. +// +// The optional parameter `mapping` is used for struct attribute to map key mapping, which makes +// sense only if the items of original map `params` is type struct. +func doMapToMap(params interface{}, pointer interface{}, mapping ...map[string]string) (err error) { + // If given `params` is JSON, it then uses json.Unmarshal doing the converting. 
+ switch r := params.(type) { + case []byte: + if json.Valid(r) { + if rv, ok := pointer.(reflect.Value); ok { + if rv.Kind() == reflect.Ptr { + return json.UnmarshalUseNumber(r, rv.Interface()) + } + } else { + return json.UnmarshalUseNumber(r, pointer) + } + } + case string: + if paramsBytes := []byte(r); json.Valid(paramsBytes) { + if rv, ok := pointer.(reflect.Value); ok { + if rv.Kind() == reflect.Ptr { + return json.UnmarshalUseNumber(paramsBytes, rv.Interface()) + } + } else { + return json.UnmarshalUseNumber(paramsBytes, pointer) + } + } + } + var ( + paramsRv reflect.Value + paramsKind reflect.Kind + keyToAttributeNameMapping map[string]string + ) + if len(mapping) > 0 { + keyToAttributeNameMapping = mapping[0] + } + if v, ok := params.(reflect.Value); ok { + paramsRv = v + } else { + paramsRv = reflect.ValueOf(params) + } + paramsKind = paramsRv.Kind() + if paramsKind == reflect.Ptr { + paramsRv = paramsRv.Elem() + paramsKind = paramsRv.Kind() + } + if paramsKind != reflect.Map { + return doMapToMap(Map(params), pointer, mapping...) + } + // Empty params map, no need continue. + if paramsRv.Len() == 0 { + return nil + } + var pointerRv reflect.Value + if v, ok := pointer.(reflect.Value); ok { + pointerRv = v + } else { + pointerRv = reflect.ValueOf(pointer) + } + pointerKind := pointerRv.Kind() + for pointerKind == reflect.Ptr { + pointerRv = pointerRv.Elem() + pointerKind = pointerRv.Kind() + } + if pointerKind != reflect.Map { + return gerror.NewCodef(gcode.CodeInvalidParameter, "pointer should be type of *map, but got:%s", pointerKind) + } + defer func() { + // Catch the panic, especially the reflect operation panics. 
+ if exception := recover(); exception != nil { + if v, ok := exception.(error); ok && gerror.HasStack(v) { + err = v + } else { + err = gerror.NewCodeSkipf(gcode.CodeInternalError, 1, "%+v", exception) + } + } + }() + var ( + paramsKeys = paramsRv.MapKeys() + pointerKeyType = pointerRv.Type().Key() + pointerValueType = pointerRv.Type().Elem() + pointerValueKind = pointerValueType.Kind() + dataMap = reflect.MakeMapWithSize(pointerRv.Type(), len(paramsKeys)) + ) + // Retrieve the true element type of target map. + if pointerValueKind == reflect.Ptr { + pointerValueKind = pointerValueType.Elem().Kind() + } + for _, key := range paramsKeys { + e := reflect.New(pointerValueType).Elem() + switch pointerValueKind { + case reflect.Map, reflect.Struct: + if err = doStruct(paramsRv.MapIndex(key).Interface(), e, keyToAttributeNameMapping, ""); err != nil { + return err + } + default: + e.Set( + reflect.ValueOf( + Convert( + paramsRv.MapIndex(key).Interface(), + pointerValueType.String(), + ), + ), + ) + } + dataMap.SetMapIndex( + reflect.ValueOf( + Convert( + key.Interface(), + pointerKeyType.Name(), + ), + ), + e, + ) + } + pointerRv.Set(dataMap) + return nil +} diff --git a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_maptomaps.go b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_maptomaps.go new file mode 100644 index 00000000..6ab8e219 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_maptomaps.go @@ -0,0 +1,141 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gconv + +import ( + "reflect" + + "github.com/gogf/gf/v2/errors/gcode" + "github.com/gogf/gf/v2/errors/gerror" + "github.com/gogf/gf/v2/internal/json" +) + +// MapToMaps converts any slice type variable `params` to another map slice type variable `pointer`. +// See doMapToMaps. 
+func MapToMaps(params interface{}, pointer interface{}, mapping ...map[string]string) error { + return doMapToMaps(params, pointer, mapping...) +} + +// doMapToMaps converts any map type variable `params` to another map slice variable `pointer`. +// +// The parameter `params` can be type of []map, []*map, []struct, []*struct. +// +// The parameter `pointer` should be type of []map, []*map. +// +// The optional parameter `mapping` is used for struct attribute to map key mapping, which makes +// sense only if the item of `params` is type struct. +func doMapToMaps(params interface{}, pointer interface{}, mapping ...map[string]string) (err error) { + // If given `params` is JSON, it then uses json.Unmarshal doing the converting. + switch r := params.(type) { + case []byte: + if json.Valid(r) { + if rv, ok := pointer.(reflect.Value); ok { + if rv.Kind() == reflect.Ptr { + return json.UnmarshalUseNumber(r, rv.Interface()) + } + } else { + return json.UnmarshalUseNumber(r, pointer) + } + } + case string: + if paramsBytes := []byte(r); json.Valid(paramsBytes) { + if rv, ok := pointer.(reflect.Value); ok { + if rv.Kind() == reflect.Ptr { + return json.UnmarshalUseNumber(paramsBytes, rv.Interface()) + } + } else { + return json.UnmarshalUseNumber(paramsBytes, pointer) + } + } + } + // Params and its element type check. 
+ var ( + paramsRv reflect.Value + paramsKind reflect.Kind + ) + if v, ok := params.(reflect.Value); ok { + paramsRv = v + } else { + paramsRv = reflect.ValueOf(params) + } + paramsKind = paramsRv.Kind() + if paramsKind == reflect.Ptr { + paramsRv = paramsRv.Elem() + paramsKind = paramsRv.Kind() + } + if paramsKind != reflect.Array && paramsKind != reflect.Slice { + return gerror.NewCode(gcode.CodeInvalidParameter, "params should be type of slice, eg: []map/[]*map/[]struct/[]*struct") + } + var ( + paramsElem = paramsRv.Type().Elem() + paramsElemKind = paramsElem.Kind() + ) + if paramsElemKind == reflect.Ptr { + paramsElem = paramsElem.Elem() + paramsElemKind = paramsElem.Kind() + } + if paramsElemKind != reflect.Map && paramsElemKind != reflect.Struct && paramsElemKind != reflect.Interface { + return gerror.NewCodef(gcode.CodeInvalidParameter, "params element should be type of map/*map/struct/*struct, but got: %s", paramsElemKind) + } + // Empty slice, no need continue. + if paramsRv.Len() == 0 { + return nil + } + // Pointer and its element type check. + var ( + pointerRv = reflect.ValueOf(pointer) + pointerKind = pointerRv.Kind() + ) + for pointerKind == reflect.Ptr { + pointerRv = pointerRv.Elem() + pointerKind = pointerRv.Kind() + } + if pointerKind != reflect.Array && pointerKind != reflect.Slice { + return gerror.NewCode(gcode.CodeInvalidParameter, "pointer should be type of *[]map/*[]*map") + } + var ( + pointerElemType = pointerRv.Type().Elem() + pointerElemKind = pointerElemType.Kind() + ) + if pointerElemKind == reflect.Ptr { + pointerElemKind = pointerElemType.Elem().Kind() + } + if pointerElemKind != reflect.Map { + return gerror.NewCode(gcode.CodeInvalidParameter, "pointer element should be type of map/*map") + } + defer func() { + // Catch the panic, especially the reflection operation panics. 
+ if exception := recover(); exception != nil { + if v, ok := exception.(error); ok && gerror.HasStack(v) { + err = v + } else { + err = gerror.NewCodeSkipf(gcode.CodeInternalError, 1, "%+v", exception) + } + } + }() + var ( + pointerSlice = reflect.MakeSlice(pointerRv.Type(), paramsRv.Len(), paramsRv.Len()) + ) + for i := 0; i < paramsRv.Len(); i++ { + var item reflect.Value + if pointerElemType.Kind() == reflect.Ptr { + item = reflect.New(pointerElemType.Elem()) + if err = MapToMap(paramsRv.Index(i).Interface(), item, mapping...); err != nil { + return err + } + pointerSlice.Index(i).Set(item) + } else { + item = reflect.New(pointerElemType) + if err = MapToMap(paramsRv.Index(i).Interface(), item, mapping...); err != nil { + return err + } + pointerSlice.Index(i).Set(item.Elem()) + } + } + pointerRv.Set(pointerSlice) + return +} diff --git a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_ptr.go b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_ptr.go new file mode 100644 index 00000000..d23066da --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_ptr.go @@ -0,0 +1,96 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gconv + +// PtrAny creates and returns an interface{} pointer variable to this value. +func PtrAny(any interface{}) *interface{} { + return &any +} + +// PtrString creates and returns a string pointer variable to this value. +func PtrString(any interface{}) *string { + v := String(any) + return &v +} + +// PtrBool creates and returns a bool pointer variable to this value. +func PtrBool(any interface{}) *bool { + v := Bool(any) + return &v +} + +// PtrInt creates and returns an int pointer variable to this value. 
+func PtrInt(any interface{}) *int { + v := Int(any) + return &v +} + +// PtrInt8 creates and returns an int8 pointer variable to this value. +func PtrInt8(any interface{}) *int8 { + v := Int8(any) + return &v +} + +// PtrInt16 creates and returns an int16 pointer variable to this value. +func PtrInt16(any interface{}) *int16 { + v := Int16(any) + return &v +} + +// PtrInt32 creates and returns an int32 pointer variable to this value. +func PtrInt32(any interface{}) *int32 { + v := Int32(any) + return &v +} + +// PtrInt64 creates and returns an int64 pointer variable to this value. +func PtrInt64(any interface{}) *int64 { + v := Int64(any) + return &v +} + +// PtrUint creates and returns an uint pointer variable to this value. +func PtrUint(any interface{}) *uint { + v := Uint(any) + return &v +} + +// PtrUint8 creates and returns an uint8 pointer variable to this value. +func PtrUint8(any interface{}) *uint8 { + v := Uint8(any) + return &v +} + +// PtrUint16 creates and returns an uint16 pointer variable to this value. +func PtrUint16(any interface{}) *uint16 { + v := Uint16(any) + return &v +} + +// PtrUint32 creates and returns an uint32 pointer variable to this value. +func PtrUint32(any interface{}) *uint32 { + v := Uint32(any) + return &v +} + +// PtrUint64 creates and returns an uint64 pointer variable to this value. +func PtrUint64(any interface{}) *uint64 { + v := Uint64(any) + return &v +} + +// PtrFloat32 creates and returns a float32 pointer variable to this value. +func PtrFloat32(any interface{}) *float32 { + v := Float32(any) + return &v +} + +// PtrFloat64 creates and returns a float64 pointer variable to this value. 
+func PtrFloat64(any interface{}) *float64 { + v := Float64(any) + return &v +} diff --git a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_scan.go b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_scan.go new file mode 100644 index 00000000..28f7d7b4 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_scan.go @@ -0,0 +1,525 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gconv + +import ( + "database/sql" + "reflect" + + "github.com/gogf/gf/v2/errors/gcode" + "github.com/gogf/gf/v2/errors/gerror" + "github.com/gogf/gf/v2/internal/utils" + "github.com/gogf/gf/v2/os/gstructs" +) + +// Scan automatically checks the type of `pointer` and converts `params` to `pointer`. It supports `pointer` +// with type of `*map/*[]map/*[]*map/*struct/**struct/*[]struct/*[]*struct` for converting. +// +// It calls function `doMapToMap` internally if `pointer` is type of *map for converting. +// It calls function `doMapToMaps` internally if `pointer` is type of *[]map/*[]*map for converting. +// It calls function `doStruct` internally if `pointer` is type of *struct/**struct for converting. +// It calls function `doStructs` internally if `pointer` is type of *[]struct/*[]*struct for converting. +func Scan(params interface{}, pointer interface{}, mapping ...map[string]string) (err error) { + var ( + pointerType reflect.Type + pointerKind reflect.Kind + pointerValue reflect.Value + ) + if v, ok := pointer.(reflect.Value); ok { + pointerValue = v + pointerType = v.Type() + } else { + pointerValue = reflect.ValueOf(pointer) + pointerType = reflect.TypeOf(pointer) // Do not use pointerValue.Type() as pointerValue might be zero. 
+ } + + if pointerType == nil { + return gerror.NewCode(gcode.CodeInvalidParameter, "parameter pointer should not be nil") + } + pointerKind = pointerType.Kind() + if pointerKind != reflect.Ptr { + if pointerValue.CanAddr() { + pointerValue = pointerValue.Addr() + pointerType = pointerValue.Type() + pointerKind = pointerType.Kind() + } else { + return gerror.NewCodef( + gcode.CodeInvalidParameter, + "params should be type of pointer, but got type: %v", + pointerType, + ) + } + } + // Direct assignment checks! + var ( + paramsType reflect.Type + paramsValue reflect.Value + ) + if v, ok := params.(reflect.Value); ok { + paramsValue = v + paramsType = paramsValue.Type() + } else { + paramsValue = reflect.ValueOf(params) + paramsType = reflect.TypeOf(params) // Do not use paramsValue.Type() as paramsValue might be zero. + } + // If `params` and `pointer` are the same type, the do directly assignment. + // For performance enhancement purpose. + var ( + pointerValueElem = pointerValue.Elem() + ) + if pointerValueElem.CanSet() && paramsType == pointerValueElem.Type() { + pointerValueElem.Set(paramsValue) + return nil + } + + // Converting. + var ( + pointerElem = pointerType.Elem() + pointerElemKind = pointerElem.Kind() + keyToAttributeNameMapping map[string]string + ) + if len(mapping) > 0 { + keyToAttributeNameMapping = mapping[0] + } + switch pointerElemKind { + case reflect.Map: + return doMapToMap(params, pointer, mapping...) + + case reflect.Array, reflect.Slice: + var ( + sliceElem = pointerElem.Elem() + sliceElemKind = sliceElem.Kind() + ) + for sliceElemKind == reflect.Ptr { + sliceElem = sliceElem.Elem() + sliceElemKind = sliceElem.Kind() + } + if sliceElemKind == reflect.Map { + return doMapToMaps(params, pointer, mapping...) 
+ } + return doStructs(params, pointer, keyToAttributeNameMapping, "") + + default: + return doStruct(params, pointer, keyToAttributeNameMapping, "") + } +} + +// ScanList converts `structSlice` to struct slice which contains other complex struct attributes. +// Note that the parameter `structSlicePointer` should be type of *[]struct/*[]*struct. +// +// Usage example 1: Normal attribute struct relation: +// +// type EntityUser struct { +// Uid int +// Name string +// } +// +// type EntityUserDetail struct { +// Uid int +// Address string +// } +// +// type EntityUserScores struct { +// Id int +// Uid int +// Score int +// Course string +// } +// +// type Entity struct { +// User *EntityUser +// UserDetail *EntityUserDetail +// UserScores []*EntityUserScores +// } +// +// var users []*Entity +// ScanList(records, &users, "User") +// ScanList(records, &users, "User", "uid") +// ScanList(records, &users, "UserDetail", "User", "uid:Uid") +// ScanList(records, &users, "UserScores", "User", "uid:Uid") +// ScanList(records, &users, "UserScores", "User", "uid") +// +// Usage example 2: Embedded attribute struct relation: +// +// type EntityUser struct { +// Uid int +// Name string +// } +// +// type EntityUserDetail struct { +// Uid int +// Address string +// } +// +// type EntityUserScores struct { +// Id int +// Uid int +// Score int +// } +// +// type Entity struct { +// EntityUser +// UserDetail EntityUserDetail +// UserScores []EntityUserScores +// } +// +// var users []*Entity +// ScanList(records, &users) +// ScanList(records, &users, "UserDetail", "uid") +// ScanList(records, &users, "UserScores", "uid") +// +// The parameters "User/UserDetail/UserScores" in the example codes specify the target attribute struct +// that current result will be bound to. +// +// The "uid" in the example codes is the table field name of the result, and the "Uid" is the relational +// struct attribute name - not the attribute name of the bound to target. 
In the example codes, it's attribute +// name "Uid" of "User" of entity "Entity". It automatically calculates the HasOne/HasMany relationship with +// given `relation` parameter. +// +// See the example or unit testing cases for clear understanding for this function. +func ScanList(structSlice interface{}, structSlicePointer interface{}, bindToAttrName string, relationAttrNameAndFields ...string) (err error) { + var ( + relationAttrName string + relationFields string + ) + switch len(relationAttrNameAndFields) { + case 2: + relationAttrName = relationAttrNameAndFields[0] + relationFields = relationAttrNameAndFields[1] + case 1: + relationFields = relationAttrNameAndFields[0] + } + return doScanList(structSlice, structSlicePointer, bindToAttrName, relationAttrName, relationFields) +} + +// doScanList converts `structSlice` to struct slice which contains other complex struct attributes recursively. +// Note that the parameter `structSlicePointer` should be type of *[]struct/*[]*struct. +func doScanList( + structSlice interface{}, structSlicePointer interface{}, bindToAttrName, relationAttrName, relationFields string, +) (err error) { + var ( + maps = Maps(structSlice) + ) + if len(maps) == 0 { + return nil + } + // Necessary checks for parameters. + if bindToAttrName == "" { + return gerror.NewCode(gcode.CodeInvalidParameter, `bindToAttrName should not be empty`) + } + + if relationAttrName == "." 
{ + relationAttrName = "" + } + + var ( + reflectValue = reflect.ValueOf(structSlicePointer) + reflectKind = reflectValue.Kind() + ) + if reflectKind == reflect.Interface { + reflectValue = reflectValue.Elem() + reflectKind = reflectValue.Kind() + } + if reflectKind != reflect.Ptr { + return gerror.NewCodef( + gcode.CodeInvalidParameter, + "structSlicePointer should be type of *[]struct/*[]*struct, but got: %v", + reflectKind, + ) + } + reflectValue = reflectValue.Elem() + reflectKind = reflectValue.Kind() + if reflectKind != reflect.Slice && reflectKind != reflect.Array { + return gerror.NewCodef( + gcode.CodeInvalidParameter, + "structSlicePointer should be type of *[]struct/*[]*struct, but got: %v", + reflectKind, + ) + } + length := len(maps) + if length == 0 { + // The pointed slice is not empty. + if reflectValue.Len() > 0 { + // It here checks if it has struct item, which is already initialized. + // It then returns error to warn the developer its empty and no conversion. + if v := reflectValue.Index(0); v.Kind() != reflect.Ptr { + return sql.ErrNoRows + } + } + // Do nothing for empty struct slice. + return nil + } + var ( + arrayValue reflect.Value // Like: []*Entity + arrayItemType reflect.Type // Like: *Entity + reflectType = reflect.TypeOf(structSlicePointer) + ) + if reflectValue.Len() > 0 { + arrayValue = reflectValue + } else { + arrayValue = reflect.MakeSlice(reflectType.Elem(), length, length) + } + + // Slice element item. + arrayItemType = arrayValue.Index(0).Type() + + // Relation variables. + var ( + relationDataMap map[string]interface{} + relationFromFieldName string // Eg: relationKV: id:uid -> id + relationBindToFieldName string // Eg: relationKV: id:uid -> uid + ) + if len(relationFields) > 0 { + // The relation key string of table filed name and attribute name + // can be joined with char '=' or ':'. + array := utils.SplitAndTrim(relationFields, "=") + if len(array) == 1 { + // Compatible with old splitting char ':'. 
+ array = utils.SplitAndTrim(relationFields, ":") + } + if len(array) == 1 { + // The relation names are the same. + array = []string{relationFields, relationFields} + } + if len(array) == 2 { + // Defined table field to relation attribute name. + // Like: + // uid:Uid + // uid:UserId + relationFromFieldName = array[0] + relationBindToFieldName = array[1] + if key, _ := utils.MapPossibleItemByKey(maps[0], relationFromFieldName); key == "" { + return gerror.NewCodef( + gcode.CodeInvalidParameter, + `cannot find possible related table field name "%s" from given relation fields "%s"`, + relationFromFieldName, + relationFields, + ) + } else { + relationFromFieldName = key + } + } else { + return gerror.NewCode( + gcode.CodeInvalidParameter, + `parameter relationKV should be format of "ResultFieldName:BindToAttrName"`, + ) + } + if relationFromFieldName != "" { + // Note that the value might be type of slice. + relationDataMap = utils.ListToMapByKey(maps, relationFromFieldName) + } + if len(relationDataMap) == 0 { + return gerror.NewCodef( + gcode.CodeInvalidParameter, + `cannot find the relation data map, maybe invalid relation fields given "%v"`, + relationFields, + ) + } + } + // Bind to target attribute. 
+ var ( + ok bool + bindToAttrValue reflect.Value + bindToAttrKind reflect.Kind + bindToAttrType reflect.Type + bindToAttrField reflect.StructField + ) + if arrayItemType.Kind() == reflect.Ptr { + if bindToAttrField, ok = arrayItemType.Elem().FieldByName(bindToAttrName); !ok { + return gerror.NewCodef( + gcode.CodeInvalidParameter, + `invalid parameter bindToAttrName: cannot find attribute with name "%s" from slice element`, + bindToAttrName, + ) + } + } else { + if bindToAttrField, ok = arrayItemType.FieldByName(bindToAttrName); !ok { + return gerror.NewCodef( + gcode.CodeInvalidParameter, + `invalid parameter bindToAttrName: cannot find attribute with name "%s" from slice element`, + bindToAttrName, + ) + } + } + bindToAttrType = bindToAttrField.Type + bindToAttrKind = bindToAttrType.Kind() + + // Bind to relation conditions. + var ( + relationFromAttrValue reflect.Value + relationFromAttrField reflect.Value + relationBindToFieldNameChecked bool + ) + for i := 0; i < arrayValue.Len(); i++ { + arrayElemValue := arrayValue.Index(i) + // The FieldByName should be called on non-pointer reflect.Value. + if arrayElemValue.Kind() == reflect.Ptr { + // Like: []*Entity + arrayElemValue = arrayElemValue.Elem() + if !arrayElemValue.IsValid() { + // The element is nil, then create one and set it to the slice. + // The "reflect.New(itemType.Elem())" creates a new element and returns the address of it. + // For example: + // reflect.New(itemType.Elem()) => *Entity + // reflect.New(itemType.Elem()).Elem() => Entity + arrayElemValue = reflect.New(arrayItemType.Elem()).Elem() + arrayValue.Index(i).Set(arrayElemValue.Addr()) + } + } else { + // Like: []Entity + } + bindToAttrValue = arrayElemValue.FieldByName(bindToAttrName) + if relationAttrName != "" { + // Attribute value of current slice element. 
+ relationFromAttrValue = arrayElemValue.FieldByName(relationAttrName) + if relationFromAttrValue.Kind() == reflect.Ptr { + relationFromAttrValue = relationFromAttrValue.Elem() + } + } else { + // Current slice element. + relationFromAttrValue = arrayElemValue + } + if len(relationDataMap) > 0 && !relationFromAttrValue.IsValid() { + return gerror.NewCodef(gcode.CodeInvalidParameter, `invalid relation fields specified: "%v"`, relationFields) + } + // Check and find possible bind to attribute name. + if relationFields != "" && !relationBindToFieldNameChecked { + relationFromAttrField = relationFromAttrValue.FieldByName(relationBindToFieldName) + if !relationFromAttrField.IsValid() { + var ( + filedMap, _ = gstructs.FieldMap(gstructs.FieldMapInput{ + Pointer: relationFromAttrValue, + RecursiveOption: gstructs.RecursiveOptionEmbeddedNoTag, + }) + ) + if key, _ := utils.MapPossibleItemByKey(Map(filedMap), relationBindToFieldName); key == "" { + return gerror.NewCodef( + gcode.CodeInvalidParameter, + `cannot find possible related attribute name "%s" from given relation fields "%s"`, + relationBindToFieldName, + relationFields, + ) + } else { + relationBindToFieldName = key + } + } + relationBindToFieldNameChecked = true + } + switch bindToAttrKind { + case reflect.Array, reflect.Slice: + if len(relationDataMap) > 0 { + relationFromAttrField = relationFromAttrValue.FieldByName(relationBindToFieldName) + if relationFromAttrField.IsValid() { + // results := make(Result, 0) + results := make([]interface{}, 0) + for _, v := range SliceAny(relationDataMap[String(relationFromAttrField.Interface())]) { + item := v + results = append(results, item) + } + if err = Structs(results, bindToAttrValue.Addr()); err != nil { + return err + } + } else { + // Maybe the attribute does not exist yet. 
+ return gerror.NewCodef(gcode.CodeInvalidParameter, `invalid relation fields specified: "%v"`, relationFields) + } + } else { + return gerror.NewCodef( + gcode.CodeInvalidParameter, + `relationKey should not be empty as field "%s" is slice`, + bindToAttrName, + ) + } + + case reflect.Ptr: + var element reflect.Value + if bindToAttrValue.IsNil() { + element = reflect.New(bindToAttrType.Elem()).Elem() + } else { + element = bindToAttrValue.Elem() + } + if len(relationDataMap) > 0 { + relationFromAttrField = relationFromAttrValue.FieldByName(relationBindToFieldName) + if relationFromAttrField.IsValid() { + v := relationDataMap[String(relationFromAttrField.Interface())] + if v == nil { + // There's no relational data. + continue + } + if utils.IsSlice(v) { + if err = Struct(SliceAny(v)[0], element); err != nil { + return err + } + } else { + if err = Struct(v, element); err != nil { + return err + } + } + } else { + // Maybe the attribute does not exist yet. + return gerror.NewCodef(gcode.CodeInvalidParameter, `invalid relation fields specified: "%v"`, relationFields) + } + } else { + if i >= len(maps) { + // There's no relational data. + continue + } + v := maps[i] + if v == nil { + // There's no relational data. + continue + } + if err = Struct(v, element); err != nil { + return err + } + } + bindToAttrValue.Set(element.Addr()) + + case reflect.Struct: + if len(relationDataMap) > 0 { + relationFromAttrField = relationFromAttrValue.FieldByName(relationBindToFieldName) + if relationFromAttrField.IsValid() { + relationDataItem := relationDataMap[String(relationFromAttrField.Interface())] + if relationDataItem == nil { + // There's no relational data. + continue + } + if utils.IsSlice(relationDataItem) { + if err = Struct(SliceAny(relationDataItem)[0], bindToAttrValue); err != nil { + return err + } + } else { + if err = Struct(relationDataItem, bindToAttrValue); err != nil { + return err + } + } + } else { + // Maybe the attribute does not exist yet. 
+ return gerror.NewCodef(gcode.CodeInvalidParameter, `invalid relation fields specified: "%v"`, relationFields) + } + } else { + if i >= len(maps) { + // There's no relational data. + continue + } + relationDataItem := maps[i] + if relationDataItem == nil { + // There's no relational data. + continue + } + if err = Struct(relationDataItem, bindToAttrValue); err != nil { + return err + } + } + + default: + return gerror.NewCodef(gcode.CodeInvalidParameter, `unsupported attribute type: %s`, bindToAttrKind.String()) + } + } + reflect.ValueOf(structSlicePointer).Elem().Set(arrayValue) + return nil +} diff --git a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_slice_any.go b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_slice_any.go new file mode 100644 index 00000000..8308fdf6 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_slice_any.go @@ -0,0 +1,130 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gconv + +import ( + "reflect" + + "github.com/gogf/gf/v2/internal/json" + "github.com/gogf/gf/v2/internal/reflection" +) + +// SliceAny is alias of Interfaces. +func SliceAny(any interface{}) []interface{} { + return Interfaces(any) +} + +// Interfaces converts `any` to []interface{}. 
+func Interfaces(any interface{}) []interface{} { + if any == nil { + return nil + } + var array []interface{} + switch value := any.(type) { + case []interface{}: + array = value + case []string: + array = make([]interface{}, len(value)) + for k, v := range value { + array[k] = v + } + case []int: + array = make([]interface{}, len(value)) + for k, v := range value { + array[k] = v + } + case []int8: + array = make([]interface{}, len(value)) + for k, v := range value { + array[k] = v + } + case []int16: + array = make([]interface{}, len(value)) + for k, v := range value { + array[k] = v + } + case []int32: + array = make([]interface{}, len(value)) + for k, v := range value { + array[k] = v + } + case []int64: + array = make([]interface{}, len(value)) + for k, v := range value { + array[k] = v + } + case []uint: + array = make([]interface{}, len(value)) + for k, v := range value { + array[k] = v + } + case []uint8: + if json.Valid(value) { + _ = json.UnmarshalUseNumber(value, &array) + } else { + array = make([]interface{}, len(value)) + for k, v := range value { + array[k] = v + } + } + case []uint16: + array = make([]interface{}, len(value)) + for k, v := range value { + array[k] = v + } + case []uint32: + for _, v := range value { + array = append(array, v) + } + case []uint64: + array = make([]interface{}, len(value)) + for k, v := range value { + array[k] = v + } + case []bool: + array = make([]interface{}, len(value)) + for k, v := range value { + array[k] = v + } + case []float32: + array = make([]interface{}, len(value)) + for k, v := range value { + array[k] = v + } + case []float64: + array = make([]interface{}, len(value)) + for k, v := range value { + array[k] = v + } + } + if array != nil { + return array + } + if v, ok := any.(iInterfaces); ok { + return v.Interfaces() + } + // JSON format string value converting. + if checkJsonAndUnmarshalUseNumber(any, &array) { + return array + } + // Not a common type, it then uses reflection for conversion. 
+ originValueAndKind := reflection.OriginValueAndKind(any) + switch originValueAndKind.OriginKind { + case reflect.Slice, reflect.Array: + var ( + length = originValueAndKind.OriginValue.Len() + slice = make([]interface{}, length) + ) + for i := 0; i < length; i++ { + slice[i] = originValueAndKind.OriginValue.Index(i).Interface() + } + return slice + + default: + return []interface{}{any} + } +} diff --git a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_slice_float.go b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_slice_float.go new file mode 100644 index 00000000..3d7b4994 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_slice_float.go @@ -0,0 +1,282 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gconv + +import ( + "reflect" + + "github.com/gogf/gf/v2/internal/json" + "github.com/gogf/gf/v2/internal/reflection" +) + +// SliceFloat is alias of Floats. +func SliceFloat(any interface{}) []float64 { + return Floats(any) +} + +// SliceFloat32 is alias of Float32s. +func SliceFloat32(any interface{}) []float32 { + return Float32s(any) +} + +// SliceFloat64 is alias of Float64s. +func SliceFloat64(any interface{}) []float64 { + return Floats(any) +} + +// Floats converts `any` to []float64. +func Floats(any interface{}) []float64 { + return Float64s(any) +} + +// Float32s converts `any` to []float32. 
+func Float32s(any interface{}) []float32 { + if any == nil { + return nil + } + var ( + array []float32 = nil + ) + switch value := any.(type) { + case string: + if value == "" { + return []float32{} + } + return []float32{Float32(value)} + case []string: + array = make([]float32, len(value)) + for k, v := range value { + array[k] = Float32(v) + } + case []int: + array = make([]float32, len(value)) + for k, v := range value { + array[k] = Float32(v) + } + case []int8: + array = make([]float32, len(value)) + for k, v := range value { + array[k] = Float32(v) + } + case []int16: + array = make([]float32, len(value)) + for k, v := range value { + array[k] = Float32(v) + } + case []int32: + array = make([]float32, len(value)) + for k, v := range value { + array[k] = Float32(v) + } + case []int64: + array = make([]float32, len(value)) + for k, v := range value { + array[k] = Float32(v) + } + case []uint: + for _, v := range value { + array = append(array, Float32(v)) + } + case []uint8: + if json.Valid(value) { + _ = json.UnmarshalUseNumber(value, &array) + } else { + array = make([]float32, len(value)) + for k, v := range value { + array[k] = Float32(v) + } + } + case []uint16: + array = make([]float32, len(value)) + for k, v := range value { + array[k] = Float32(v) + } + case []uint32: + array = make([]float32, len(value)) + for k, v := range value { + array[k] = Float32(v) + } + case []uint64: + array = make([]float32, len(value)) + for k, v := range value { + array[k] = Float32(v) + } + case []bool: + array = make([]float32, len(value)) + for k, v := range value { + array[k] = Float32(v) + } + case []float32: + array = value + case []float64: + array = make([]float32, len(value)) + for k, v := range value { + array[k] = Float32(v) + } + case []interface{}: + array = make([]float32, len(value)) + for k, v := range value { + array[k] = Float32(v) + } + } + if array != nil { + return array + } + if v, ok := any.(iFloats); ok { + return Float32s(v.Floats()) + } + if v, 
ok := any.(iInterfaces); ok { + return Float32s(v.Interfaces()) + } + // JSON format string value converting. + if checkJsonAndUnmarshalUseNumber(any, &array) { + return array + } + // Not a common type, it then uses reflection for conversion. + originValueAndKind := reflection.OriginValueAndKind(any) + switch originValueAndKind.OriginKind { + case reflect.Slice, reflect.Array: + var ( + length = originValueAndKind.OriginValue.Len() + slice = make([]float32, length) + ) + for i := 0; i < length; i++ { + slice[i] = Float32(originValueAndKind.OriginValue.Index(i).Interface()) + } + return slice + + default: + if originValueAndKind.OriginValue.IsZero() { + return []float32{} + } + return []float32{Float32(any)} + } +} + +// Float64s converts `any` to []float64. +func Float64s(any interface{}) []float64 { + if any == nil { + return nil + } + var ( + array []float64 = nil + ) + switch value := any.(type) { + case string: + if value == "" { + return []float64{} + } + return []float64{Float64(value)} + case []string: + array = make([]float64, len(value)) + for k, v := range value { + array[k] = Float64(v) + } + case []int: + array = make([]float64, len(value)) + for k, v := range value { + array[k] = Float64(v) + } + case []int8: + array = make([]float64, len(value)) + for k, v := range value { + array[k] = Float64(v) + } + case []int16: + array = make([]float64, len(value)) + for k, v := range value { + array[k] = Float64(v) + } + case []int32: + array = make([]float64, len(value)) + for k, v := range value { + array[k] = Float64(v) + } + case []int64: + array = make([]float64, len(value)) + for k, v := range value { + array[k] = Float64(v) + } + case []uint: + for _, v := range value { + array = append(array, Float64(v)) + } + case []uint8: + if json.Valid(value) { + _ = json.UnmarshalUseNumber(value, &array) + } else { + array = make([]float64, len(value)) + for k, v := range value { + array[k] = Float64(v) + } + } + case []uint16: + array = make([]float64, len(value)) 
+ for k, v := range value { + array[k] = Float64(v) + } + case []uint32: + array = make([]float64, len(value)) + for k, v := range value { + array[k] = Float64(v) + } + case []uint64: + array = make([]float64, len(value)) + for k, v := range value { + array[k] = Float64(v) + } + case []bool: + array = make([]float64, len(value)) + for k, v := range value { + array[k] = Float64(v) + } + case []float32: + array = make([]float64, len(value)) + for k, v := range value { + array[k] = Float64(v) + } + case []float64: + array = value + case []interface{}: + array = make([]float64, len(value)) + for k, v := range value { + array[k] = Float64(v) + } + } + if array != nil { + return array + } + if v, ok := any.(iFloats); ok { + return v.Floats() + } + if v, ok := any.(iInterfaces); ok { + return Floats(v.Interfaces()) + } + // JSON format string value converting. + if checkJsonAndUnmarshalUseNumber(any, &array) { + return array + } + // Not a common type, it then uses reflection for conversion. + originValueAndKind := reflection.OriginValueAndKind(any) + switch originValueAndKind.OriginKind { + case reflect.Slice, reflect.Array: + var ( + length = originValueAndKind.OriginValue.Len() + slice = make([]float64, length) + ) + for i := 0; i < length; i++ { + slice[i] = Float64(originValueAndKind.OriginValue.Index(i).Interface()) + } + return slice + + default: + if originValueAndKind.OriginValue.IsZero() { + return []float64{} + } + return []float64{Float64(any)} + } +} diff --git a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_slice_int.go b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_slice_int.go new file mode 100644 index 00000000..f28e7fd1 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_slice_int.go @@ -0,0 +1,416 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. 
+// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gconv + +import ( + "reflect" + + "github.com/gogf/gf/v2/internal/json" + "github.com/gogf/gf/v2/internal/reflection" +) + +// SliceInt is alias of Ints. +func SliceInt(any interface{}) []int { + return Ints(any) +} + +// SliceInt32 is alias of Int32s. +func SliceInt32(any interface{}) []int32 { + return Int32s(any) +} + +// SliceInt64 is alias of Int64s. +func SliceInt64(any interface{}) []int64 { + return Int64s(any) +} + +// Ints converts `any` to []int. +func Ints(any interface{}) []int { + if any == nil { + return nil + } + var ( + array []int = nil + ) + switch value := any.(type) { + case []string: + array = make([]int, len(value)) + for k, v := range value { + array[k] = Int(v) + } + case []int: + array = value + case []int8: + array = make([]int, len(value)) + for k, v := range value { + array[k] = int(v) + } + case []int16: + array = make([]int, len(value)) + for k, v := range value { + array[k] = int(v) + } + case []int32: + array = make([]int, len(value)) + for k, v := range value { + array[k] = int(v) + } + case []int64: + array = make([]int, len(value)) + for k, v := range value { + array[k] = int(v) + } + case []uint: + array = make([]int, len(value)) + for k, v := range value { + array[k] = int(v) + } + case []uint8: + if json.Valid(value) { + _ = json.UnmarshalUseNumber(value, &array) + } else { + array = make([]int, len(value)) + for k, v := range value { + array[k] = int(v) + } + } + case []uint16: + array = make([]int, len(value)) + for k, v := range value { + array[k] = int(v) + } + case []uint32: + array = make([]int, len(value)) + for k, v := range value { + array[k] = int(v) + } + case []uint64: + array = make([]int, len(value)) + for k, v := range value { + array[k] = int(v) + } + case []bool: + array = make([]int, len(value)) + for k, v := range value { + if v { + array[k] = 1 + } else { + array[k] = 0 + } + } + 
case []float32: + array = make([]int, len(value)) + for k, v := range value { + array[k] = Int(v) + } + case []float64: + array = make([]int, len(value)) + for k, v := range value { + array[k] = Int(v) + } + case []interface{}: + array = make([]int, len(value)) + for k, v := range value { + array[k] = Int(v) + } + case [][]byte: + array = make([]int, len(value)) + for k, v := range value { + array[k] = Int(v) + } + } + if array != nil { + return array + } + if v, ok := any.(iInts); ok { + return v.Ints() + } + if v, ok := any.(iInterfaces); ok { + return Ints(v.Interfaces()) + } + // JSON format string value converting. + if checkJsonAndUnmarshalUseNumber(any, &array) { + return array + } + // Not a common type, it then uses reflection for conversion. + originValueAndKind := reflection.OriginValueAndKind(any) + switch originValueAndKind.OriginKind { + case reflect.Slice, reflect.Array: + var ( + length = originValueAndKind.OriginValue.Len() + slice = make([]int, length) + ) + for i := 0; i < length; i++ { + slice[i] = Int(originValueAndKind.OriginValue.Index(i).Interface()) + } + return slice + + default: + if originValueAndKind.OriginValue.IsZero() { + return []int{} + } + return []int{Int(any)} + } +} + +// Int32s converts `any` to []int32. 
+func Int32s(any interface{}) []int32 { + if any == nil { + return nil + } + var ( + array []int32 = nil + ) + switch value := any.(type) { + case []string: + array = make([]int32, len(value)) + for k, v := range value { + array[k] = Int32(v) + } + case []int: + array = make([]int32, len(value)) + for k, v := range value { + array[k] = int32(v) + } + case []int8: + array = make([]int32, len(value)) + for k, v := range value { + array[k] = int32(v) + } + case []int16: + array = make([]int32, len(value)) + for k, v := range value { + array[k] = int32(v) + } + case []int32: + array = value + case []int64: + array = make([]int32, len(value)) + for k, v := range value { + array[k] = int32(v) + } + case []uint: + array = make([]int32, len(value)) + for k, v := range value { + array[k] = int32(v) + } + case []uint8: + if json.Valid(value) { + _ = json.UnmarshalUseNumber(value, &array) + } else { + array = make([]int32, len(value)) + for k, v := range value { + array[k] = int32(v) + } + } + case []uint16: + array = make([]int32, len(value)) + for k, v := range value { + array[k] = int32(v) + } + case []uint32: + array = make([]int32, len(value)) + for k, v := range value { + array[k] = int32(v) + } + case []uint64: + array = make([]int32, len(value)) + for k, v := range value { + array[k] = int32(v) + } + case []bool: + array = make([]int32, len(value)) + for k, v := range value { + if v { + array[k] = 1 + } else { + array[k] = 0 + } + } + case []float32: + array = make([]int32, len(value)) + for k, v := range value { + array[k] = Int32(v) + } + case []float64: + array = make([]int32, len(value)) + for k, v := range value { + array[k] = Int32(v) + } + case []interface{}: + array = make([]int32, len(value)) + for k, v := range value { + array[k] = Int32(v) + } + case [][]byte: + array = make([]int32, len(value)) + for k, v := range value { + array[k] = Int32(v) + } + } + if array != nil { + return array + } + if v, ok := any.(iInts); ok { + return Int32s(v.Ints()) + } + if 
v, ok := any.(iInterfaces); ok { + return Int32s(v.Interfaces()) + } + // JSON format string value converting. + if checkJsonAndUnmarshalUseNumber(any, &array) { + return array + } + // Not a common type, it then uses reflection for conversion. + originValueAndKind := reflection.OriginValueAndKind(any) + switch originValueAndKind.OriginKind { + case reflect.Slice, reflect.Array: + var ( + length = originValueAndKind.OriginValue.Len() + slice = make([]int32, length) + ) + for i := 0; i < length; i++ { + slice[i] = Int32(originValueAndKind.OriginValue.Index(i).Interface()) + } + return slice + + default: + if originValueAndKind.OriginValue.IsZero() { + return []int32{} + } + return []int32{Int32(any)} + } +} + +// Int64s converts `any` to []int64. +func Int64s(any interface{}) []int64 { + if any == nil { + return nil + } + var ( + array []int64 = nil + ) + switch value := any.(type) { + case []string: + array = make([]int64, len(value)) + for k, v := range value { + array[k] = Int64(v) + } + case []int: + array = make([]int64, len(value)) + for k, v := range value { + array[k] = int64(v) + } + case []int8: + array = make([]int64, len(value)) + for k, v := range value { + array[k] = int64(v) + } + case []int16: + array = make([]int64, len(value)) + for k, v := range value { + array[k] = int64(v) + } + case []int32: + array = make([]int64, len(value)) + for k, v := range value { + array[k] = int64(v) + } + case []int64: + array = value + case []uint: + array = make([]int64, len(value)) + for k, v := range value { + array[k] = int64(v) + } + case []uint8: + if json.Valid(value) { + _ = json.UnmarshalUseNumber(value, &array) + } else { + array = make([]int64, len(value)) + for k, v := range value { + array[k] = int64(v) + } + } + case []uint16: + array = make([]int64, len(value)) + for k, v := range value { + array[k] = int64(v) + } + case []uint32: + array = make([]int64, len(value)) + for k, v := range value { + array[k] = int64(v) + } + case []uint64: + array = 
make([]int64, len(value)) + for k, v := range value { + array[k] = int64(v) + } + case []bool: + array = make([]int64, len(value)) + for k, v := range value { + if v { + array[k] = 1 + } else { + array[k] = 0 + } + } + case []float32: + array = make([]int64, len(value)) + for k, v := range value { + array[k] = Int64(v) + } + case []float64: + array = make([]int64, len(value)) + for k, v := range value { + array[k] = Int64(v) + } + case []interface{}: + array = make([]int64, len(value)) + for k, v := range value { + array[k] = Int64(v) + } + case [][]byte: + array = make([]int64, len(value)) + for k, v := range value { + array[k] = Int64(v) + } + } + if array != nil { + return array + } + if v, ok := any.(iInts); ok { + return Int64s(v.Ints()) + } + if v, ok := any.(iInterfaces); ok { + return Int64s(v.Interfaces()) + } + // JSON format string value converting. + if checkJsonAndUnmarshalUseNumber(any, &array) { + return array + } + // Not a common type, it then uses reflection for conversion. + originValueAndKind := reflection.OriginValueAndKind(any) + switch originValueAndKind.OriginKind { + case reflect.Slice, reflect.Array: + var ( + length = originValueAndKind.OriginValue.Len() + slice = make([]int64, length) + ) + for i := 0; i < length; i++ { + slice[i] = Int64(originValueAndKind.OriginValue.Index(i).Interface()) + } + return slice + + default: + if originValueAndKind.OriginValue.IsZero() { + return []int64{} + } + return []int64{Int64(any)} + } +} diff --git a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_slice_str.go b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_slice_str.go new file mode 100644 index 00000000..c085d271 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_slice_str.go @@ -0,0 +1,144 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. 
+// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gconv + +import ( + "reflect" + + "github.com/gogf/gf/v2/internal/json" + "github.com/gogf/gf/v2/internal/reflection" +) + +// SliceStr is alias of Strings. +func SliceStr(any interface{}) []string { + return Strings(any) +} + +// Strings converts `any` to []string. +func Strings(any interface{}) []string { + if any == nil { + return nil + } + var ( + array []string = nil + ) + switch value := any.(type) { + case []int: + array = make([]string, len(value)) + for k, v := range value { + array[k] = String(v) + } + case []int8: + array = make([]string, len(value)) + for k, v := range value { + array[k] = String(v) + } + case []int16: + array = make([]string, len(value)) + for k, v := range value { + array[k] = String(v) + } + case []int32: + array = make([]string, len(value)) + for k, v := range value { + array[k] = String(v) + } + case []int64: + array = make([]string, len(value)) + for k, v := range value { + array[k] = String(v) + } + case []uint: + array = make([]string, len(value)) + for k, v := range value { + array[k] = String(v) + } + case []uint8: + if json.Valid(value) { + _ = json.UnmarshalUseNumber(value, &array) + } else { + array = make([]string, len(value)) + for k, v := range value { + array[k] = String(v) + } + } + case []uint16: + array = make([]string, len(value)) + for k, v := range value { + array[k] = String(v) + } + case []uint32: + array = make([]string, len(value)) + for k, v := range value { + array[k] = String(v) + } + case []uint64: + array = make([]string, len(value)) + for k, v := range value { + array[k] = String(v) + } + case []bool: + array = make([]string, len(value)) + for k, v := range value { + array[k] = String(v) + } + case []float32: + array = make([]string, len(value)) + for k, v := range value { + array[k] = String(v) + } + case []float64: + array = make([]string, len(value)) + for k, v := range value { 
+ array[k] = String(v) + } + case []interface{}: + array = make([]string, len(value)) + for k, v := range value { + array[k] = String(v) + } + case []string: + array = value + case [][]byte: + array = make([]string, len(value)) + for k, v := range value { + array[k] = String(v) + } + } + if array != nil { + return array + } + if v, ok := any.(iStrings); ok { + return v.Strings() + } + if v, ok := any.(iInterfaces); ok { + return Strings(v.Interfaces()) + } + // JSON format string value converting. + if checkJsonAndUnmarshalUseNumber(any, &array) { + return array + } + // Not a common type, it then uses reflection for conversion. + originValueAndKind := reflection.OriginValueAndKind(any) + switch originValueAndKind.OriginKind { + case reflect.Slice, reflect.Array: + var ( + length = originValueAndKind.OriginValue.Len() + slice = make([]string, length) + ) + for i := 0; i < length; i++ { + slice[i] = String(originValueAndKind.OriginValue.Index(i).Interface()) + } + return slice + + default: + if originValueAndKind.OriginValue.IsZero() { + return []string{} + } + return []string{String(any)} + } +} diff --git a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_slice_uint.go b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_slice_uint.go new file mode 100644 index 00000000..a1ffa761 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_slice_uint.go @@ -0,0 +1,436 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gconv + +import ( + "reflect" + "strings" + + "github.com/gogf/gf/v2/internal/json" + "github.com/gogf/gf/v2/internal/reflection" + "github.com/gogf/gf/v2/internal/utils" +) + +// SliceUint is alias of Uints. +func SliceUint(any interface{}) []uint { + return Uints(any) +} + +// SliceUint32 is alias of Uint32s. 
+func SliceUint32(any interface{}) []uint32 { + return Uint32s(any) +} + +// SliceUint64 is alias of Uint64s. +func SliceUint64(any interface{}) []uint64 { + return Uint64s(any) +} + +// Uints converts `any` to []uint. +func Uints(any interface{}) []uint { + if any == nil { + return nil + } + + var ( + array []uint = nil + ) + switch value := any.(type) { + case string: + value = strings.TrimSpace(value) + if value == "" { + return []uint{} + } + if utils.IsNumeric(value) { + return []uint{Uint(value)} + } + + case []string: + array = make([]uint, len(value)) + for k, v := range value { + array[k] = Uint(v) + } + case []int8: + array = make([]uint, len(value)) + for k, v := range value { + array[k] = uint(v) + } + case []int16: + array = make([]uint, len(value)) + for k, v := range value { + array[k] = uint(v) + } + case []int32: + array = make([]uint, len(value)) + for k, v := range value { + array[k] = uint(v) + } + case []int64: + array = make([]uint, len(value)) + for k, v := range value { + array[k] = uint(v) + } + case []uint: + array = value + case []uint8: + if json.Valid(value) { + _ = json.UnmarshalUseNumber(value, &array) + } else { + array = make([]uint, len(value)) + for k, v := range value { + array[k] = uint(v) + } + } + case []uint16: + array = make([]uint, len(value)) + for k, v := range value { + array[k] = uint(v) + } + case []uint32: + array = make([]uint, len(value)) + for k, v := range value { + array[k] = uint(v) + } + case []uint64: + array = make([]uint, len(value)) + for k, v := range value { + array[k] = uint(v) + } + case []bool: + array = make([]uint, len(value)) + for k, v := range value { + if v { + array[k] = 1 + } else { + array[k] = 0 + } + } + case []float32: + array = make([]uint, len(value)) + for k, v := range value { + array[k] = Uint(v) + } + case []float64: + array = make([]uint, len(value)) + for k, v := range value { + array[k] = Uint(v) + } + case []interface{}: + array = make([]uint, len(value)) + for k, v := range value 
{ + array[k] = Uint(v) + } + case [][]byte: + array = make([]uint, len(value)) + for k, v := range value { + array[k] = Uint(v) + } + } + + if array != nil { + return array + } + + // Default handler. + if v, ok := any.(iUints); ok { + return v.Uints() + } + if v, ok := any.(iInterfaces); ok { + return Uints(v.Interfaces()) + } + // JSON format string value converting. + if checkJsonAndUnmarshalUseNumber(any, &array) { + return array + } + // Not a common type, it then uses reflection for conversion. + originValueAndKind := reflection.OriginValueAndKind(any) + switch originValueAndKind.OriginKind { + case reflect.Slice, reflect.Array: + var ( + length = originValueAndKind.OriginValue.Len() + slice = make([]uint, length) + ) + for i := 0; i < length; i++ { + slice[i] = Uint(originValueAndKind.OriginValue.Index(i).Interface()) + } + return slice + + default: + if originValueAndKind.OriginValue.IsZero() { + return []uint{} + } + return []uint{Uint(any)} + } +} + +// Uint32s converts `any` to []uint32. 
+func Uint32s(any interface{}) []uint32 { + if any == nil { + return nil + } + var ( + array []uint32 = nil + ) + switch value := any.(type) { + case string: + value = strings.TrimSpace(value) + if value == "" { + return []uint32{} + } + if utils.IsNumeric(value) { + return []uint32{Uint32(value)} + } + case []string: + array = make([]uint32, len(value)) + for k, v := range value { + array[k] = Uint32(v) + } + case []int8: + array = make([]uint32, len(value)) + for k, v := range value { + array[k] = uint32(v) + } + case []int16: + array = make([]uint32, len(value)) + for k, v := range value { + array[k] = uint32(v) + } + case []int32: + array = make([]uint32, len(value)) + for k, v := range value { + array[k] = uint32(v) + } + case []int64: + array = make([]uint32, len(value)) + for k, v := range value { + array[k] = uint32(v) + } + case []uint: + array = make([]uint32, len(value)) + for k, v := range value { + array[k] = uint32(v) + } + case []uint8: + if json.Valid(value) { + _ = json.UnmarshalUseNumber(value, &array) + } else { + array = make([]uint32, len(value)) + for k, v := range value { + array[k] = uint32(v) + } + } + case []uint16: + array = make([]uint32, len(value)) + for k, v := range value { + array[k] = uint32(v) + } + case []uint32: + array = value + case []uint64: + array = make([]uint32, len(value)) + for k, v := range value { + array[k] = uint32(v) + } + case []bool: + array = make([]uint32, len(value)) + for k, v := range value { + if v { + array[k] = 1 + } else { + array[k] = 0 + } + } + case []float32: + array = make([]uint32, len(value)) + for k, v := range value { + array[k] = Uint32(v) + } + case []float64: + array = make([]uint32, len(value)) + for k, v := range value { + array[k] = Uint32(v) + } + case []interface{}: + array = make([]uint32, len(value)) + for k, v := range value { + array[k] = Uint32(v) + } + case [][]byte: + array = make([]uint32, len(value)) + for k, v := range value { + array[k] = Uint32(v) + } + } + if array != nil { 
+ return array + } + + // Default handler. + if v, ok := any.(iUints); ok { + return Uint32s(v.Uints()) + } + if v, ok := any.(iInterfaces); ok { + return Uint32s(v.Interfaces()) + } + // JSON format string value converting. + if checkJsonAndUnmarshalUseNumber(any, &array) { + return array + } + // Not a common type, it then uses reflection for conversion. + originValueAndKind := reflection.OriginValueAndKind(any) + switch originValueAndKind.OriginKind { + case reflect.Slice, reflect.Array: + var ( + length = originValueAndKind.OriginValue.Len() + slice = make([]uint32, length) + ) + for i := 0; i < length; i++ { + slice[i] = Uint32(originValueAndKind.OriginValue.Index(i).Interface()) + } + return slice + + default: + if originValueAndKind.OriginValue.IsZero() { + return []uint32{} + } + return []uint32{Uint32(any)} + } +} + +// Uint64s converts `any` to []uint64. +func Uint64s(any interface{}) []uint64 { + if any == nil { + return nil + } + var ( + array []uint64 = nil + ) + switch value := any.(type) { + case string: + value = strings.TrimSpace(value) + if value == "" { + return []uint64{} + } + if utils.IsNumeric(value) { + return []uint64{Uint64(value)} + } + + case []string: + array = make([]uint64, len(value)) + for k, v := range value { + array[k] = Uint64(v) + } + case []int8: + array = make([]uint64, len(value)) + for k, v := range value { + array[k] = uint64(v) + } + case []int16: + array = make([]uint64, len(value)) + for k, v := range value { + array[k] = uint64(v) + } + case []int32: + array = make([]uint64, len(value)) + for k, v := range value { + array[k] = uint64(v) + } + case []int64: + array = make([]uint64, len(value)) + for k, v := range value { + array[k] = uint64(v) + } + case []uint: + array = make([]uint64, len(value)) + for k, v := range value { + array[k] = uint64(v) + } + case []uint8: + if json.Valid(value) { + _ = json.UnmarshalUseNumber(value, &array) + } else { + array = make([]uint64, len(value)) + for k, v := range value { + 
array[k] = uint64(v) + } + } + case []uint16: + array = make([]uint64, len(value)) + for k, v := range value { + array[k] = uint64(v) + } + case []uint32: + array = make([]uint64, len(value)) + for k, v := range value { + array[k] = uint64(v) + } + case []uint64: + array = value + case []bool: + array = make([]uint64, len(value)) + for k, v := range value { + if v { + array[k] = 1 + } else { + array[k] = 0 + } + } + case []float32: + array = make([]uint64, len(value)) + for k, v := range value { + array[k] = Uint64(v) + } + case []float64: + array = make([]uint64, len(value)) + for k, v := range value { + array[k] = Uint64(v) + } + case []interface{}: + array = make([]uint64, len(value)) + for k, v := range value { + array[k] = Uint64(v) + } + case [][]byte: + array = make([]uint64, len(value)) + for k, v := range value { + array[k] = Uint64(v) + } + } + if array != nil { + return array + } + // Default handler. + if v, ok := any.(iUints); ok { + return Uint64s(v.Uints()) + } + if v, ok := any.(iInterfaces); ok { + return Uint64s(v.Interfaces()) + } + // JSON format string value converting. + if checkJsonAndUnmarshalUseNumber(any, &array) { + return array + } + // Not a common type, it then uses reflection for conversion. 
+ originValueAndKind := reflection.OriginValueAndKind(any) + switch originValueAndKind.OriginKind { + case reflect.Slice, reflect.Array: + var ( + length = originValueAndKind.OriginValue.Len() + slice = make([]uint64, length) + ) + for i := 0; i < length; i++ { + slice[i] = Uint64(originValueAndKind.OriginValue.Index(i).Interface()) + } + return slice + + default: + if originValueAndKind.OriginValue.IsZero() { + return []uint64{} + } + return []uint64{Uint64(any)} + } +} diff --git a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_struct.go b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_struct.go new file mode 100644 index 00000000..786233f7 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_struct.go @@ -0,0 +1,620 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gconv + +import ( + "reflect" + "strings" + + "github.com/gogf/gf/v2/errors/gcode" + "github.com/gogf/gf/v2/errors/gerror" + "github.com/gogf/gf/v2/internal/empty" + "github.com/gogf/gf/v2/internal/json" + "github.com/gogf/gf/v2/internal/utils" + "github.com/gogf/gf/v2/os/gstructs" +) + +// Struct maps the params key-value pairs to the corresponding struct object's attributes. +// The third parameter `mapping` is unnecessary, indicating the mapping rules between the +// custom key name and the attribute name(case-sensitive). +// +// Note: +// 1. The `params` can be any type of map/struct, usually a map. +// 2. The `pointer` should be type of *struct/**struct, which is a pointer to struct object +// or struct pointer. +// 3. Only the public attributes of struct object can be mapped. +// 4. If `params` is a map, the key of the map `params` can be lowercase. 
+// It will automatically convert the first letter of the key to uppercase +// in mapping procedure to do the matching. +// It ignores the map key, if it does not match. +func Struct(params interface{}, pointer interface{}, mapping ...map[string]string) (err error) { + return Scan(params, pointer, mapping...) +} + +// StructTag acts as Struct but also with support for priority tag feature, which retrieves the +// specified tags for `params` key-value items to struct attribute names mapping. +// The parameter `priorityTag` supports multiple tags that can be joined with char ','. +func StructTag(params interface{}, pointer interface{}, priorityTag string) (err error) { + return doStruct(params, pointer, nil, priorityTag) +} + +// doStructWithJsonCheck checks if given `params` is JSON, it then uses json.Unmarshal doing the converting. +func doStructWithJsonCheck(params interface{}, pointer interface{}) (err error, ok bool) { + switch r := params.(type) { + case []byte: + if json.Valid(r) { + if rv, ok := pointer.(reflect.Value); ok { + if rv.Kind() == reflect.Ptr { + if rv.IsNil() { + return nil, false + } + return json.UnmarshalUseNumber(r, rv.Interface()), true + } else if rv.CanAddr() { + return json.UnmarshalUseNumber(r, rv.Addr().Interface()), true + } + } else { + return json.UnmarshalUseNumber(r, pointer), true + } + } + case string: + if paramsBytes := []byte(r); json.Valid(paramsBytes) { + if rv, ok := pointer.(reflect.Value); ok { + if rv.Kind() == reflect.Ptr { + if rv.IsNil() { + return nil, false + } + return json.UnmarshalUseNumber(paramsBytes, rv.Interface()), true + } else if rv.CanAddr() { + return json.UnmarshalUseNumber(paramsBytes, rv.Addr().Interface()), true + } + } else { + return json.UnmarshalUseNumber(paramsBytes, pointer), true + } + } + default: + // The `params` might be struct that implements interface function Interface, eg: gvar.Var. 
+ if v, ok := params.(iInterface); ok { + return doStructWithJsonCheck(v.Interface(), pointer) + } + } + return nil, false +} + +// doStruct is the core internal converting function for any data to struct. +func doStruct(params interface{}, pointer interface{}, mapping map[string]string, priorityTag string) (err error) { + if params == nil { + // If `params` is nil, no conversion. + return nil + } + if pointer == nil { + return gerror.NewCode(gcode.CodeInvalidParameter, "object pointer cannot be nil") + } + + defer func() { + // Catch the panic, especially the reflection operation panics. + if exception := recover(); exception != nil { + if v, ok := exception.(error); ok && gerror.HasStack(v) { + err = v + } else { + err = gerror.NewCodeSkipf(gcode.CodeInternalError, 1, "%+v", exception) + } + } + }() + + // JSON content converting. + err, ok := doStructWithJsonCheck(params, pointer) + if err != nil { + return err + } + if ok { + return nil + } + + var ( + paramsReflectValue reflect.Value + paramsInterface interface{} // DO NOT use `params` directly as it might be type `reflect.Value` + pointerReflectValue reflect.Value + pointerReflectKind reflect.Kind + pointerElemReflectValue reflect.Value // The pointed element. + ) + if v, ok := params.(reflect.Value); ok { + paramsReflectValue = v + } else { + paramsReflectValue = reflect.ValueOf(params) + } + paramsInterface = paramsReflectValue.Interface() + if v, ok := pointer.(reflect.Value); ok { + pointerReflectValue = v + pointerElemReflectValue = v + } else { + pointerReflectValue = reflect.ValueOf(pointer) + pointerReflectKind = pointerReflectValue.Kind() + if pointerReflectKind != reflect.Ptr { + return gerror.NewCodef(gcode.CodeInvalidParameter, "object pointer should be type of '*struct', but got '%v'", pointerReflectKind) + } + // Using IsNil on reflect.Ptr variable is OK. 
+ if !pointerReflectValue.IsValid() || pointerReflectValue.IsNil() { + return gerror.NewCode(gcode.CodeInvalidParameter, "object pointer cannot be nil") + } + pointerElemReflectValue = pointerReflectValue.Elem() + } + + // If `params` and `pointer` are the same type, the do directly assignment. + // For performance enhancement purpose. + if pointerElemReflectValue.IsValid() { + switch { + // Eg: + // UploadFile => UploadFile + // *UploadFile => *UploadFile + case pointerElemReflectValue.Type() == paramsReflectValue.Type(): + pointerElemReflectValue.Set(paramsReflectValue) + return nil + + // Eg: + // UploadFile => *UploadFile + case pointerElemReflectValue.Kind() == reflect.Ptr && pointerElemReflectValue.Elem().IsValid() && + pointerElemReflectValue.Elem().Type() == paramsReflectValue.Type(): + pointerElemReflectValue.Elem().Set(paramsReflectValue) + return nil + + // Eg: + // *UploadFile => UploadFile + case paramsReflectValue.Kind() == reflect.Ptr && paramsReflectValue.Elem().IsValid() && + pointerElemReflectValue.Type() == paramsReflectValue.Elem().Type(): + pointerElemReflectValue.Set(paramsReflectValue.Elem()) + return nil + } + } + + // Normal unmarshalling interfaces checks. + if err, ok = bindVarToReflectValueWithInterfaceCheck(pointerReflectValue, paramsInterface); ok { + return err + } + + // It automatically creates struct object if necessary. + // For example, if `pointer` is **User, then `elem` is *User, which is a pointer to User. + if pointerElemReflectValue.Kind() == reflect.Ptr { + if !pointerElemReflectValue.IsValid() || pointerElemReflectValue.IsNil() { + e := reflect.New(pointerElemReflectValue.Type().Elem()).Elem() + pointerElemReflectValue.Set(e.Addr()) + } + // if v, ok := pointerElemReflectValue.Interface().(iUnmarshalValue); ok { + // return v.UnmarshalValue(params) + // } + // Note that it's `pointerElemReflectValue` here not `pointerReflectValue`. 
+ if err, ok = bindVarToReflectValueWithInterfaceCheck(pointerElemReflectValue, paramsInterface); ok { + return err + } + // Retrieve its element, may be struct at last. + pointerElemReflectValue = pointerElemReflectValue.Elem() + } + + // paramsMap is the map[string]interface{} type variable for params. + // DO NOT use MapDeep here. + paramsMap := Map(paramsInterface) + if paramsMap == nil { + return gerror.NewCodef( + gcode.CodeInvalidParameter, + `convert params from "%#v" to "map[string]interface{}" failed`, + params, + ) + } + + // Nothing to be done as the parameters are empty. + if len(paramsMap) == 0 { + return nil + } + + // It only performs one converting to the same attribute. + // doneMap is used to check repeated converting, its key is the real attribute name + // of the struct. + doneMap := make(map[string]struct{}) + + // The key of the attrMap is the attribute name of the struct, + // and the value is its replaced name for later comparison to improve performance. + var ( + tempName string + elemFieldType reflect.StructField + elemFieldValue reflect.Value + elemType = pointerElemReflectValue.Type() + attrToCheckNameMap = make(map[string]string) + ) + for i := 0; i < pointerElemReflectValue.NumField(); i++ { + elemFieldType = elemType.Field(i) + // Only do converting to public attributes. + if !utils.IsLetterUpper(elemFieldType.Name[0]) { + continue + } + // Maybe it's struct/*struct embedded. + if elemFieldType.Anonymous { + elemFieldValue = pointerElemReflectValue.Field(i) + // Ignore the interface attribute if it's nil. 
+ if elemFieldValue.Kind() == reflect.Interface { + elemFieldValue = elemFieldValue.Elem() + if !elemFieldValue.IsValid() { + continue + } + } + if err = doStruct(paramsMap, elemFieldValue, mapping, priorityTag); err != nil { + return err + } + } else { + tempName = elemFieldType.Name + attrToCheckNameMap[tempName] = utils.RemoveSymbols(tempName) + } + } + if len(attrToCheckNameMap) == 0 { + return nil + } + + // The key of the tagMap is the attribute name of the struct, + // and the value is its replaced tag name for later comparison to improve performance. + var ( + attrToTagCheckNameMap = make(map[string]string) + priorityTagArray []string + ) + if priorityTag != "" { + priorityTagArray = append(utils.SplitAndTrim(priorityTag, ","), StructTagPriority...) + } else { + priorityTagArray = StructTagPriority + } + tagToAttrNameMap, err := gstructs.TagMapName(pointerElemReflectValue, priorityTagArray) + if err != nil { + return err + } + for tagName, attributeName := range tagToAttrNameMap { + // If there's something else in the tag string, + // it uses the first part which is split using char ','. + // Eg: + // orm:"id, priority" + // orm:"name, with:uid=id" + attrToTagCheckNameMap[attributeName] = utils.RemoveSymbols(strings.Split(tagName, ",")[0]) + // If tag and attribute values both exist in `paramsMap`, + // it then uses the tag value overwriting the attribute value in `paramsMap`. + if paramsMap[tagName] != nil && paramsMap[attributeName] != nil { + paramsMap[attributeName] = paramsMap[tagName] + } + } + + var ( + attrName string + checkName string + ) + for paramName, paramValue := range paramsMap { + attrName = "" + // It firstly checks the passed mapping rules. + if len(mapping) > 0 { + if passedAttrKey, ok := mapping[paramName]; ok { + attrName = passedAttrKey + } + } + // It secondly checks the predefined tags and matching rules. 
+ if attrName == "" { + // It firstly considers `paramName` as accurate tag name, + // and retrieve attribute name from `tagToAttrNameMap` . + attrName = tagToAttrNameMap[paramName] + if attrName == "" { + checkName = utils.RemoveSymbols(paramName) + // Loop to find the matched attribute name with or without + // string cases and chars like '-'/'_'/'.'/' '. + + // Matching the parameters to struct tag names. + // The `attrKey` is the attribute name of the struct. + for attrKey, cmpKey := range attrToTagCheckNameMap { + if strings.EqualFold(checkName, cmpKey) { + attrName = attrKey + break + } + } + } + + // Matching the parameters to struct attributes. + if attrName == "" { + for attrKey, cmpKey := range attrToCheckNameMap { + // Eg: + // UserName eq user_name + // User-Name eq username + // username eq userName + // etc. + if strings.EqualFold(checkName, cmpKey) { + attrName = attrKey + break + } + } + } + } + + // No matching, it gives up this attribute converting. + if attrName == "" { + continue + } + // If the attribute name is already checked converting, then skip it. + if _, ok = doneMap[attrName]; ok { + continue + } + // Mark it done. + doneMap[attrName] = struct{}{} + if err = bindVarToStructAttr(pointerElemReflectValue, attrName, paramValue, mapping); err != nil { + return err + } + } + return nil +} + +// bindVarToStructAttr sets value to struct object attribute by name. +func bindVarToStructAttr(structReflectValue reflect.Value, attrName string, value interface{}, mapping map[string]string) (err error) { + structFieldValue := structReflectValue.FieldByName(attrName) + if !structFieldValue.IsValid() { + return nil + } + // CanSet checks whether attribute is public accessible. 
+ if !structFieldValue.CanSet() { + return nil + } + defer func() { + if exception := recover(); exception != nil { + if err = bindVarToReflectValue(structFieldValue, value, mapping); err != nil { + err = gerror.Wrapf(err, `error binding value to attribute "%s"`, attrName) + } + } + }() + // Directly converting. + if empty.IsNil(value) { + structFieldValue.Set(reflect.Zero(structFieldValue.Type())) + } else { + // Special handling for certain types: + // - Overwrite the default type converting logic of stdlib for time.Time/*time.Time. + var structFieldTypeName = structFieldValue.Type().String() + switch structFieldTypeName { + case "time.Time", "*time.Time": + doConvertWithReflectValueSet(structFieldValue, doConvertInput{ + FromValue: value, + ToTypeName: structFieldTypeName, + ReferValue: structFieldValue, + }) + return + } + + // Common interface check. + var ok bool + if err, ok = bindVarToReflectValueWithInterfaceCheck(structFieldValue, value); ok { + return err + } + + // Default converting. + doConvertWithReflectValueSet(structFieldValue, doConvertInput{ + FromValue: value, + ToTypeName: structFieldTypeName, + ReferValue: structFieldValue, + }) + } + return nil +} + +// bindVarToReflectValueWithInterfaceCheck does bind using common interfaces checks. +func bindVarToReflectValueWithInterfaceCheck(reflectValue reflect.Value, value interface{}) (error, bool) { + var pointer interface{} + if reflectValue.Kind() != reflect.Ptr && reflectValue.CanAddr() { + reflectValueAddr := reflectValue.Addr() + if reflectValueAddr.IsNil() || !reflectValueAddr.IsValid() { + return nil, false + } + // Not a pointer, but can token address, that makes it can be unmarshalled. + pointer = reflectValue.Addr().Interface() + } else { + if reflectValue.IsNil() || !reflectValue.IsValid() { + return nil, false + } + pointer = reflectValue.Interface() + } + // UnmarshalValue. + if v, ok := pointer.(iUnmarshalValue); ok { + return v.UnmarshalValue(value), ok + } + // UnmarshalText. 
+ if v, ok := pointer.(iUnmarshalText); ok { + var valueBytes []byte + if b, ok := value.([]byte); ok { + valueBytes = b + } else if s, ok := value.(string); ok { + valueBytes = []byte(s) + } + if len(valueBytes) > 0 { + return v.UnmarshalText(valueBytes), ok + } + } + // UnmarshalJSON. + if v, ok := pointer.(iUnmarshalJSON); ok { + var valueBytes []byte + if b, ok := value.([]byte); ok { + valueBytes = b + } else if s, ok := value.(string); ok { + valueBytes = []byte(s) + } + + if len(valueBytes) > 0 { + // If it is not a valid JSON string, it then adds char `"` on its both sides to make it is. + if !json.Valid(valueBytes) { + newValueBytes := make([]byte, len(valueBytes)+2) + newValueBytes[0] = '"' + newValueBytes[len(newValueBytes)-1] = '"' + copy(newValueBytes[1:], valueBytes) + valueBytes = newValueBytes + } + return v.UnmarshalJSON(valueBytes), ok + } + } + if v, ok := pointer.(iSet); ok { + v.Set(value) + return nil, ok + } + return nil, false +} + +// bindVarToReflectValue sets `value` to reflect value object `structFieldValue`. +func bindVarToReflectValue(structFieldValue reflect.Value, value interface{}, mapping map[string]string) (err error) { + // JSON content converting. + err, ok := doStructWithJsonCheck(value, structFieldValue) + if err != nil { + return err + } + if ok { + return nil + } + + kind := structFieldValue.Kind() + // Converting using `Set` interface implements, for some types. + switch kind { + case reflect.Slice, reflect.Array, reflect.Ptr, reflect.Interface: + if !structFieldValue.IsNil() { + if v, ok := structFieldValue.Interface().(iSet); ok { + v.Set(value) + return nil + } + } + } + + // Converting using reflection by kind. + switch kind { + case reflect.Map: + return doMapToMap(value, structFieldValue, mapping) + + case reflect.Struct: + // Recursively converting for struct attribute. + if err = doStruct(value, structFieldValue, nil, ""); err != nil { + // Note there's reflect conversion mechanism here. 
+ structFieldValue.Set(reflect.ValueOf(value).Convert(structFieldValue.Type())) + } + + // Note that the slice element might be type of struct, + // so it uses Struct function doing the converting internally. + case reflect.Slice, reflect.Array: + var ( + reflectArray reflect.Value + reflectValue = reflect.ValueOf(value) + ) + if reflectValue.Kind() == reflect.Slice || reflectValue.Kind() == reflect.Array { + reflectArray = reflect.MakeSlice(structFieldValue.Type(), reflectValue.Len(), reflectValue.Len()) + if reflectValue.Len() > 0 { + var ( + elemType = reflectArray.Index(0).Type() + elemTypeName string + converted bool + ) + for i := 0; i < reflectValue.Len(); i++ { + converted = false + elemTypeName = elemType.Name() + if elemTypeName == "" { + elemTypeName = elemType.String() + } + var elem reflect.Value + if elemType.Kind() == reflect.Ptr { + elem = reflect.New(elemType.Elem()).Elem() + } else { + elem = reflect.New(elemType).Elem() + } + if elem.Kind() == reflect.Struct { + if err = doStruct(reflectValue.Index(i).Interface(), elem, nil, ""); err == nil { + converted = true + } + } + if !converted { + doConvertWithReflectValueSet(elem, doConvertInput{ + FromValue: reflectValue.Index(i).Interface(), + ToTypeName: elemTypeName, + ReferValue: elem, + }) + } + if elemType.Kind() == reflect.Ptr { + // Before it sets the `elem` to array, do pointer converting if necessary. 
+ elem = elem.Addr() + } + reflectArray.Index(i).Set(elem) + } + } + } else { + reflectArray = reflect.MakeSlice(structFieldValue.Type(), 1, 1) + var ( + elem reflect.Value + elemType = reflectArray.Index(0).Type() + elemTypeName = elemType.Name() + converted bool + ) + if elemTypeName == "" { + elemTypeName = elemType.String() + } + if elemType.Kind() == reflect.Ptr { + elem = reflect.New(elemType.Elem()).Elem() + } else { + elem = reflect.New(elemType).Elem() + } + if elem.Kind() == reflect.Struct { + if err = doStruct(value, elem, nil, ""); err == nil { + converted = true + } + } + if !converted { + doConvertWithReflectValueSet(elem, doConvertInput{ + FromValue: value, + ToTypeName: elemTypeName, + ReferValue: elem, + }) + } + if elemType.Kind() == reflect.Ptr { + // Before it sets the `elem` to array, do pointer converting if necessary. + elem = elem.Addr() + } + reflectArray.Index(0).Set(elem) + } + structFieldValue.Set(reflectArray) + + case reflect.Ptr: + if structFieldValue.IsNil() || structFieldValue.IsZero() { + // Nil or empty pointer, it creates a new one. + item := reflect.New(structFieldValue.Type().Elem()) + if err, ok = bindVarToReflectValueWithInterfaceCheck(item, value); ok { + structFieldValue.Set(item) + return err + } + elem := item.Elem() + if err = bindVarToReflectValue(elem, value, mapping); err == nil { + structFieldValue.Set(elem.Addr()) + } + } else { + // Not empty pointer, it assigns values to it. + return bindVarToReflectValue(structFieldValue.Elem(), value, mapping) + } + + // It mainly and specially handles the interface of nil value. + case reflect.Interface: + if value == nil { + // Specially. + structFieldValue.Set(reflect.ValueOf((*interface{})(nil))) + } else { + // Note there's reflect conversion mechanism here. 
+ structFieldValue.Set(reflect.ValueOf(value).Convert(structFieldValue.Type())) + } + + default: + defer func() { + if exception := recover(); exception != nil { + err = gerror.NewCodef( + gcode.CodeInternalError, + `cannot convert value "%+v" to type "%s":%+v`, + value, + structFieldValue.Type().String(), + exception, + ) + } + }() + // It here uses reflect converting `value` to type of the attribute and assigns + // the result value to the attribute. It might fail and panic if the usual Go + // conversion rules do not allow conversion. + structFieldValue.Set(reflect.ValueOf(value).Convert(structFieldValue.Type())) + } + return nil +} diff --git a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_structs.go b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_structs.go new file mode 100644 index 00000000..be1322a7 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_structs.go @@ -0,0 +1,172 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gconv + +import ( + "reflect" + + "github.com/gogf/gf/v2/errors/gcode" + "github.com/gogf/gf/v2/errors/gerror" + "github.com/gogf/gf/v2/internal/json" +) + +// Structs converts any slice to given struct slice. +// Also see Scan, Struct. +func Structs(params interface{}, pointer interface{}, mapping ...map[string]string) (err error) { + return Scan(params, pointer, mapping...) +} + +// StructsTag acts as Structs but also with support for priority tag feature, which retrieves the +// specified tags for `params` key-value items to struct attribute names mapping. +// The parameter `priorityTag` supports multiple tags that can be joined with char ','. 
+func StructsTag(params interface{}, pointer interface{}, priorityTag string) (err error) { + return doStructs(params, pointer, nil, priorityTag) +} + +// doStructs converts any slice to given struct slice. +// +// It automatically checks and converts json string to []map if `params` is string/[]byte. +// +// The parameter `pointer` should be type of pointer to slice of struct. +// Note that if `pointer` is a pointer to another pointer of type of slice of struct, +// it will create the struct/pointer internally. +func doStructs(params interface{}, pointer interface{}, mapping map[string]string, priorityTag string) (err error) { + if params == nil { + // If `params` is nil, no conversion. + return nil + } + if pointer == nil { + return gerror.NewCode(gcode.CodeInvalidParameter, "object pointer cannot be nil") + } + + if doStructsByDirectReflectSet(params, pointer) { + return nil + } + + defer func() { + // Catch the panic, especially the reflection operation panics. + if exception := recover(); exception != nil { + if v, ok := exception.(error); ok && gerror.HasStack(v) { + err = v + } else { + err = gerror.NewCodeSkipf(gcode.CodeInternalError, 1, "%+v", exception) + } + } + }() + // If given `params` is JSON, it then uses json.Unmarshal doing the converting. + switch r := params.(type) { + case []byte: + if json.Valid(r) { + if rv, ok := pointer.(reflect.Value); ok { + if rv.Kind() == reflect.Ptr { + return json.UnmarshalUseNumber(r, rv.Interface()) + } + } else { + return json.UnmarshalUseNumber(r, pointer) + } + } + case string: + if paramsBytes := []byte(r); json.Valid(paramsBytes) { + if rv, ok := pointer.(reflect.Value); ok { + if rv.Kind() == reflect.Ptr { + return json.UnmarshalUseNumber(paramsBytes, rv.Interface()) + } + } else { + return json.UnmarshalUseNumber(paramsBytes, pointer) + } + } + } + // Pointer type check. 
+ pointerRv, ok := pointer.(reflect.Value) + if !ok { + pointerRv = reflect.ValueOf(pointer) + if kind := pointerRv.Kind(); kind != reflect.Ptr { + return gerror.NewCodef(gcode.CodeInvalidParameter, "pointer should be type of pointer, but got: %v", kind) + } + } + // Converting `params` to map slice. + var ( + paramsList []interface{} + paramsRv = reflect.ValueOf(params) + paramsKind = paramsRv.Kind() + ) + for paramsKind == reflect.Ptr { + paramsRv = paramsRv.Elem() + paramsKind = paramsRv.Kind() + } + switch paramsKind { + case reflect.Slice, reflect.Array: + paramsList = make([]interface{}, paramsRv.Len()) + for i := 0; i < paramsRv.Len(); i++ { + paramsList[i] = paramsRv.Index(i).Interface() + } + default: + var paramsMaps = Maps(params) + paramsList = make([]interface{}, len(paramsMaps)) + for i := 0; i < len(paramsMaps); i++ { + paramsList[i] = paramsMaps[i] + } + } + // If `params` is an empty slice, no conversion. + if len(paramsList) == 0 { + return nil + } + var ( + reflectElemArray = reflect.MakeSlice(pointerRv.Type().Elem(), len(paramsList), len(paramsList)) + itemType = reflectElemArray.Index(0).Type() + itemTypeKind = itemType.Kind() + pointerRvElem = pointerRv.Elem() + pointerRvLength = pointerRvElem.Len() + ) + if itemTypeKind == reflect.Ptr { + // Pointer element. + for i := 0; i < len(paramsList); i++ { + var tempReflectValue reflect.Value + if i < pointerRvLength { + // Might be nil. + tempReflectValue = pointerRvElem.Index(i).Elem() + } + if !tempReflectValue.IsValid() { + tempReflectValue = reflect.New(itemType.Elem()).Elem() + } + if err = doStruct(paramsList[i], tempReflectValue, mapping, priorityTag); err != nil { + return err + } + reflectElemArray.Index(i).Set(tempReflectValue.Addr()) + } + } else { + // Struct element. 
+ for i := 0; i < len(paramsList); i++ { + var tempReflectValue reflect.Value + if i < pointerRvLength { + tempReflectValue = pointerRvElem.Index(i) + } else { + tempReflectValue = reflect.New(itemType).Elem() + } + if err = doStruct(paramsList[i], tempReflectValue, mapping, priorityTag); err != nil { + return err + } + reflectElemArray.Index(i).Set(tempReflectValue) + } + } + pointerRv.Elem().Set(reflectElemArray) + return nil +} + +// doStructsByDirectReflectSet do the converting directly using reflect Set. +// It returns true if success, or else false. +func doStructsByDirectReflectSet(params interface{}, pointer interface{}) (ok bool) { + v1 := reflect.ValueOf(pointer) + v2 := reflect.ValueOf(params) + if v1.Kind() == reflect.Ptr { + if elem := v1.Elem(); elem.IsValid() && elem.Type() == v2.Type() { + elem.Set(v2) + ok = true + } + } + return ok +} diff --git a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_time.go b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_time.go new file mode 100644 index 00000000..a5269cad --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_time.go @@ -0,0 +1,84 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gconv + +import ( + "time" + + "github.com/gogf/gf/v2/internal/utils" + "github.com/gogf/gf/v2/os/gtime" +) + +// Time converts `any` to time.Time. +func Time(any interface{}, format ...string) time.Time { + // It's already this type. + if len(format) == 0 { + if v, ok := any.(time.Time); ok { + return v + } + } + if t := GTime(any, format...); t != nil { + return t.Time + } + return time.Time{} +} + +// Duration converts `any` to time.Duration. +// If `any` is string, then it uses time.ParseDuration to convert it. +// If `any` is numeric, then it converts `any` as nanoseconds. 
+func Duration(any interface{}) time.Duration { + // It's already this type. + if v, ok := any.(time.Duration); ok { + return v + } + s := String(any) + if !utils.IsNumeric(s) { + d, _ := gtime.ParseDuration(s) + return d + } + return time.Duration(Int64(any)) +} + +// GTime converts `any` to *gtime.Time. +// The parameter `format` can be used to specify the format of `any`. +// If no `format` given, it converts `any` using gtime.NewFromTimeStamp if `any` is numeric, +// or using gtime.StrToTime if `any` is string. +func GTime(any interface{}, format ...string) *gtime.Time { + if any == nil { + return nil + } + if v, ok := any.(iGTime); ok { + return v.GTime(format...) + } + // It's already this type. + if len(format) == 0 { + if v, ok := any.(*gtime.Time); ok { + return v + } + if t, ok := any.(time.Time); ok { + return gtime.New(t) + } + if t, ok := any.(*time.Time); ok { + return gtime.New(t) + } + } + s := String(any) + if len(s) == 0 { + return gtime.New() + } + // Priority conversion using given format. + if len(format) > 0 { + t, _ := gtime.StrToTimeFormat(s, format[0]) + return t + } + if utils.IsNumeric(s) { + return gtime.NewFromTimeStamp(Int64(s)) + } else { + t, _ := gtime.StrToTime(s) + return t + } +} diff --git a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_uint.go b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_uint.go new file mode 100644 index 00000000..028a14de --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_uint.go @@ -0,0 +1,119 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gconv + +import ( + "math" + "strconv" + + "github.com/gogf/gf/v2/encoding/gbinary" +) + +// Uint converts `any` to uint. 
+func Uint(any interface{}) uint { + if any == nil { + return 0 + } + if v, ok := any.(uint); ok { + return v + } + return uint(Uint64(any)) +} + +// Uint8 converts `any` to uint8. +func Uint8(any interface{}) uint8 { + if any == nil { + return 0 + } + if v, ok := any.(uint8); ok { + return v + } + return uint8(Uint64(any)) +} + +// Uint16 converts `any` to uint16. +func Uint16(any interface{}) uint16 { + if any == nil { + return 0 + } + if v, ok := any.(uint16); ok { + return v + } + return uint16(Uint64(any)) +} + +// Uint32 converts `any` to uint32. +func Uint32(any interface{}) uint32 { + if any == nil { + return 0 + } + if v, ok := any.(uint32); ok { + return v + } + return uint32(Uint64(any)) +} + +// Uint64 converts `any` to uint64. +func Uint64(any interface{}) uint64 { + if any == nil { + return 0 + } + switch value := any.(type) { + case int: + return uint64(value) + case int8: + return uint64(value) + case int16: + return uint64(value) + case int32: + return uint64(value) + case int64: + return uint64(value) + case uint: + return uint64(value) + case uint8: + return uint64(value) + case uint16: + return uint64(value) + case uint32: + return uint64(value) + case uint64: + return value + case float32: + return uint64(value) + case float64: + return uint64(value) + case bool: + if value { + return 1 + } + return 0 + case []byte: + return gbinary.DecodeToUint64(value) + default: + if f, ok := value.(iUint64); ok { + return f.Uint64() + } + s := String(value) + // Hexadecimal + if len(s) > 2 && s[0] == '0' && (s[1] == 'x' || s[1] == 'X') { + if v, e := strconv.ParseUint(s[2:], 16, 64); e == nil { + return v + } + } + // Decimal + if v, e := strconv.ParseUint(s, 10, 64); e == nil { + return v + } + // Float64 + if valueFloat64 := Float64(value); math.IsNaN(valueFloat64) { + return 0 + } else { + return uint64(valueFloat64) + } + } +} diff --git a/vendor/github.com/gogf/gf/v2/util/gconv/gconv_unsafe.go b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_unsafe.go 
new file mode 100644 index 00000000..e4b24fdf --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/util/gconv/gconv_unsafe.go @@ -0,0 +1,23 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gconv + +import "unsafe" + +// UnsafeStrToBytes converts string to []byte without memory copy. +// Note that, if you completely sure you will never use `s` variable in the feature, +// you can use this unsafe function to implement type conversion in high performance. +func UnsafeStrToBytes(s string) []byte { + return *(*[]byte)(unsafe.Pointer(&s)) +} + +// UnsafeBytesToStr converts []byte to string without memory copy. +// Note that, if you completely sure you will never use `b` variable in the feature, +// you can use this unsafe function to implement type conversion in high performance. +func UnsafeBytesToStr(b []byte) string { + return *(*string)(unsafe.Pointer(&b)) +} diff --git a/vendor/github.com/gogf/gf/v2/util/grand/grand.go b/vendor/github.com/gogf/gf/v2/util/grand/grand.go new file mode 100644 index 00000000..90fd93e6 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/util/grand/grand.go @@ -0,0 +1,195 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +// Package grand provides high performance random bytes/number/string generation functionality. 
+package grand + +import ( + "encoding/binary" + "time" +) + +var ( + letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" // 52 + symbols = "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~" // 32 + digits = "0123456789" // 10 + characters = letters + digits + symbols // 94 +) + +// Intn returns an int number which is between 0 and max: [0, max). +// +// Note that: +// 1. The `max` can only be greater than 0, or else it returns `max` directly; +// 2. The result is greater than or equal to 0, but less than `max`; +// 3. The result number is 32bit and less than math.MaxUint32. +func Intn(max int) int { + if max <= 0 { + return max + } + n := int(binary.LittleEndian.Uint32(<-bufferChan)) % max + if (max > 0 && n < 0) || (max < 0 && n > 0) { + return -n + } + return n +} + +// B retrieves and returns random bytes of given length `n`. +func B(n int) []byte { + if n <= 0 { + return nil + } + i := 0 + b := make([]byte, n) + for { + copy(b[i:], <-bufferChan) + i += 4 + if i >= n { + break + } + } + return b +} + +// N returns a random int between min and max: [min, max]. +// The `min` and `max` also support negative numbers. +func N(min, max int) int { + if min >= max { + return min + } + if min >= 0 { + return Intn(max-min+1) + min + } + // As `Intn` dose not support negative number, + // so we should first shift the value to right, + // then call `Intn` to produce the random number, + // and finally shift the result back to left. + return Intn(max+(0-min)+1) - (0 - min) +} + +// S returns a random string which contains digits and letters, and its length is `n`. +// The optional parameter `symbols` specifies whether the result could contain symbols, +// which is false in default. 
+func S(n int, symbols ...bool) string { + if n <= 0 { + return "" + } + var ( + b = make([]byte, n) + numberBytes = B(n) + ) + for i := range b { + if len(symbols) > 0 && symbols[0] { + b[i] = characters[numberBytes[i]%94] + } else { + b[i] = characters[numberBytes[i]%62] + } + } + return string(b) +} + +// D returns a random time.Duration between min and max: [min, max]. +func D(min, max time.Duration) time.Duration { + multiple := int64(1) + if min != 0 { + for min%10 == 0 { + multiple *= 10 + min /= 10 + max /= 10 + } + } + n := int64(N(int(min), int(max))) + return time.Duration(n * multiple) +} + +// Str randomly picks and returns `n` count of chars from given string `s`. +// It also supports unicode string like Chinese/Russian/Japanese, etc. +func Str(s string, n int) string { + if n <= 0 { + return "" + } + var ( + b = make([]rune, n) + runes = []rune(s) + ) + if len(runes) <= 255 { + numberBytes := B(n) + for i := range b { + b[i] = runes[int(numberBytes[i])%len(runes)] + } + } else { + for i := range b { + b[i] = runes[Intn(len(runes))] + } + } + return string(b) +} + +// Digits returns a random string which contains only digits, and its length is `n`. +func Digits(n int) string { + if n <= 0 { + return "" + } + var ( + b = make([]byte, n) + numberBytes = B(n) + ) + for i := range b { + b[i] = digits[numberBytes[i]%10] + } + return string(b) +} + +// Letters returns a random string which contains only letters, and its length is `n`. +func Letters(n int) string { + if n <= 0 { + return "" + } + var ( + b = make([]byte, n) + numberBytes = B(n) + ) + for i := range b { + b[i] = letters[numberBytes[i]%52] + } + return string(b) +} + +// Symbols returns a random string which contains only symbols, and its length is `n`. 
+func Symbols(n int) string { + if n <= 0 { + return "" + } + var ( + b = make([]byte, n) + numberBytes = B(n) + ) + for i := range b { + b[i] = symbols[numberBytes[i]%32] + } + return string(b) +} + +// Perm returns, as a slice of n int numbers, a pseudo-random permutation of the integers [0,n). +// TODO performance improving for large slice producing. +func Perm(n int) []int { + m := make([]int, n) + for i := 0; i < n; i++ { + j := Intn(i + 1) + m[i] = m[j] + m[j] = i + } + return m +} + +// Meet randomly calculate whether the given probability `num`/`total` is met. +func Meet(num, total int) bool { + return Intn(total) < num +} + +// MeetProb randomly calculate whether the given probability is met. +func MeetProb(prob float32) bool { + return Intn(1e7) < int(prob*1e7) +} diff --git a/vendor/github.com/gogf/gf/v2/util/grand/grand_buffer.go b/vendor/github.com/gogf/gf/v2/util/grand/grand_buffer.go new file mode 100644 index 00000000..4527c25a --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/util/grand/grand_buffer.go @@ -0,0 +1,53 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package grand + +import ( + "crypto/rand" + + "github.com/gogf/gf/v2/errors/gcode" + "github.com/gogf/gf/v2/errors/gerror" +) + +const ( + // Buffer size for uint32 random number. + bufferChanSize = 10000 +) + +var ( + // bufferChan is the buffer for random bytes, + // every item storing 4 bytes. + bufferChan = make(chan []byte, bufferChanSize) +) + +func init() { + go asyncProducingRandomBufferBytesLoop() +} + +// asyncProducingRandomBufferBytes is a named goroutine, which uses an asynchronous goroutine +// to produce the random bytes, and a buffer chan to store the random bytes. +// So it has high performance to generate random numbers. 
+func asyncProducingRandomBufferBytesLoop() { + var step int + for { + buffer := make([]byte, 1024) + if n, err := rand.Read(buffer); err != nil { + panic(gerror.WrapCode(gcode.CodeInternalError, err, `error reading random buffer from system`)) + } else { + // The random buffer from system is very expensive, + // so fully reuse the random buffer by changing + // the step with a different number can + // improve the performance a lot. + // for _, step = range []int{4, 5, 6, 7} { + for _, step = range []int{4} { + for i := 0; i <= n-4; i += step { + bufferChan <- buffer[i : i+4] + } + } + } + } +} diff --git a/vendor/github.com/gogf/gf/v2/util/gtag/gtag.go b/vendor/github.com/gogf/gf/v2/util/gtag/gtag.go new file mode 100644 index 00000000..656e39d9 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/util/gtag/gtag.go @@ -0,0 +1,48 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +// Package gtag providing tag content storing for struct. +// +// Note that calling functions of this package is not concurrently safe, +// which means you cannot call them in runtime but in boot procedure. +package gtag + +const ( + Default = "default" // Default value tag of struct field for receiving parameters from HTTP request. + DefaultShort = "d" // Short name of Default. + Param = "param" // Parameter name for converting certain parameter to specified struct field. + ParamShort = "p" // Short name of Param. + Valid = "valid" // Validation rule tag for struct of field. + ValidShort = "v" // Short name of Valid. + NoValidation = "nv" // No validation for specified struct/field. + ORM = "orm" // ORM tag for ORM feature, which performs different features according scenarios. + Arg = "arg" // Arg tag for struct, usually for command argument option. 
+ Brief = "brief" // Brief tag for struct, usually be considered as summary. + Root = "root" // Root tag for struct, usually for nested commands management. + Additional = "additional" // Additional tag for struct, usually for additional description of command. + AdditionalShort = "ad" // Short name of Additional. + Path = `path` // Route path for HTTP request. + Method = `method` // Route method for HTTP request. + Domain = `domain` // Route domain for HTTP request. + Mime = `mime` // MIME type for HTTP request/response. + Consumes = `consumes` // MIME type for HTTP request. + Summary = `summary` // Summary for struct, usually for OpenAPI in request struct. + SummaryShort = `sm` // Short name of Summary. + SummaryShort2 = `sum` // Short name of Summary. + Description = `description` // Description for struct, usually for OpenAPI in request struct. + DescriptionShort = `dc` // Short name of Description. + DescriptionShort2 = `des` // Short name of Description. + Example = `example` // Example for struct, usually for OpenAPI in request struct. + ExampleShort = `eg` // Short name of Example. + Examples = `examples` // Examples for struct, usually for OpenAPI in request struct. + ExamplesShort = `egs` // Short name of Examples. + ExternalDocs = `externalDocs` // External docs for struct, always for OpenAPI in request struct. + ExternalDocsShort = `ed` // Short name of ExternalDocs. + GConv = "gconv" // GConv defines the converting target name for specified struct field. + GConvShort = "c" // GConv defines the converting target name for specified struct field. + Json = "json" // Json tag is supported by stdlib. + Security = "security" // Security defines scheme for authentication. 
Detail to see https://swagger.io/docs/specification/authentication/ +) diff --git a/vendor/github.com/gogf/gf/v2/util/gtag/gtag_func.go b/vendor/github.com/gogf/gf/v2/util/gtag/gtag_func.go new file mode 100644 index 00000000..5085c788 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/util/gtag/gtag_func.go @@ -0,0 +1,65 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gtag + +import ( + "regexp" + + "github.com/gogf/gf/v2/errors/gerror" +) + +var ( + data = make(map[string]string) + regex = regexp.MustCompile(`\{(.+?)\}`) +) + +// Set sets tag content for specified name. +// Note that it panics if `name` already exists. +func Set(name, value string) { + if _, ok := data[name]; ok { + panic(gerror.Newf(`value for tag name "%s" already exists`, name)) + } + data[name] = value +} + +// SetOver performs as Set, but it overwrites the old value if `name` already exists. +func SetOver(name, value string) { + data[name] = value +} + +// Sets sets multiple tag content by map. +func Sets(m map[string]string) { + for k, v := range m { + Set(k, v) + } +} + +// SetsOver performs as Sets, but it overwrites the old value if `name` already exists. +func SetsOver(m map[string]string) { + for k, v := range m { + SetOver(k, v) + } +} + +// Get retrieves and returns the stored tag content for specified name. +func Get(name string) string { + return data[name] +} + +// Parse parses and returns the content by replacing all tag name variable to +// its content for given `content`. +// Eg: +// gtag.Set("demo", "content") +// Parse(`This is {demo}`) -> `This is content`. 
+func Parse(content string) string { + return regex.ReplaceAllStringFunc(content, func(s string) string { + if v, ok := data[s[1:len(s)-1]]; ok { + return v + } + return s + }) +} diff --git a/vendor/github.com/gogf/gf/v2/util/gutil/gutil.go b/vendor/github.com/gogf/gf/v2/util/gutil/gutil.go new file mode 100644 index 00000000..b6b76efb --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/util/gutil/gutil.go @@ -0,0 +1,159 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +// Package gutil provides utility functions. +package gutil + +import ( + "context" + "reflect" + + "github.com/gogf/gf/v2/errors/gerror" + "github.com/gogf/gf/v2/internal/empty" + "github.com/gogf/gf/v2/util/gconv" +) + +const ( + dumpIndent = ` ` +) + +// Throw throws out an exception, which can be caught be TryCatch or recover. +func Throw(exception interface{}) { + panic(exception) +} + +// Try implements try... logistics using internal panic...recover. +// It returns error if any exception occurs, or else it returns nil. +func Try(ctx context.Context, try func(ctx context.Context)) (err error) { + defer func() { + if exception := recover(); exception != nil { + if v, ok := exception.(error); ok && gerror.HasStack(v) { + err = v + } else { + err = gerror.Newf(`%+v`, exception) + } + } + }() + try(ctx) + return +} + +// TryCatch implements try...catch... logistics using internal panic...recover. +// It automatically calls function `catch` if any exception occurs and passes the exception as an error. 
+func TryCatch(ctx context.Context, try func(ctx context.Context), catch ...func(ctx context.Context, exception error)) { + defer func() { + if exception := recover(); exception != nil && len(catch) > 0 { + if v, ok := exception.(error); ok && gerror.HasStack(v) { + catch[0](ctx, v) + } else { + catch[0](ctx, gerror.Newf(`%+v`, exception)) + } + } + }() + try(ctx) +} + +// IsEmpty checks given `value` empty or not. +// It returns false if `value` is: integer(0), bool(false), slice/map(len=0), nil; +// or else returns true. +func IsEmpty(value interface{}) bool { + return empty.IsEmpty(value) +} + +// Keys retrieves and returns the keys from given map or struct. +func Keys(mapOrStruct interface{}) (keysOrAttrs []string) { + keysOrAttrs = make([]string, 0) + if m, ok := mapOrStruct.(map[string]interface{}); ok { + for k := range m { + keysOrAttrs = append(keysOrAttrs, k) + } + return + } + var ( + reflectValue reflect.Value + reflectKind reflect.Kind + ) + if v, ok := mapOrStruct.(reflect.Value); ok { + reflectValue = v + } else { + reflectValue = reflect.ValueOf(mapOrStruct) + } + reflectKind = reflectValue.Kind() + for reflectKind == reflect.Ptr { + if !reflectValue.IsValid() || reflectValue.IsNil() { + reflectValue = reflect.New(reflectValue.Type().Elem()).Elem() + reflectKind = reflectValue.Kind() + } else { + reflectValue = reflectValue.Elem() + reflectKind = reflectValue.Kind() + } + } + switch reflectKind { + case reflect.Map: + for _, k := range reflectValue.MapKeys() { + keysOrAttrs = append(keysOrAttrs, gconv.String(k.Interface())) + } + case reflect.Struct: + var ( + fieldType reflect.StructField + reflectType = reflectValue.Type() + ) + for i := 0; i < reflectValue.NumField(); i++ { + fieldType = reflectType.Field(i) + if fieldType.Anonymous { + keysOrAttrs = append(keysOrAttrs, Keys(reflectValue.Field(i))...) 
+ } else { + keysOrAttrs = append(keysOrAttrs, fieldType.Name) + } + } + } + return +} + +// Values retrieves and returns the values from given map or struct. +func Values(mapOrStruct interface{}) (values []interface{}) { + values = make([]interface{}, 0) + if m, ok := mapOrStruct.(map[string]interface{}); ok { + for _, v := range m { + values = append(values, v) + } + return + } + var ( + reflectValue reflect.Value + reflectKind reflect.Kind + ) + if v, ok := mapOrStruct.(reflect.Value); ok { + reflectValue = v + } else { + reflectValue = reflect.ValueOf(mapOrStruct) + } + reflectKind = reflectValue.Kind() + for reflectKind == reflect.Ptr { + reflectValue = reflectValue.Elem() + reflectKind = reflectValue.Kind() + } + switch reflectKind { + case reflect.Map: + for _, k := range reflectValue.MapKeys() { + values = append(values, reflectValue.MapIndex(k).Interface()) + } + case reflect.Struct: + var ( + fieldType reflect.StructField + reflectType = reflectValue.Type() + ) + for i := 0; i < reflectValue.NumField(); i++ { + fieldType = reflectType.Field(i) + if fieldType.Anonymous { + values = append(values, Values(reflectValue.Field(i))...) + } else { + values = append(values, reflectValue.Field(i).Interface()) + } + } + } + return +} diff --git a/vendor/github.com/gogf/gf/v2/util/gutil/gutil_comparator.go b/vendor/github.com/gogf/gf/v2/util/gutil/gutil_comparator.go new file mode 100644 index 00000000..c454cf92 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/util/gutil/gutil_comparator.go @@ -0,0 +1,127 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gutil + +import ( + "strings" + + "github.com/gogf/gf/v2/util/gconv" +) + +// Comparator is a function that compare a and b, and returns the result as int. 
+// +// Should return a number: +// +// negative , if a < b +// zero , if a == b +// positive , if a > b +type Comparator func(a, b interface{}) int + +// ComparatorString provides a fast comparison on strings. +func ComparatorString(a, b interface{}) int { + return strings.Compare(gconv.String(a), gconv.String(b)) +} + +// ComparatorInt provides a basic comparison on int. +func ComparatorInt(a, b interface{}) int { + return gconv.Int(a) - gconv.Int(b) +} + +// ComparatorInt8 provides a basic comparison on int8. +func ComparatorInt8(a, b interface{}) int { + return int(gconv.Int8(a) - gconv.Int8(b)) +} + +// ComparatorInt16 provides a basic comparison on int16. +func ComparatorInt16(a, b interface{}) int { + return int(gconv.Int16(a) - gconv.Int16(b)) +} + +// ComparatorInt32 provides a basic comparison on int32. +func ComparatorInt32(a, b interface{}) int { + return int(gconv.Int32(a) - gconv.Int32(b)) +} + +// ComparatorInt64 provides a basic comparison on int64. +func ComparatorInt64(a, b interface{}) int { + return int(gconv.Int64(a) - gconv.Int64(b)) +} + +// ComparatorUint provides a basic comparison on uint. +func ComparatorUint(a, b interface{}) int { + return int(gconv.Uint(a) - gconv.Uint(b)) +} + +// ComparatorUint8 provides a basic comparison on uint8. +func ComparatorUint8(a, b interface{}) int { + return int(gconv.Uint8(a) - gconv.Uint8(b)) +} + +// ComparatorUint16 provides a basic comparison on uint16. +func ComparatorUint16(a, b interface{}) int { + return int(gconv.Uint16(a) - gconv.Uint16(b)) +} + +// ComparatorUint32 provides a basic comparison on uint32. +func ComparatorUint32(a, b interface{}) int { + return int(gconv.Uint32(a) - gconv.Uint32(b)) +} + +// ComparatorUint64 provides a basic comparison on uint64. +func ComparatorUint64(a, b interface{}) int { + return int(gconv.Uint64(a) - gconv.Uint64(b)) +} + +// ComparatorFloat32 provides a basic comparison on float32. 
+func ComparatorFloat32(a, b interface{}) int { + aFloat := gconv.Float32(a) + bFloat := gconv.Float32(b) + if aFloat == bFloat { + return 0 + } + if aFloat > bFloat { + return 1 + } + return -1 +} + +// ComparatorFloat64 provides a basic comparison on float64. +func ComparatorFloat64(a, b interface{}) int { + aFloat := gconv.Float64(a) + bFloat := gconv.Float64(b) + if aFloat == bFloat { + return 0 + } + if aFloat > bFloat { + return 1 + } + return -1 +} + +// ComparatorByte provides a basic comparison on byte. +func ComparatorByte(a, b interface{}) int { + return int(gconv.Byte(a) - gconv.Byte(b)) +} + +// ComparatorRune provides a basic comparison on rune. +func ComparatorRune(a, b interface{}) int { + return int(gconv.Rune(a) - gconv.Rune(b)) +} + +// ComparatorTime provides a basic comparison on time.Time. +func ComparatorTime(a, b interface{}) int { + aTime := gconv.Time(a) + bTime := gconv.Time(b) + switch { + case aTime.After(bTime): + return 1 + case aTime.Before(bTime): + return -1 + default: + return 0 + } +} diff --git a/vendor/github.com/gogf/gf/v2/util/gutil/gutil_copy.go b/vendor/github.com/gogf/gf/v2/util/gutil/gutil_copy.go new file mode 100644 index 00000000..24398b38 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/util/gutil/gutil_copy.go @@ -0,0 +1,20 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gutil + +import ( + "github.com/gogf/gf/v2/internal/deepcopy" +) + +// Copy returns a deep copy of v. +// +// Copy is unable to copy unexported fields in a struct (lowercase field names). +// Unexported fields can't be reflected by the Go runtime and therefore +// they can't perform any data copies. 
+func Copy(src interface{}) (dst interface{}) { + return deepcopy.Copy(src) +} diff --git a/vendor/github.com/gogf/gf/v2/util/gutil/gutil_default.go b/vendor/github.com/gogf/gf/v2/util/gutil/gutil_default.go new file mode 100644 index 00000000..881b0374 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/util/gutil/gutil_default.go @@ -0,0 +1,27 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gutil + +// GetOrDefaultStr checks and returns value according whether parameter `param` available. +// It returns `param[0]` if it is available, or else it returns `def`. +func GetOrDefaultStr(def string, param ...string) string { + value := def + if len(param) > 0 && param[0] != "" { + value = param[0] + } + return value +} + +// GetOrDefaultAny checks and returns value according whether parameter `param` available. +// It returns `param[0]` if it is available, or else it returns `def`. +func GetOrDefaultAny(def interface{}, param ...interface{}) interface{} { + value := def + if len(param) > 0 && param[0] != "" { + value = param[0] + } + return value +} diff --git a/vendor/github.com/gogf/gf/v2/util/gutil/gutil_dump.go b/vendor/github.com/gogf/gf/v2/util/gutil/gutil_dump.go new file mode 100644 index 00000000..48bb6308 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/util/gutil/gutil_dump.go @@ -0,0 +1,471 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. 
+ +package gutil + +import ( + "bytes" + "fmt" + "io" + "reflect" + "strings" + + "github.com/gogf/gf/v2/internal/reflection" + "github.com/gogf/gf/v2/os/gstructs" + "github.com/gogf/gf/v2/text/gstr" +) + +// iString is used for type assert api for String(). +type iString interface { + String() string +} + +// iError is used for type assert api for Error(). +type iError interface { + Error() string +} + +// iMarshalJSON is the interface for custom Json marshaling. +type iMarshalJSON interface { + MarshalJSON() ([]byte, error) +} + +// DumpOption specifies the behavior of function Export. +type DumpOption struct { + WithType bool // WithType specifies dumping content with type information. + ExportedOnly bool // Only dump Exported fields for structs. +} + +// Dump prints variables `values` to stdout with more manually readable. +func Dump(values ...interface{}) { + for _, value := range values { + DumpWithOption(value, DumpOption{ + WithType: false, + ExportedOnly: false, + }) + } +} + +// DumpWithType acts like Dump, but with type information. +// Also see Dump. +func DumpWithType(values ...interface{}) { + for _, value := range values { + DumpWithOption(value, DumpOption{ + WithType: true, + ExportedOnly: false, + }) + } +} + +// DumpWithOption returns variables `values` as a string with more manually readable. 
+func DumpWithOption(value interface{}, option DumpOption) { + buffer := bytes.NewBuffer(nil) + DumpTo(buffer, value, DumpOption{ + WithType: option.WithType, + ExportedOnly: option.ExportedOnly, + }) + fmt.Println(buffer.String()) +} + +// DumpTo writes variables `values` as a string in to `writer` with more manually readable +func DumpTo(writer io.Writer, value interface{}, option DumpOption) { + buffer := bytes.NewBuffer(nil) + doDump(value, "", buffer, doDumpOption{ + WithType: option.WithType, + ExportedOnly: option.ExportedOnly, + }) + _, _ = writer.Write(buffer.Bytes()) +} + +type doDumpOption struct { + WithType bool + ExportedOnly bool + DumpedPointerSet map[string]struct{} +} + +func doDump(value interface{}, indent string, buffer *bytes.Buffer, option doDumpOption) { + if option.DumpedPointerSet == nil { + option.DumpedPointerSet = map[string]struct{}{} + } + + if value == nil { + buffer.WriteString(``) + return + } + var reflectValue reflect.Value + if v, ok := value.(reflect.Value); ok { + reflectValue = v + if v.IsValid() && v.CanInterface() { + value = v.Interface() + } else { + if convertedValue, ok := reflection.ValueToInterface(v); ok { + value = convertedValue + } + } + } else { + reflectValue = reflect.ValueOf(value) + } + // Double check nil value. 
+ if value == nil { + buffer.WriteString(``) + return + } + var ( + reflectKind = reflectValue.Kind() + reflectTypeName = reflectValue.Type().String() + ptrAddress string + newIndent = indent + dumpIndent + ) + reflectTypeName = strings.ReplaceAll(reflectTypeName, `[]uint8`, `[]byte`) + for reflectKind == reflect.Ptr { + if ptrAddress == "" { + ptrAddress = fmt.Sprintf(`0x%x`, reflectValue.Pointer()) + } + reflectValue = reflectValue.Elem() + reflectKind = reflectValue.Kind() + } + var ( + exportInternalInput = doDumpInternalInput{ + Value: value, + Indent: indent, + NewIndent: newIndent, + Buffer: buffer, + Option: option, + PtrAddress: ptrAddress, + ReflectValue: reflectValue, + ReflectTypeName: reflectTypeName, + ExportedOnly: option.ExportedOnly, + DumpedPointerSet: option.DumpedPointerSet, + } + ) + switch reflectKind { + case reflect.Slice, reflect.Array: + doDumpSlice(exportInternalInput) + + case reflect.Map: + doDumpMap(exportInternalInput) + + case reflect.Struct: + doDumpStruct(exportInternalInput) + + case reflect.String: + doDumpString(exportInternalInput) + + case reflect.Bool: + doDumpBool(exportInternalInput) + + case + reflect.Int, + reflect.Int8, + reflect.Int16, + reflect.Int32, + reflect.Int64, + reflect.Uint, + reflect.Uint8, + reflect.Uint16, + reflect.Uint32, + reflect.Uint64, + reflect.Float32, + reflect.Float64, + reflect.Complex64, + reflect.Complex128: + doDumpNumber(exportInternalInput) + + case reflect.Chan: + buffer.WriteString(fmt.Sprintf(`<%s>`, reflectValue.Type().String())) + + case reflect.Func: + if reflectValue.IsNil() || !reflectValue.IsValid() { + buffer.WriteString(``) + } else { + buffer.WriteString(fmt.Sprintf(`<%s>`, reflectValue.Type().String())) + } + + case reflect.Interface: + doDump(exportInternalInput.ReflectValue.Elem(), indent, buffer, option) + + default: + doDumpDefault(exportInternalInput) + } +} + +type doDumpInternalInput struct { + Value interface{} + Indent string + NewIndent string + Buffer *bytes.Buffer + 
Option doDumpOption + ReflectValue reflect.Value + ReflectTypeName string + PtrAddress string + ExportedOnly bool + DumpedPointerSet map[string]struct{} +} + +func doDumpSlice(in doDumpInternalInput) { + if b, ok := in.Value.([]byte); ok { + if !in.Option.WithType { + in.Buffer.WriteString(fmt.Sprintf(`"%s"`, addSlashesForString(string(b)))) + } else { + in.Buffer.WriteString(fmt.Sprintf( + `%s(%d) "%s"`, + in.ReflectTypeName, + len(string(b)), + string(b), + )) + } + return + } + if in.ReflectValue.Len() == 0 { + if !in.Option.WithType { + in.Buffer.WriteString("[]") + } else { + in.Buffer.WriteString(fmt.Sprintf("%s(0) []", in.ReflectTypeName)) + } + return + } + if !in.Option.WithType { + in.Buffer.WriteString("[\n") + } else { + in.Buffer.WriteString(fmt.Sprintf("%s(%d) [\n", in.ReflectTypeName, in.ReflectValue.Len())) + } + for i := 0; i < in.ReflectValue.Len(); i++ { + in.Buffer.WriteString(in.NewIndent) + doDump(in.ReflectValue.Index(i), in.NewIndent, in.Buffer, in.Option) + in.Buffer.WriteString(",\n") + } + in.Buffer.WriteString(fmt.Sprintf("%s]", in.Indent)) +} + +func doDumpMap(in doDumpInternalInput) { + var mapKeys = make([]reflect.Value, 0) + for _, key := range in.ReflectValue.MapKeys() { + if !key.CanInterface() { + continue + } + mapKey := key + mapKeys = append(mapKeys, mapKey) + } + if len(mapKeys) == 0 { + if !in.Option.WithType { + in.Buffer.WriteString("{}") + } else { + in.Buffer.WriteString(fmt.Sprintf("%s(0) {}", in.ReflectTypeName)) + } + return + } + var ( + maxSpaceNum = 0 + tmpSpaceNum = 0 + mapKeyStr = "" + ) + for _, key := range mapKeys { + tmpSpaceNum = len(fmt.Sprintf(`%v`, key.Interface())) + if tmpSpaceNum > maxSpaceNum { + maxSpaceNum = tmpSpaceNum + } + } + if !in.Option.WithType { + in.Buffer.WriteString("{\n") + } else { + in.Buffer.WriteString(fmt.Sprintf("%s(%d) {\n", in.ReflectTypeName, len(mapKeys))) + } + for _, mapKey := range mapKeys { + tmpSpaceNum = len(fmt.Sprintf(`%v`, mapKey.Interface())) + if mapKey.Kind() == 
reflect.String { + mapKeyStr = fmt.Sprintf(`"%v"`, mapKey.Interface()) + } else { + mapKeyStr = fmt.Sprintf(`%v`, mapKey.Interface()) + } + // Map key and indent string dump. + if !in.Option.WithType { + in.Buffer.WriteString(fmt.Sprintf( + "%s%v:%s", + in.NewIndent, + mapKeyStr, + strings.Repeat(" ", maxSpaceNum-tmpSpaceNum+1), + )) + } else { + in.Buffer.WriteString(fmt.Sprintf( + "%s%s(%v):%s", + in.NewIndent, + mapKey.Type().String(), + mapKeyStr, + strings.Repeat(" ", maxSpaceNum-tmpSpaceNum+1), + )) + } + // Map value dump. + doDump(in.ReflectValue.MapIndex(mapKey), in.NewIndent, in.Buffer, in.Option) + in.Buffer.WriteString(",\n") + } + in.Buffer.WriteString(fmt.Sprintf("%s}", in.Indent)) +} + +func doDumpStruct(in doDumpInternalInput) { + if in.PtrAddress != "" { + if _, ok := in.DumpedPointerSet[in.PtrAddress]; ok { + in.Buffer.WriteString(fmt.Sprintf(``, in.PtrAddress)) + return + } + } + in.DumpedPointerSet[in.PtrAddress] = struct{}{} + + structFields, _ := gstructs.Fields(gstructs.FieldsInput{ + Pointer: in.Value, + RecursiveOption: gstructs.RecursiveOptionEmbedded, + }) + var ( + hasNoExportedFields = true + _, isReflectValue = in.Value.(reflect.Value) + ) + for _, field := range structFields { + if field.IsExported() { + hasNoExportedFields = false + break + } + } + if !isReflectValue && (len(structFields) == 0 || hasNoExportedFields) { + var ( + structContentStr = "" + attributeCountStr = "0" + ) + if v, ok := in.Value.(iString); ok { + structContentStr = v.String() + } else if v, ok := in.Value.(iError); ok { + structContentStr = v.Error() + } else if v, ok := in.Value.(iMarshalJSON); ok { + b, _ := v.MarshalJSON() + structContentStr = string(b) + } else { + // Has no common interface implements. 
+ if len(structFields) != 0 { + goto dumpStructFields + } + } + if structContentStr == "" { + structContentStr = "{}" + } else { + structContentStr = fmt.Sprintf(`"%s"`, addSlashesForString(structContentStr)) + attributeCountStr = fmt.Sprintf(`%d`, len(structContentStr)-2) + } + if !in.Option.WithType { + in.Buffer.WriteString(structContentStr) + } else { + in.Buffer.WriteString(fmt.Sprintf( + "%s(%s) %s", + in.ReflectTypeName, + attributeCountStr, + structContentStr, + )) + } + return + } + +dumpStructFields: + var ( + maxSpaceNum = 0 + tmpSpaceNum = 0 + ) + for _, field := range structFields { + if in.ExportedOnly && !field.IsExported() { + continue + } + tmpSpaceNum = len(field.Name()) + if tmpSpaceNum > maxSpaceNum { + maxSpaceNum = tmpSpaceNum + } + } + if !in.Option.WithType { + in.Buffer.WriteString("{\n") + } else { + in.Buffer.WriteString(fmt.Sprintf("%s(%d) {\n", in.ReflectTypeName, len(structFields))) + } + for _, field := range structFields { + if in.ExportedOnly && !field.IsExported() { + continue + } + tmpSpaceNum = len(fmt.Sprintf(`%v`, field.Name())) + in.Buffer.WriteString(fmt.Sprintf( + "%s%s:%s", + in.NewIndent, + field.Name(), + strings.Repeat(" ", maxSpaceNum-tmpSpaceNum+1), + )) + doDump(field.Value, in.NewIndent, in.Buffer, in.Option) + in.Buffer.WriteString(",\n") + } + in.Buffer.WriteString(fmt.Sprintf("%s}", in.Indent)) +} + +func doDumpNumber(in doDumpInternalInput) { + if v, ok := in.Value.(iString); ok { + s := v.String() + if !in.Option.WithType { + in.Buffer.WriteString(fmt.Sprintf(`"%v"`, addSlashesForString(s))) + } else { + in.Buffer.WriteString(fmt.Sprintf( + `%s(%d) "%v"`, + in.ReflectTypeName, + len(s), + addSlashesForString(s), + )) + } + } else { + doDumpDefault(in) + } +} + +func doDumpString(in doDumpInternalInput) { + s := in.ReflectValue.String() + if !in.Option.WithType { + in.Buffer.WriteString(fmt.Sprintf(`"%v"`, addSlashesForString(s))) + } else { + in.Buffer.WriteString(fmt.Sprintf( + `%s(%d) "%v"`, + 
in.ReflectTypeName, + len(s), + addSlashesForString(s), + )) + } +} + +func doDumpBool(in doDumpInternalInput) { + var s string + if in.ReflectValue.Bool() { + s = `true` + } else { + s = `false` + } + if in.Option.WithType { + s = fmt.Sprintf(`bool(%s)`, s) + } + in.Buffer.WriteString(s) +} + +func doDumpDefault(in doDumpInternalInput) { + var s string + if in.ReflectValue.IsValid() && in.ReflectValue.CanInterface() { + s = fmt.Sprintf("%v", in.ReflectValue.Interface()) + } + if s == "" { + s = fmt.Sprintf("%v", in.Value) + } + s = gstr.Trim(s, `<>`) + if !in.Option.WithType { + in.Buffer.WriteString(s) + } else { + in.Buffer.WriteString(fmt.Sprintf("%s(%s)", in.ReflectTypeName, s)) + } +} + +func addSlashesForString(s string) string { + return gstr.ReplaceByMap(s, map[string]string{ + `"`: `\"`, + "\r": `\r`, + "\t": `\t`, + "\n": `\n`, + }) +} diff --git a/vendor/github.com/gogf/gf/v2/util/gutil/gutil_list.go b/vendor/github.com/gogf/gf/v2/util/gutil/gutil_list.go new file mode 100644 index 00000000..9a60a318 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/util/gutil/gutil_list.go @@ -0,0 +1,140 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gutil + +import ( + "reflect" + + "github.com/gogf/gf/v2/internal/utils" +) + +// ListItemValues retrieves and returns the elements of all item struct/map with key `key`. +// Note that the parameter `list` should be type of slice which contains elements of map or struct, +// or else it returns an empty slice. +// +// The parameter `list` supports types like: +// []map[string]interface{} +// []map[string]sub-map +// []struct +// []struct:sub-struct +// Note that the sub-map/sub-struct makes sense only if the optional parameter `subKey` is given. 
+func ListItemValues(list interface{}, key interface{}, subKey ...interface{}) (values []interface{}) { + var reflectValue reflect.Value + if v, ok := list.(reflect.Value); ok { + reflectValue = v + } else { + reflectValue = reflect.ValueOf(list) + } + reflectKind := reflectValue.Kind() + for reflectKind == reflect.Ptr { + reflectValue = reflectValue.Elem() + reflectKind = reflectValue.Kind() + } + switch reflectKind { + case reflect.Slice, reflect.Array: + if reflectValue.Len() == 0 { + return + } + values = []interface{}{} + for i := 0; i < reflectValue.Len(); i++ { + if value, ok := ItemValue(reflectValue.Index(i), key); ok { + if len(subKey) > 0 && subKey[0] != nil { + if subValue, ok := ItemValue(value, subKey[0]); ok { + value = subValue + } else { + continue + } + } + if array, ok := value.([]interface{}); ok { + values = append(values, array...) + } else { + values = append(values, value) + } + } + } + } + return +} + +// ItemValue retrieves and returns its value of which name/attribute specified by `key`. +// The parameter `item` can be type of map/*map/struct/*struct. +func ItemValue(item interface{}, key interface{}) (value interface{}, found bool) { + var reflectValue reflect.Value + if v, ok := item.(reflect.Value); ok { + reflectValue = v + } else { + reflectValue = reflect.ValueOf(item) + } + reflectKind := reflectValue.Kind() + if reflectKind == reflect.Interface { + reflectValue = reflectValue.Elem() + reflectKind = reflectValue.Kind() + } + for reflectKind == reflect.Ptr { + reflectValue = reflectValue.Elem() + reflectKind = reflectValue.Kind() + } + var keyValue reflect.Value + if v, ok := key.(reflect.Value); ok { + keyValue = v + } else { + keyValue = reflect.ValueOf(key) + } + switch reflectKind { + case reflect.Array, reflect.Slice: + // The `key` must be type of string. 
+ values := ListItemValues(reflectValue, keyValue.String()) + if values == nil { + return nil, false + } + return values, true + + case reflect.Map: + v := reflectValue.MapIndex(keyValue) + if v.IsValid() { + found = true + value = v.Interface() + } + + case reflect.Struct: + // The `mapKey` must be type of string. + v := reflectValue.FieldByName(keyValue.String()) + if v.IsValid() { + found = true + value = v.Interface() + } + } + return +} + +// ListItemValuesUnique retrieves and returns the unique elements of all struct/map with key `key`. +// Note that the parameter `list` should be type of slice which contains elements of map or struct, +// or else it returns an empty slice. +func ListItemValuesUnique(list interface{}, key string, subKey ...interface{}) []interface{} { + values := ListItemValues(list, key, subKey...) + if len(values) > 0 { + var ( + ok bool + m = make(map[interface{}]struct{}, len(values)) + ) + for i := 0; i < len(values); { + if _, ok = m[values[i]]; ok { + values = SliceDelete(values, i) + } else { + m[values[i]] = struct{}{} + i++ + } + } + } + return values +} + +// ListToMapByKey converts `list` to a map[string]interface{} of which key is specified by `key`. +// Note that the item value may be type of slice. +func ListToMapByKey(list []map[string]interface{}, key string) map[string]interface{} { + return utils.ListToMapByKey(list, key) +} diff --git a/vendor/github.com/gogf/gf/v2/util/gutil/gutil_map.go b/vendor/github.com/gogf/gf/v2/util/gutil/gutil_map.go new file mode 100644 index 00000000..92e47043 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/util/gutil/gutil_map.go @@ -0,0 +1,115 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. 
+ +package gutil + +import ( + "reflect" + + "github.com/gogf/gf/v2/internal/utils" +) + +// MapCopy does a shallow copy from map `data` to `copy` for most commonly used map type +// map[string]interface{}. +func MapCopy(data map[string]interface{}) (copy map[string]interface{}) { + copy = make(map[string]interface{}, len(data)) + for k, v := range data { + copy[k] = v + } + return +} + +// MapContains checks whether map `data` contains `key`. +func MapContains(data map[string]interface{}, key string) (ok bool) { + if len(data) == 0 { + return + } + _, ok = data[key] + return +} + +// MapDelete deletes all `keys` from map `data`. +func MapDelete(data map[string]interface{}, keys ...string) { + if len(data) == 0 { + return + } + for _, key := range keys { + delete(data, key) + } +} + +// MapMerge merges all map from `src` to map `dst`. +func MapMerge(dst map[string]interface{}, src ...map[string]interface{}) { + if dst == nil { + return + } + for _, m := range src { + for k, v := range m { + dst[k] = v + } + } +} + +// MapMergeCopy creates and returns a new map which merges all map from `src`. +func MapMergeCopy(src ...map[string]interface{}) (copy map[string]interface{}) { + copy = make(map[string]interface{}) + for _, m := range src { + for k, v := range m { + copy[k] = v + } + } + return +} + +// MapPossibleItemByKey tries to find the possible key-value pair for given key ignoring cases and symbols. +// +// Note that this function might be of low performance. +func MapPossibleItemByKey(data map[string]interface{}, key string) (foundKey string, foundValue interface{}) { + return utils.MapPossibleItemByKey(data, key) +} + +// MapContainsPossibleKey checks if the given `key` is contained in given map `data`. +// It checks the key ignoring cases and symbols. +// +// Note that this function might be of low performance. 
+func MapContainsPossibleKey(data map[string]interface{}, key string) bool { + return utils.MapContainsPossibleKey(data, key) +} + +// MapOmitEmpty deletes all empty values from given map. +func MapOmitEmpty(data map[string]interface{}) { + if len(data) == 0 { + return + } + for k, v := range data { + if IsEmpty(v) { + delete(data, k) + } + } +} + +// MapToSlice converts map to slice of which all keys and values are its items. +// Eg: {"K1": "v1", "K2": "v2"} => ["K1", "v1", "K2", "v2"] +func MapToSlice(data interface{}) []interface{} { + var ( + reflectValue = reflect.ValueOf(data) + reflectKind = reflectValue.Kind() + ) + for reflectKind == reflect.Ptr { + reflectValue = reflectValue.Elem() + reflectKind = reflectValue.Kind() + } + switch reflectKind { + case reflect.Map: + array := make([]interface{}, 0) + for _, key := range reflectValue.MapKeys() { + array = append(array, key.Interface()) + array = append(array, reflectValue.MapIndex(key).Interface()) + } + return array + } + return nil +} diff --git a/vendor/github.com/gogf/gf/v2/util/gutil/gutil_reflect.go b/vendor/github.com/gogf/gf/v2/util/gutil/gutil_reflect.go new file mode 100644 index 00000000..87fdb781 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/util/gutil/gutil_reflect.go @@ -0,0 +1,26 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gutil + +import ( + "github.com/gogf/gf/v2/internal/reflection" +) + +type ( + OriginValueAndKindOutput = reflection.OriginValueAndKindOutput + OriginTypeAndKindOutput = reflection.OriginTypeAndKindOutput +) + +// OriginValueAndKind retrieves and returns the original reflect value and kind. 
+func OriginValueAndKind(value interface{}) (out OriginValueAndKindOutput) { + return reflection.OriginValueAndKind(value) +} + +// OriginTypeAndKind retrieves and returns the original reflect type and kind. +func OriginTypeAndKind(value interface{}) (out OriginTypeAndKindOutput) { + return reflection.OriginTypeAndKind(value) +} diff --git a/vendor/github.com/gogf/gf/v2/util/gutil/gutil_slice.go b/vendor/github.com/gogf/gf/v2/util/gutil/gutil_slice.go new file mode 100644 index 00000000..fa8d71e5 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/util/gutil/gutil_slice.go @@ -0,0 +1,118 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gutil + +import ( + "reflect" + + "github.com/gogf/gf/v2/util/gconv" +) + +// SliceCopy does a shallow copy of slice `data` for most commonly used slice type +// []interface{}. +func SliceCopy(slice []interface{}) []interface{} { + newSlice := make([]interface{}, len(slice)) + copy(newSlice, slice) + return newSlice +} + +// SliceInsertBefore inserts the `values` to the front of `index` and returns a new slice. +func SliceInsertBefore(slice []interface{}, index int, values ...interface{}) (newSlice []interface{}) { + if index < 0 || index >= len(slice) { + return slice + } + newSlice = make([]interface{}, len(slice)+len(values)) + copy(newSlice, slice[0:index]) + copy(newSlice[index:], values) + copy(newSlice[index+len(values):], slice[index:]) + return +} + +// SliceInsertAfter inserts the `values` to the back of `index` and returns a new slice. 
+func SliceInsertAfter(slice []interface{}, index int, values ...interface{}) (newSlice []interface{}) { + if index < 0 || index >= len(slice) { + return slice + } + newSlice = make([]interface{}, len(slice)+len(values)) + copy(newSlice, slice[0:index+1]) + copy(newSlice[index+1:], values) + copy(newSlice[index+1+len(values):], slice[index+1:]) + return +} + +// SliceDelete deletes an element at `index` and returns the new slice. +// It does nothing if the given `index` is invalid. +func SliceDelete(slice []interface{}, index int) (newSlice []interface{}) { + if index < 0 || index >= len(slice) { + return slice + } + // Determine array boundaries when deleting to improve deletion efficiency. + if index == 0 { + return slice[1:] + } else if index == len(slice)-1 { + return slice[:index] + } + // If it is a non-boundary delete, + // it will involve the creation of an array, + // then the deletion is less efficient. + return append(slice[:index], slice[index+1:]...) +} + +// SliceToMap converts slice type variable `slice` to `map[string]interface{}`. +// Note that if the length of `slice` is not an even number, it returns nil. 
+// Eg: +// ["K1", "v1", "K2", "v2"] => {"K1": "v1", "K2": "v2"} +// ["K1", "v1", "K2"] => nil +func SliceToMap(slice interface{}) map[string]interface{} { + var ( + reflectValue = reflect.ValueOf(slice) + reflectKind = reflectValue.Kind() + ) + for reflectKind == reflect.Ptr { + reflectValue = reflectValue.Elem() + reflectKind = reflectValue.Kind() + } + switch reflectKind { + case reflect.Slice, reflect.Array: + length := reflectValue.Len() + if length%2 != 0 { + return nil + } + data := make(map[string]interface{}) + for i := 0; i < reflectValue.Len(); i += 2 { + data[gconv.String(reflectValue.Index(i).Interface())] = reflectValue.Index(i + 1).Interface() + } + return data + } + return nil +} + +// SliceToMapWithColumnAsKey converts slice type variable `slice` to `map[interface{}]interface{}` +// The value of specified column use as the key for returned map. +// Eg: +// SliceToMapWithColumnAsKey([{"K1": "v1", "K2": 1}, {"K1": "v2", "K2": 2}], "K1") => {"v1": {"K1": "v1", "K2": 1}, "v2": {"K1": "v2", "K2": 2}} +// SliceToMapWithColumnAsKey([{"K1": "v1", "K2": 1}, {"K1": "v2", "K2": 2}], "K2") => {1: {"K1": "v1", "K2": 1}, 2: {"K1": "v2", "K2": 2}} +func SliceToMapWithColumnAsKey(slice interface{}, key interface{}) map[interface{}]interface{} { + var ( + reflectValue = reflect.ValueOf(slice) + reflectKind = reflectValue.Kind() + ) + for reflectKind == reflect.Ptr { + reflectValue = reflectValue.Elem() + reflectKind = reflectValue.Kind() + } + data := make(map[interface{}]interface{}) + switch reflectKind { + case reflect.Slice, reflect.Array: + for i := 0; i < reflectValue.Len(); i++ { + if k, ok := ItemValue(reflectValue.Index(i), key); ok { + data[k] = reflectValue.Index(i).Interface() + } + } + } + return data +} diff --git a/vendor/github.com/gogf/gf/v2/util/gutil/gutil_struct.go b/vendor/github.com/gogf/gf/v2/util/gutil/gutil_struct.go new file mode 100644 index 00000000..bd856fc4 --- /dev/null +++ b/vendor/github.com/gogf/gf/v2/util/gutil/gutil_struct.go @@ 
-0,0 +1,38 @@ +// Copyright GoFrame Author(https://goframe.org). All Rights Reserved. +// +// This Source Code Form is subject to the terms of the MIT License. +// If a copy of the MIT was not distributed with this file, +// You can obtain one at https://github.com/gogf/gf. + +package gutil + +import ( + "reflect" + + "github.com/gogf/gf/v2/util/gconv" +) + +// StructToSlice converts struct to slice of which all keys and values are its items. +// Eg: {"K1": "v1", "K2": "v2"} => ["K1", "v1", "K2", "v2"] +func StructToSlice(data interface{}) []interface{} { + var ( + reflectValue = reflect.ValueOf(data) + reflectKind = reflectValue.Kind() + ) + for reflectKind == reflect.Ptr { + reflectValue = reflectValue.Elem() + reflectKind = reflectValue.Kind() + } + switch reflectKind { + case reflect.Struct: + array := make([]interface{}, 0) + // Note that, it uses the gconv tag name instead of the attribute name if + // the gconv tag is fined in the struct attributes. + for k, v := range gconv.Map(reflectValue) { + array = append(array, k) + array = append(array, v) + } + return array + } + return nil +} diff --git a/vendor/github.com/mattn/go-colorable/LICENSE b/vendor/github.com/mattn/go-colorable/LICENSE new file mode 100644 index 00000000..91b5cef3 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/mattn/go-colorable/README.md b/vendor/github.com/mattn/go-colorable/README.md new file mode 100644 index 00000000..ca048371 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/README.md @@ -0,0 +1,48 @@ +# go-colorable + +[![Build Status](https://github.com/mattn/go-colorable/workflows/test/badge.svg)](https://github.com/mattn/go-colorable/actions?query=workflow%3Atest) +[![Codecov](https://codecov.io/gh/mattn/go-colorable/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-colorable) +[![GoDoc](https://godoc.org/github.com/mattn/go-colorable?status.svg)](http://godoc.org/github.com/mattn/go-colorable) +[![Go Report Card](https://goreportcard.com/badge/mattn/go-colorable)](https://goreportcard.com/report/mattn/go-colorable) + +Colorable writer for windows. + +For example, most of logger packages doesn't show colors on windows. (I know we can do it with ansicon. But I don't want.) +This package is possible to handle escape sequence for ansi color on windows. + +## Too Bad! + +![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/bad.png) + + +## So Good! + +![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/good.png) + +## Usage + +```go +logrus.SetFormatter(&logrus.TextFormatter{ForceColors: true}) +logrus.SetOutput(colorable.NewColorableStdout()) + +logrus.Info("succeeded") +logrus.Warn("not correct") +logrus.Error("something error") +logrus.Fatal("panic") +``` + +You can compile above code on non-windows OSs. 
+ +## Installation + +``` +$ go get github.com/mattn/go-colorable +``` + +# License + +MIT + +# Author + +Yasuhiro Matsumoto (a.k.a mattn) diff --git a/vendor/github.com/mattn/go-colorable/colorable_appengine.go b/vendor/github.com/mattn/go-colorable/colorable_appengine.go new file mode 100644 index 00000000..416d1bbb --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/colorable_appengine.go @@ -0,0 +1,38 @@ +//go:build appengine +// +build appengine + +package colorable + +import ( + "io" + "os" + + _ "github.com/mattn/go-isatty" +) + +// NewColorable returns new instance of Writer which handles escape sequence. +func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + return file +} + +// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout. +func NewColorableStdout() io.Writer { + return os.Stdout +} + +// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr. +func NewColorableStderr() io.Writer { + return os.Stderr +} + +// EnableColorsStdout enable colors if possible. +func EnableColorsStdout(enabled *bool) func() { + if enabled != nil { + *enabled = true + } + return func() {} +} diff --git a/vendor/github.com/mattn/go-colorable/colorable_others.go b/vendor/github.com/mattn/go-colorable/colorable_others.go new file mode 100644 index 00000000..766d9460 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/colorable_others.go @@ -0,0 +1,38 @@ +//go:build !windows && !appengine +// +build !windows,!appengine + +package colorable + +import ( + "io" + "os" + + _ "github.com/mattn/go-isatty" +) + +// NewColorable returns new instance of Writer which handles escape sequence. 
+func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + return file +} + +// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout. +func NewColorableStdout() io.Writer { + return os.Stdout +} + +// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr. +func NewColorableStderr() io.Writer { + return os.Stderr +} + +// EnableColorsStdout enable colors if possible. +func EnableColorsStdout(enabled *bool) func() { + if enabled != nil { + *enabled = true + } + return func() {} +} diff --git a/vendor/github.com/mattn/go-colorable/colorable_windows.go b/vendor/github.com/mattn/go-colorable/colorable_windows.go new file mode 100644 index 00000000..1846ad5a --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/colorable_windows.go @@ -0,0 +1,1047 @@ +//go:build windows && !appengine +// +build windows,!appengine + +package colorable + +import ( + "bytes" + "io" + "math" + "os" + "strconv" + "strings" + "sync" + "syscall" + "unsafe" + + "github.com/mattn/go-isatty" +) + +const ( + foregroundBlue = 0x1 + foregroundGreen = 0x2 + foregroundRed = 0x4 + foregroundIntensity = 0x8 + foregroundMask = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity) + backgroundBlue = 0x10 + backgroundGreen = 0x20 + backgroundRed = 0x40 + backgroundIntensity = 0x80 + backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity) + commonLvbUnderscore = 0x8000 + + cENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x4 +) + +const ( + genericRead = 0x80000000 + genericWrite = 0x40000000 +) + +const ( + consoleTextmodeBuffer = 0x1 +) + +type wchar uint16 +type short int16 +type dword uint32 +type word uint16 + +type coord struct { + x short + y short +} + +type smallRect struct { + left short + top short + right short + bottom short +} + +type consoleScreenBufferInfo struct { + size coord + cursorPosition coord + 
attributes word + window smallRect + maximumWindowSize coord +} + +type consoleCursorInfo struct { + size dword + visible int32 +} + +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") + procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute") + procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition") + procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW") + procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute") + procGetConsoleCursorInfo = kernel32.NewProc("GetConsoleCursorInfo") + procSetConsoleCursorInfo = kernel32.NewProc("SetConsoleCursorInfo") + procSetConsoleTitle = kernel32.NewProc("SetConsoleTitleW") + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") + procSetConsoleMode = kernel32.NewProc("SetConsoleMode") + procCreateConsoleScreenBuffer = kernel32.NewProc("CreateConsoleScreenBuffer") +) + +// Writer provides colorable Writer to the console +type Writer struct { + out io.Writer + handle syscall.Handle + althandle syscall.Handle + oldattr word + oldpos coord + rest bytes.Buffer + mutex sync.Mutex +} + +// NewColorable returns new instance of Writer which handles escape sequence from File. 
+func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + if isatty.IsTerminal(file.Fd()) { + var mode uint32 + if r, _, _ := procGetConsoleMode.Call(file.Fd(), uintptr(unsafe.Pointer(&mode))); r != 0 && mode&cENABLE_VIRTUAL_TERMINAL_PROCESSING != 0 { + return file + } + var csbi consoleScreenBufferInfo + handle := syscall.Handle(file.Fd()) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + return &Writer{out: file, handle: handle, oldattr: csbi.attributes, oldpos: coord{0, 0}} + } + return file +} + +// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout. +func NewColorableStdout() io.Writer { + return NewColorable(os.Stdout) +} + +// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr. +func NewColorableStderr() io.Writer { + return NewColorable(os.Stderr) +} + +var color256 = map[int]int{ + 0: 0x000000, + 1: 0x800000, + 2: 0x008000, + 3: 0x808000, + 4: 0x000080, + 5: 0x800080, + 6: 0x008080, + 7: 0xc0c0c0, + 8: 0x808080, + 9: 0xff0000, + 10: 0x00ff00, + 11: 0xffff00, + 12: 0x0000ff, + 13: 0xff00ff, + 14: 0x00ffff, + 15: 0xffffff, + 16: 0x000000, + 17: 0x00005f, + 18: 0x000087, + 19: 0x0000af, + 20: 0x0000d7, + 21: 0x0000ff, + 22: 0x005f00, + 23: 0x005f5f, + 24: 0x005f87, + 25: 0x005faf, + 26: 0x005fd7, + 27: 0x005fff, + 28: 0x008700, + 29: 0x00875f, + 30: 0x008787, + 31: 0x0087af, + 32: 0x0087d7, + 33: 0x0087ff, + 34: 0x00af00, + 35: 0x00af5f, + 36: 0x00af87, + 37: 0x00afaf, + 38: 0x00afd7, + 39: 0x00afff, + 40: 0x00d700, + 41: 0x00d75f, + 42: 0x00d787, + 43: 0x00d7af, + 44: 0x00d7d7, + 45: 0x00d7ff, + 46: 0x00ff00, + 47: 0x00ff5f, + 48: 0x00ff87, + 49: 0x00ffaf, + 50: 0x00ffd7, + 51: 0x00ffff, + 52: 0x5f0000, + 53: 0x5f005f, + 54: 0x5f0087, + 55: 0x5f00af, + 56: 0x5f00d7, + 57: 0x5f00ff, + 58: 0x5f5f00, + 59: 0x5f5f5f, + 60: 0x5f5f87, + 61: 0x5f5faf, + 62: 0x5f5fd7, + 63: 
0x5f5fff, + 64: 0x5f8700, + 65: 0x5f875f, + 66: 0x5f8787, + 67: 0x5f87af, + 68: 0x5f87d7, + 69: 0x5f87ff, + 70: 0x5faf00, + 71: 0x5faf5f, + 72: 0x5faf87, + 73: 0x5fafaf, + 74: 0x5fafd7, + 75: 0x5fafff, + 76: 0x5fd700, + 77: 0x5fd75f, + 78: 0x5fd787, + 79: 0x5fd7af, + 80: 0x5fd7d7, + 81: 0x5fd7ff, + 82: 0x5fff00, + 83: 0x5fff5f, + 84: 0x5fff87, + 85: 0x5fffaf, + 86: 0x5fffd7, + 87: 0x5fffff, + 88: 0x870000, + 89: 0x87005f, + 90: 0x870087, + 91: 0x8700af, + 92: 0x8700d7, + 93: 0x8700ff, + 94: 0x875f00, + 95: 0x875f5f, + 96: 0x875f87, + 97: 0x875faf, + 98: 0x875fd7, + 99: 0x875fff, + 100: 0x878700, + 101: 0x87875f, + 102: 0x878787, + 103: 0x8787af, + 104: 0x8787d7, + 105: 0x8787ff, + 106: 0x87af00, + 107: 0x87af5f, + 108: 0x87af87, + 109: 0x87afaf, + 110: 0x87afd7, + 111: 0x87afff, + 112: 0x87d700, + 113: 0x87d75f, + 114: 0x87d787, + 115: 0x87d7af, + 116: 0x87d7d7, + 117: 0x87d7ff, + 118: 0x87ff00, + 119: 0x87ff5f, + 120: 0x87ff87, + 121: 0x87ffaf, + 122: 0x87ffd7, + 123: 0x87ffff, + 124: 0xaf0000, + 125: 0xaf005f, + 126: 0xaf0087, + 127: 0xaf00af, + 128: 0xaf00d7, + 129: 0xaf00ff, + 130: 0xaf5f00, + 131: 0xaf5f5f, + 132: 0xaf5f87, + 133: 0xaf5faf, + 134: 0xaf5fd7, + 135: 0xaf5fff, + 136: 0xaf8700, + 137: 0xaf875f, + 138: 0xaf8787, + 139: 0xaf87af, + 140: 0xaf87d7, + 141: 0xaf87ff, + 142: 0xafaf00, + 143: 0xafaf5f, + 144: 0xafaf87, + 145: 0xafafaf, + 146: 0xafafd7, + 147: 0xafafff, + 148: 0xafd700, + 149: 0xafd75f, + 150: 0xafd787, + 151: 0xafd7af, + 152: 0xafd7d7, + 153: 0xafd7ff, + 154: 0xafff00, + 155: 0xafff5f, + 156: 0xafff87, + 157: 0xafffaf, + 158: 0xafffd7, + 159: 0xafffff, + 160: 0xd70000, + 161: 0xd7005f, + 162: 0xd70087, + 163: 0xd700af, + 164: 0xd700d7, + 165: 0xd700ff, + 166: 0xd75f00, + 167: 0xd75f5f, + 168: 0xd75f87, + 169: 0xd75faf, + 170: 0xd75fd7, + 171: 0xd75fff, + 172: 0xd78700, + 173: 0xd7875f, + 174: 0xd78787, + 175: 0xd787af, + 176: 0xd787d7, + 177: 0xd787ff, + 178: 0xd7af00, + 179: 0xd7af5f, + 180: 0xd7af87, + 181: 0xd7afaf, + 182: 0xd7afd7, + 
183: 0xd7afff, + 184: 0xd7d700, + 185: 0xd7d75f, + 186: 0xd7d787, + 187: 0xd7d7af, + 188: 0xd7d7d7, + 189: 0xd7d7ff, + 190: 0xd7ff00, + 191: 0xd7ff5f, + 192: 0xd7ff87, + 193: 0xd7ffaf, + 194: 0xd7ffd7, + 195: 0xd7ffff, + 196: 0xff0000, + 197: 0xff005f, + 198: 0xff0087, + 199: 0xff00af, + 200: 0xff00d7, + 201: 0xff00ff, + 202: 0xff5f00, + 203: 0xff5f5f, + 204: 0xff5f87, + 205: 0xff5faf, + 206: 0xff5fd7, + 207: 0xff5fff, + 208: 0xff8700, + 209: 0xff875f, + 210: 0xff8787, + 211: 0xff87af, + 212: 0xff87d7, + 213: 0xff87ff, + 214: 0xffaf00, + 215: 0xffaf5f, + 216: 0xffaf87, + 217: 0xffafaf, + 218: 0xffafd7, + 219: 0xffafff, + 220: 0xffd700, + 221: 0xffd75f, + 222: 0xffd787, + 223: 0xffd7af, + 224: 0xffd7d7, + 225: 0xffd7ff, + 226: 0xffff00, + 227: 0xffff5f, + 228: 0xffff87, + 229: 0xffffaf, + 230: 0xffffd7, + 231: 0xffffff, + 232: 0x080808, + 233: 0x121212, + 234: 0x1c1c1c, + 235: 0x262626, + 236: 0x303030, + 237: 0x3a3a3a, + 238: 0x444444, + 239: 0x4e4e4e, + 240: 0x585858, + 241: 0x626262, + 242: 0x6c6c6c, + 243: 0x767676, + 244: 0x808080, + 245: 0x8a8a8a, + 246: 0x949494, + 247: 0x9e9e9e, + 248: 0xa8a8a8, + 249: 0xb2b2b2, + 250: 0xbcbcbc, + 251: 0xc6c6c6, + 252: 0xd0d0d0, + 253: 0xdadada, + 254: 0xe4e4e4, + 255: 0xeeeeee, +} + +// `\033]0;TITLESTR\007` +func doTitleSequence(er *bytes.Reader) error { + var c byte + var err error + + c, err = er.ReadByte() + if err != nil { + return err + } + if c != '0' && c != '2' { + return nil + } + c, err = er.ReadByte() + if err != nil { + return err + } + if c != ';' { + return nil + } + title := make([]byte, 0, 80) + for { + c, err = er.ReadByte() + if err != nil { + return err + } + if c == 0x07 || c == '\n' { + break + } + title = append(title, c) + } + if len(title) > 0 { + title8, err := syscall.UTF16PtrFromString(string(title)) + if err == nil { + procSetConsoleTitle.Call(uintptr(unsafe.Pointer(title8))) + } + } + return nil +} + +// returns Atoi(s) unless s == "" in which case it returns def +func atoiWithDefault(s string, 
def int) (int, error) { + if s == "" { + return def, nil + } + return strconv.Atoi(s) +} + +// Write writes data on console +func (w *Writer) Write(data []byte) (n int, err error) { + w.mutex.Lock() + defer w.mutex.Unlock() + var csbi consoleScreenBufferInfo + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + + handle := w.handle + + var er *bytes.Reader + if w.rest.Len() > 0 { + var rest bytes.Buffer + w.rest.WriteTo(&rest) + w.rest.Reset() + rest.Write(data) + er = bytes.NewReader(rest.Bytes()) + } else { + er = bytes.NewReader(data) + } + var plaintext bytes.Buffer +loop: + for { + c1, err := er.ReadByte() + if err != nil { + plaintext.WriteTo(w.out) + break loop + } + if c1 != 0x1b { + plaintext.WriteByte(c1) + continue + } + _, err = plaintext.WriteTo(w.out) + if err != nil { + break loop + } + c2, err := er.ReadByte() + if err != nil { + break loop + } + + switch c2 { + case '>': + continue + case ']': + w.rest.WriteByte(c1) + w.rest.WriteByte(c2) + er.WriteTo(&w.rest) + if bytes.IndexByte(w.rest.Bytes(), 0x07) == -1 { + break loop + } + er = bytes.NewReader(w.rest.Bytes()[2:]) + err := doTitleSequence(er) + if err != nil { + break loop + } + w.rest.Reset() + continue + // https://github.com/mattn/go-colorable/issues/27 + case '7': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + w.oldpos = csbi.cursorPosition + continue + case '8': + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&w.oldpos))) + continue + case 0x5b: + // execute part after switch + default: + continue + } + + w.rest.WriteByte(c1) + w.rest.WriteByte(c2) + er.WriteTo(&w.rest) + + var buf bytes.Buffer + var m byte + for i, c := range w.rest.Bytes()[2:] { + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { + m = c + er = bytes.NewReader(w.rest.Bytes()[2+i+1:]) + w.rest.Reset() + break + } + buf.Write([]byte(string(c))) + } + if m == 0 { + break loop + } + + switch m { + 
case 'A': + n, err = atoiWithDefault(buf.String(), 1) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'B': + n, err = atoiWithDefault(buf.String(), 1) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y += short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'C': + n, err = atoiWithDefault(buf.String(), 1) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x += short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'D': + n, err = atoiWithDefault(buf.String(), 1) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x -= short(n) + if csbi.cursorPosition.x < 0 { + csbi.cursorPosition.x = 0 + } + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'E': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y += short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'F': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), 
*(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'G': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + if n < 1 { + n = 1 + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = short(n - 1) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'H', 'f': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + if buf.Len() > 0 { + token := strings.Split(buf.String(), ";") + switch len(token) { + case 1: + n1, err := strconv.Atoi(token[0]) + if err != nil { + continue + } + csbi.cursorPosition.y = short(n1 - 1) + case 2: + n1, err := strconv.Atoi(token[0]) + if err != nil { + continue + } + n2, err := strconv.Atoi(token[1]) + if err != nil { + continue + } + csbi.cursorPosition.x = short(n2 - 1) + csbi.cursorPosition.y = short(n1 - 1) + } + } else { + csbi.cursorPosition.y = 0 + } + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'J': + n := 0 + if buf.Len() > 0 { + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + } + var count, written dword + var cursor coord + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.size.y-csbi.cursorPosition.y)*dword(csbi.size.x) + case 1: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.window.top-csbi.cursorPosition.y)*dword(csbi.size.x) + case 2: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.size.y-csbi.cursorPosition.y)*dword(csbi.size.x) + } + procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), 
uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'K': + n := 0 + if buf.Len() > 0 { + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + var cursor coord + var count, written dword + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + count = dword(csbi.size.x - csbi.cursorPosition.x) + case 1: + cursor = coord{x: csbi.window.left, y: csbi.cursorPosition.y} + count = dword(csbi.size.x - csbi.cursorPosition.x) + case 2: + cursor = coord{x: csbi.window.left, y: csbi.cursorPosition.y} + count = dword(csbi.size.x) + } + procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'X': + n := 0 + if buf.Len() > 0 { + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + var cursor coord + var written dword + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(n), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(n), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'm': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + attr := csbi.attributes + cs := buf.String() + if 
cs == "" { + procSetConsoleTextAttribute.Call(uintptr(handle), uintptr(w.oldattr)) + continue + } + token := strings.Split(cs, ";") + for i := 0; i < len(token); i++ { + ns := token[i] + if n, err = strconv.Atoi(ns); err == nil { + switch { + case n == 0 || n == 100: + attr = w.oldattr + case n == 4: + attr |= commonLvbUnderscore + case (1 <= n && n <= 3) || n == 5: + attr |= foregroundIntensity + case n == 7 || n == 27: + attr = + (attr &^ (foregroundMask | backgroundMask)) | + ((attr & foregroundMask) << 4) | + ((attr & backgroundMask) >> 4) + case n == 22: + attr &^= foregroundIntensity + case n == 24: + attr &^= commonLvbUnderscore + case 30 <= n && n <= 37: + attr &= backgroundMask + if (n-30)&1 != 0 { + attr |= foregroundRed + } + if (n-30)&2 != 0 { + attr |= foregroundGreen + } + if (n-30)&4 != 0 { + attr |= foregroundBlue + } + case n == 38: // set foreground color. + if i < len(token)-2 && (token[i+1] == "5" || token[i+1] == "05") { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256foreAttr == nil { + n256setup() + } + attr &= backgroundMask + attr |= n256foreAttr[n256%len(n256foreAttr)] + i += 2 + } + } else if len(token) == 5 && token[i+1] == "2" { + var r, g, b int + r, _ = strconv.Atoi(token[i+2]) + g, _ = strconv.Atoi(token[i+3]) + b, _ = strconv.Atoi(token[i+4]) + i += 4 + if r > 127 { + attr |= foregroundRed + } + if g > 127 { + attr |= foregroundGreen + } + if b > 127 { + attr |= foregroundBlue + } + } else { + attr = attr & (w.oldattr & backgroundMask) + } + case n == 39: // reset foreground color. + attr &= backgroundMask + attr |= w.oldattr & foregroundMask + case 40 <= n && n <= 47: + attr &= foregroundMask + if (n-40)&1 != 0 { + attr |= backgroundRed + } + if (n-40)&2 != 0 { + attr |= backgroundGreen + } + if (n-40)&4 != 0 { + attr |= backgroundBlue + } + case n == 48: // set background color. 
+ if i < len(token)-2 && token[i+1] == "5" { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256backAttr == nil { + n256setup() + } + attr &= foregroundMask + attr |= n256backAttr[n256%len(n256backAttr)] + i += 2 + } + } else if len(token) == 5 && token[i+1] == "2" { + var r, g, b int + r, _ = strconv.Atoi(token[i+2]) + g, _ = strconv.Atoi(token[i+3]) + b, _ = strconv.Atoi(token[i+4]) + i += 4 + if r > 127 { + attr |= backgroundRed + } + if g > 127 { + attr |= backgroundGreen + } + if b > 127 { + attr |= backgroundBlue + } + } else { + attr = attr & (w.oldattr & foregroundMask) + } + case n == 49: // reset foreground color. + attr &= foregroundMask + attr |= w.oldattr & backgroundMask + case 90 <= n && n <= 97: + attr = (attr & backgroundMask) + attr |= foregroundIntensity + if (n-90)&1 != 0 { + attr |= foregroundRed + } + if (n-90)&2 != 0 { + attr |= foregroundGreen + } + if (n-90)&4 != 0 { + attr |= foregroundBlue + } + case 100 <= n && n <= 107: + attr = (attr & foregroundMask) + attr |= backgroundIntensity + if (n-100)&1 != 0 { + attr |= backgroundRed + } + if (n-100)&2 != 0 { + attr |= backgroundGreen + } + if (n-100)&4 != 0 { + attr |= backgroundBlue + } + } + procSetConsoleTextAttribute.Call(uintptr(handle), uintptr(attr)) + } + } + case 'h': + var ci consoleCursorInfo + cs := buf.String() + if cs == "5>" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 0 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?25" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 1 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?1049" { + if w.althandle == 0 { + h, _, _ := procCreateConsoleScreenBuffer.Call(uintptr(genericRead|genericWrite), 0, 0, uintptr(consoleTextmodeBuffer), 0, 0) + w.althandle = syscall.Handle(h) + if w.althandle != 0 { + handle = w.althandle + } + } + } 
+ case 'l': + var ci consoleCursorInfo + cs := buf.String() + if cs == "5>" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 1 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?25" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 0 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?1049" { + if w.althandle != 0 { + syscall.CloseHandle(w.althandle) + w.althandle = 0 + handle = w.handle + } + } + case 's': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + w.oldpos = csbi.cursorPosition + case 'u': + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&w.oldpos))) + } + } + + return len(data), nil +} + +type consoleColor struct { + rgb int + red bool + green bool + blue bool + intensity bool +} + +func (c consoleColor) foregroundAttr() (attr word) { + if c.red { + attr |= foregroundRed + } + if c.green { + attr |= foregroundGreen + } + if c.blue { + attr |= foregroundBlue + } + if c.intensity { + attr |= foregroundIntensity + } + return +} + +func (c consoleColor) backgroundAttr() (attr word) { + if c.red { + attr |= backgroundRed + } + if c.green { + attr |= backgroundGreen + } + if c.blue { + attr |= backgroundBlue + } + if c.intensity { + attr |= backgroundIntensity + } + return +} + +var color16 = []consoleColor{ + {0x000000, false, false, false, false}, + {0x000080, false, false, true, false}, + {0x008000, false, true, false, false}, + {0x008080, false, true, true, false}, + {0x800000, true, false, false, false}, + {0x800080, true, false, true, false}, + {0x808000, true, true, false, false}, + {0xc0c0c0, true, true, true, false}, + {0x808080, false, false, false, true}, + {0x0000ff, false, false, true, true}, + {0x00ff00, false, true, false, true}, + {0x00ffff, false, true, true, true}, + {0xff0000, true, false, 
false, true}, + {0xff00ff, true, false, true, true}, + {0xffff00, true, true, false, true}, + {0xffffff, true, true, true, true}, +} + +type hsv struct { + h, s, v float32 +} + +func (a hsv) dist(b hsv) float32 { + dh := a.h - b.h + switch { + case dh > 0.5: + dh = 1 - dh + case dh < -0.5: + dh = -1 - dh + } + ds := a.s - b.s + dv := a.v - b.v + return float32(math.Sqrt(float64(dh*dh + ds*ds + dv*dv))) +} + +func toHSV(rgb int) hsv { + r, g, b := float32((rgb&0xFF0000)>>16)/256.0, + float32((rgb&0x00FF00)>>8)/256.0, + float32(rgb&0x0000FF)/256.0 + min, max := minmax3f(r, g, b) + h := max - min + if h > 0 { + if max == r { + h = (g - b) / h + if h < 0 { + h += 6 + } + } else if max == g { + h = 2 + (b-r)/h + } else { + h = 4 + (r-g)/h + } + } + h /= 6.0 + s := max - min + if max != 0 { + s /= max + } + v := max + return hsv{h: h, s: s, v: v} +} + +type hsvTable []hsv + +func toHSVTable(rgbTable []consoleColor) hsvTable { + t := make(hsvTable, len(rgbTable)) + for i, c := range rgbTable { + t[i] = toHSV(c.rgb) + } + return t +} + +func (t hsvTable) find(rgb int) consoleColor { + hsv := toHSV(rgb) + n := 7 + l := float32(5.0) + for i, p := range t { + d := hsv.dist(p) + if d < l { + l, n = d, i + } + } + return color16[n] +} + +func minmax3f(a, b, c float32) (min, max float32) { + if a < b { + if b < c { + return a, c + } else if a < c { + return a, b + } else { + return c, b + } + } else { + if a < c { + return b, c + } else if b < c { + return b, a + } else { + return c, a + } + } +} + +var n256foreAttr []word +var n256backAttr []word + +func n256setup() { + n256foreAttr = make([]word, 256) + n256backAttr = make([]word, 256) + t := toHSVTable(color16) + for i, rgb := range color256 { + c := t.find(rgb) + n256foreAttr[i] = c.foregroundAttr() + n256backAttr[i] = c.backgroundAttr() + } +} + +// EnableColorsStdout enable colors if possible. 
+func EnableColorsStdout(enabled *bool) func() { + var mode uint32 + h := os.Stdout.Fd() + if r, _, _ := procGetConsoleMode.Call(h, uintptr(unsafe.Pointer(&mode))); r != 0 { + if r, _, _ = procSetConsoleMode.Call(h, uintptr(mode|cENABLE_VIRTUAL_TERMINAL_PROCESSING)); r != 0 { + if enabled != nil { + *enabled = true + } + return func() { + procSetConsoleMode.Call(h, uintptr(mode)) + } + } + } + if enabled != nil { + *enabled = true + } + return func() {} +} diff --git a/vendor/github.com/mattn/go-colorable/noncolorable.go b/vendor/github.com/mattn/go-colorable/noncolorable.go new file mode 100644 index 00000000..05d6f74b --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/noncolorable.go @@ -0,0 +1,57 @@ +package colorable + +import ( + "bytes" + "io" +) + +// NonColorable holds writer but removes escape sequence. +type NonColorable struct { + out io.Writer +} + +// NewNonColorable returns new instance of Writer which removes escape sequence from Writer. +func NewNonColorable(w io.Writer) io.Writer { + return &NonColorable{out: w} +} + +// Write writes data on console +func (w *NonColorable) Write(data []byte) (n int, err error) { + er := bytes.NewReader(data) + var plaintext bytes.Buffer +loop: + for { + c1, err := er.ReadByte() + if err != nil { + plaintext.WriteTo(w.out) + break loop + } + if c1 != 0x1b { + plaintext.WriteByte(c1) + continue + } + _, err = plaintext.WriteTo(w.out) + if err != nil { + break loop + } + c2, err := er.ReadByte() + if err != nil { + break loop + } + if c2 != 0x5b { + continue + } + + for { + c, err := er.ReadByte() + if err != nil { + break loop + } + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { + break + } + } + } + + return len(data), nil +} diff --git a/vendor/go.opentelemetry.io/otel/.gitattributes b/vendor/go.opentelemetry.io/otel/.gitattributes new file mode 100644 index 00000000..314766e9 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/.gitattributes @@ -0,0 +1,3 @@ +* text=auto eol=lf 
+*.{cmd,[cC][mM][dD]} text eol=crlf +*.{bat,[bB][aA][tT]} text eol=crlf diff --git a/vendor/go.opentelemetry.io/otel/.gitignore b/vendor/go.opentelemetry.io/otel/.gitignore new file mode 100644 index 00000000..0b605b3d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/.gitignore @@ -0,0 +1,21 @@ +.DS_Store +Thumbs.db + +.tools/ +.idea/ +.vscode/ +*.iml +*.so +coverage.* + +gen/ + +/example/fib/fib +/example/fib/traces.txt +/example/jaeger/jaeger +/example/namedtracer/namedtracer +/example/opencensus/opencensus +/example/passthrough/passthrough +/example/prometheus/prometheus +/example/zipkin/zipkin +/example/otel-collector/otel-collector diff --git a/vendor/go.opentelemetry.io/otel/.gitmodules b/vendor/go.opentelemetry.io/otel/.gitmodules new file mode 100644 index 00000000..38a1f569 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/.gitmodules @@ -0,0 +1,3 @@ +[submodule "opentelemetry-proto"] + path = exporters/otlp/internal/opentelemetry-proto + url = https://github.com/open-telemetry/opentelemetry-proto diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml new file mode 100644 index 00000000..0f099f57 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -0,0 +1,244 @@ +# See https://github.com/golangci/golangci-lint#config-file +run: + issues-exit-code: 1 #Default + tests: true #Default + +linters: + # Disable everything by default so upgrades to not include new "default + # enabled" linters. + disable-all: true + # Specifically enable linters we want to use. + enable: + - depguard + - errcheck + - godot + - gofmt + - goimports + - gosimple + - govet + - ineffassign + - misspell + - revive + - staticcheck + - typecheck + - unused + +issues: + # Maximum issues count per one linter. + # Set to 0 to disable. + # Default: 50 + # Setting to unlimited so the linter only is run once to debug all issues. + max-issues-per-linter: 0 + # Maximum count of issues with the same text. 
+ # Set to 0 to disable. + # Default: 3 + # Setting to unlimited so the linter only is run once to debug all issues. + max-same-issues: 0 + # Excluding configuration per-path, per-linter, per-text and per-source. + exclude-rules: + # TODO: Having appropriate comments for exported objects helps development, + # even for objects in internal packages. Appropriate comments for all + # exported objects should be added and this exclusion removed. + - path: '.*internal/.*' + text: "exported (method|function|type|const) (.+) should have comment or be unexported" + linters: + - revive + # Yes, they are, but it's okay in a test. + - path: _test\.go + text: "exported func.*returns unexported type.*which can be annoying to use" + linters: + - revive + # Example test functions should be treated like main. + - path: example.*_test\.go + text: "calls to (.+) only in main[(][)] or init[(][)] functions" + linters: + - revive + include: + # revive exported should have comment or be unexported. + - EXC0012 + # revive package comment should be of the form ... + - EXC0013 + +linters-settings: + depguard: + # Check the list against standard lib. + # Default: false + include-go-root: true + # A list of packages for the list type specified. + # Default: [] + packages: + - "crypto/md5" + - "crypto/sha1" + - "crypto/**/pkix" + ignore-file-rules: + - "**/*_test.go" + additional-guards: + # Do not allow testing packages in non-test files. + - list-type: denylist + include-go-root: true + packages: + - testing + - github.com/stretchr/testify + ignore-file-rules: + - "**/*_test.go" + - "**/*test/*.go" + - "**/internal/matchers/*.go" + godot: + exclude: + # Exclude sentence fragments for lists. + - '^[ ]*[-•]' + # Exclude sentences prefixing a list. + - ':$' + goimports: + local-prefixes: go.opentelemetry.io + misspell: + locale: US + ignore-words: + - cancelled + revive: + # Sets the default failure confidence. + # This means that linting errors with less than 0.8 confidence will be ignored. 
+ # Default: 0.8 + confidence: 0.01 + rules: + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#blank-imports + - name: blank-imports + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#bool-literal-in-expr + - name: bool-literal-in-expr + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#constant-logical-expr + - name: constant-logical-expr + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-as-argument + # TODO (#3372) reenable linter when it is compatible. https://github.com/golangci/golangci-lint/issues/3280 + - name: context-as-argument + disabled: true + arguments: + allowTypesBefore: "*testing.T" + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-keys-type + - name: context-keys-type + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#deep-exit + - name: deep-exit + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#defer + - name: defer + disabled: false + arguments: + - ["call-chain", "loop"] + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#dot-imports + - name: dot-imports + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#duplicated-imports + - name: duplicated-imports + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#early-return + - name: early-return + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-block + - name: empty-block + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-lines + - name: empty-lines + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-naming + - name: error-naming + disabled: false + # 
https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-return + - name: error-return + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-strings + - name: error-strings + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#errorf + - name: errorf + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#exported + - name: exported + disabled: false + arguments: + - "sayRepetitiveInsteadOfStutters" + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#flag-parameter + - name: flag-parameter + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#identical-branches + - name: identical-branches + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#if-return + - name: if-return + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#increment-decrement + - name: increment-decrement + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#indent-error-flow + - name: indent-error-flow + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#import-shadowing + - name: import-shadowing + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#package-comments + - name: package-comments + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range + - name: range + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-in-closure + - name: range-val-in-closure + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-address + - name: range-val-address + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#redefines-builtin-id + - name: redefines-builtin-id + disabled: false + # 
https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#string-format + - name: string-format + disabled: false + arguments: + - - panic + - '/^[^\n]*$/' + - must not contain line breaks + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#struct-tag + - name: struct-tag + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#superfluous-else + - name: superfluous-else + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#time-equal + - name: time-equal + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-naming + - name: var-naming + disabled: false + arguments: + - ["ID"] # AllowList + - ["Otel", "Aws", "Gcp"] # DenyList + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-declaration + - name: var-declaration + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unconditional-recursion + - name: unconditional-recursion + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unexported-return + - name: unexported-return + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unhandled-error + - name: unhandled-error + disabled: false + arguments: + - "fmt.Fprint" + - "fmt.Fprintf" + - "fmt.Fprintln" + - "fmt.Print" + - "fmt.Printf" + - "fmt.Println" + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unnecessary-stmt + - name: unnecessary-stmt + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#useless-break + - name: useless-break + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#waitgroup-by-value + - name: waitgroup-by-value + disabled: false diff --git a/vendor/go.opentelemetry.io/otel/.lycheeignore b/vendor/go.opentelemetry.io/otel/.lycheeignore new file mode 100644 index 00000000..40d62fa2 --- 
/dev/null +++ b/vendor/go.opentelemetry.io/otel/.lycheeignore @@ -0,0 +1,6 @@ +http://localhost +http://jaeger-collector +https://github.com/open-telemetry/opentelemetry-go/milestone/ +https://github.com/open-telemetry/opentelemetry-go/projects +file:///home/runner/work/opentelemetry-go/opentelemetry-go/libraries +file:///home/runner/work/opentelemetry-go/opentelemetry-go/manual diff --git a/vendor/go.opentelemetry.io/otel/.markdownlint.yaml b/vendor/go.opentelemetry.io/otel/.markdownlint.yaml new file mode 100644 index 00000000..3202496c --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/.markdownlint.yaml @@ -0,0 +1,29 @@ +# Default state for all rules +default: true + +# ul-style +MD004: false + +# hard-tabs +MD010: false + +# line-length +MD013: false + +# no-duplicate-header +MD024: + siblings_only: true + +#single-title +MD025: false + +# ol-prefix +MD029: + style: ordered + +# no-inline-html +MD033: false + +# fenced-code-language +MD040: false + diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md new file mode 100644 index 00000000..1d9726f6 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -0,0 +1,2369 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). + +This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] + +## [1.14.0/0.37.0/0.0.4] 2023-02-27 + +This release is the last to support [Go 1.18]. +The next release will require at least [Go 1.19]. + +### Added + +- The `event` type semantic conventions are added to `go.opentelemetry.io/otel/semconv/v1.17.0`. (#3697) +- Support [Go 1.20]. (#3693) +- The `go.opentelemetry.io/otel/semconv/v1.18.0` package. + The package contains semantic conventions from the `v1.18.0` version of the OpenTelemetry specification. 
(#3719) + - The following `const` renames from `go.opentelemetry.io/otel/semconv/v1.17.0` are included: + - `OtelScopeNameKey` -> `OTelScopeNameKey` + - `OtelScopeVersionKey` -> `OTelScopeVersionKey` + - `OtelLibraryNameKey` -> `OTelLibraryNameKey` + - `OtelLibraryVersionKey` -> `OTelLibraryVersionKey` + - `OtelStatusCodeKey` -> `OTelStatusCodeKey` + - `OtelStatusDescriptionKey` -> `OTelStatusDescriptionKey` + - `OtelStatusCodeOk` -> `OTelStatusCodeOk` + - `OtelStatusCodeError` -> `OTelStatusCodeError` + - The following `func` renames from `go.opentelemetry.io/otel/semconv/v1.17.0` are included: + - `OtelScopeName` -> `OTelScopeName` + - `OtelScopeVersion` -> `OTelScopeVersion` + - `OtelLibraryName` -> `OTelLibraryName` + - `OtelLibraryVersion` -> `OTelLibraryVersion` + - `OtelStatusDescription` -> `OTelStatusDescription` +- A `IsSampled` method is added to the `SpanContext` implementation in `go.opentelemetry.io/otel/bridge/opentracing` to expose the span sampled state. + See the [README](./bridge/opentracing/README.md) for more information. (#3570) +- The `WithInstrumentationAttributes` option to `go.opentelemetry.io/otel/metric`. (#3738) +- The `WithInstrumentationAttributes` option to `go.opentelemetry.io/otel/trace`. (#3739) +- The following environment variables are supported by the periodic `Reader` in `go.opentelemetry.io/otel/sdk/metric`. (#3763) + - `OTEL_METRIC_EXPORT_INTERVAL` sets the time between collections and exports. + - `OTEL_METRIC_EXPORT_TIMEOUT` sets the timeout an export is attempted. + +### Changed + +- Fall-back to `TextMapCarrier` when it's not `HttpHeader`s in `go.opentelemetry.io/otel/bridge/opentracing`. (#3679) +- The `Collect` method of the `"go.opentelemetry.io/otel/sdk/metric".Reader` interface is updated to accept the `metricdata.ResourceMetrics` value the collection will be made into. + This change is made to enable memory reuse by SDK users. 
(#3732) +- The `WithUnit` option in `go.opentelemetry.io/otel/sdk/metric/instrument` is updated to accept a `string` for the unit value. (#3776) + +### Fixed + +- Ensure `go.opentelemetry.io/otel` does not use generics. (#3723, #3725) +- Multi-reader `MeterProvider`s now export metrics for all readers, instead of just the first reader. (#3720, #3724) +- Remove use of deprecated `"math/rand".Seed` in `go.opentelemetry.io/otel/example/prometheus`. (#3733) +- Do not silently drop unknown schema data with `Parse` in `go.opentelemetry.io/otel/schema/v1.1`. (#3743) +- Data race issue in OTLP exporter retry mechanism. (#3755, #3756) +- Wrapping empty errors when exporting in `go.opentelemetry.io/otel/sdk/metric`. (#3698, #3772) +- Incorrect "all" and "resource" definition for schema files in `go.opentelemetry.io/otel/schema/v1.1`. (#3777) + +### Deprecated + +- The `go.opentelemetry.io/otel/metric/unit` package is deprecated. + Use the equivalent unit string instead. (#3776) + - Use `"1"` instead of `unit.Dimensionless` + - Use `"By"` instead of `unit.Bytes` + - Use `"ms"` instead of `unit.Milliseconds` + +## [1.13.0/0.36.0] 2023-02-07 + +### Added + +- Attribute `KeyValue` creations functions to `go.opentelemetry.io/otel/semconv/v1.17.0` for all non-enum semantic conventions. + These functions ensure semantic convention type correctness. (#3675) + +### Fixed + +- Removed the `http.target` attribute from being added by `ServerRequest` in the following packages. (#3687) + - `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv` + - `go.opentelemetry.io/otel/semconv/v1.14.0/httpconv` + - `go.opentelemetry.io/otel/semconv/v1.15.0/httpconv` + - `go.opentelemetry.io/otel/semconv/v1.16.0/httpconv` + - `go.opentelemetry.io/otel/semconv/v1.17.0/httpconv` + +### Removed + +- The deprecated `go.opentelemetry.io/otel/metric/instrument/asyncfloat64` package is removed. (#3631) +- The deprecated `go.opentelemetry.io/otel/metric/instrument/asyncint64` package is removed. 
(#3631) +- The deprecated `go.opentelemetry.io/otel/metric/instrument/syncfloat64` package is removed. (#3631) +- The deprecated `go.opentelemetry.io/otel/metric/instrument/syncint64` package is removed. (#3631) + +## [1.12.0/0.35.0] 2023-01-28 + +### Added + +- The `WithInt64Callback` option to `go.opentelemetry.io/otel/metric/instrument`. + This options is used to configure `int64` Observer callbacks during their creation. (#3507) +- The `WithFloat64Callback` option to `go.opentelemetry.io/otel/metric/instrument`. + This options is used to configure `float64` Observer callbacks during their creation. (#3507) +- The `Producer` interface and `Reader.RegisterProducer(Producer)` to `go.opentelemetry.io/otel/sdk/metric`. + These additions are used to enable external metric Producers. (#3524) +- The `Callback` function type to `go.opentelemetry.io/otel/metric`. + This new named function type is registered with a `Meter`. (#3564) +- The `go.opentelemetry.io/otel/semconv/v1.13.0` package. + The package contains semantic conventions from the `v1.13.0` version of the OpenTelemetry specification. (#3499) + - The `EndUserAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientRequest` and `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `HTTPAttributesFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientResponse` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `HTTPClientAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `HTTPServerAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. 
+ - The `HTTPServerMetricAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `NetAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `Transport` in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` and `ClientRequest` or `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `SpanStatusFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `SpanStatusFromHTTPStatusCodeAndSpanKind` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `ClientStatus` and `ServerStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `Client` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Conn`. + - The `Server` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Listener`. +- The `go.opentelemetry.io/otel/semconv/v1.14.0` package. + The package contains semantic conventions from the `v1.14.0` version of the OpenTelemetry specification. (#3566) +- The `go.opentelemetry.io/otel/semconv/v1.15.0` package. + The package contains semantic conventions from the `v1.15.0` version of the OpenTelemetry specification. (#3578) +- The `go.opentelemetry.io/otel/semconv/v1.16.0` package. + The package contains semantic conventions from the `v1.16.0` version of the OpenTelemetry specification. (#3579) +- Metric instruments to `go.opentelemetry.io/otel/metric/instrument`. 
+ These instruments are use as replacements of the depreacted `go.opentelemetry.io/otel/metric/instrument/{asyncfloat64,asyncint64,syncfloat64,syncint64}` packages.(#3575, #3586) + - `Float64ObservableCounter` replaces the `asyncfloat64.Counter` + - `Float64ObservableUpDownCounter` replaces the `asyncfloat64.UpDownCounter` + - `Float64ObservableGauge` replaces the `asyncfloat64.Gauge` + - `Int64ObservableCounter` replaces the `asyncint64.Counter` + - `Int64ObservableUpDownCounter` replaces the `asyncint64.UpDownCounter` + - `Int64ObservableGauge` replaces the `asyncint64.Gauge` + - `Float64Counter` replaces the `syncfloat64.Counter` + - `Float64UpDownCounter` replaces the `syncfloat64.UpDownCounter` + - `Float64Histogram` replaces the `syncfloat64.Histogram` + - `Int64Counter` replaces the `syncint64.Counter` + - `Int64UpDownCounter` replaces the `syncint64.UpDownCounter` + - `Int64Histogram` replaces the `syncint64.Histogram` +- `NewTracerProvider` to `go.opentelemetry.io/otel/bridge/opentracing`. + This is used to create `WrapperTracer` instances from a `TracerProvider`. (#3116) +- The `Extrema` type to `go.opentelemetry.io/otel/sdk/metric/metricdata`. + This type is used to represent min/max values and still be able to distinguish unset and zero values. (#3487) +- The `go.opentelemetry.io/otel/semconv/v1.17.0` package. + The package contains semantic conventions from the `v1.17.0` version of the OpenTelemetry specification. (#3599) + +### Changed + +- Jaeger and Zipkin exporter use `github.com/go-logr/logr` as the logging interface, and add the `WithLogr` option. (#3497, #3500) +- Instrument configuration in `go.opentelemetry.io/otel/metric/instrument` is split into specific options and confguration based on the instrument type. (#3507) + - Use the added `Int64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncint64`. 
+ - Use the added `Float64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncfloat64`. + - Use the added `Int64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncint64`. + - Use the added `Float64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncfloat64`. +- Return a `Registration` from the `RegisterCallback` method of a `Meter` in the `go.opentelemetry.io/otel/metric` package. + This `Registration` can be used to unregister callbacks. (#3522) +- Global error handler uses an atomic value instead of a mutex. (#3543) +- Add `NewMetricProducer` to `go.opentelemetry.io/otel/bridge/opencensus`, which can be used to pass OpenCensus metrics to an OpenTelemetry Reader. (#3541) +- Global logger uses an atomic value instead of a mutex. (#3545) +- The `Shutdown` method of the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` releases all computational resources when called the first time. (#3551) +- The `Sampler` returned from `TraceIDRatioBased` `go.opentelemetry.io/otel/sdk/trace` now uses the rightmost bits for sampling decisions. + This fixes random sampling when using ID generators like `xray.IDGenerator` and increasing parity with other language implementations. (#3557) +- Errors from `go.opentelemetry.io/otel/exporters/otlp/otlptrace` exporters are wrapped in erros identifying their signal name. + Existing users of the exporters attempting to identify specific errors will need to use `errors.Unwrap()` to get the underlying error. (#3516) +- Exporters from `go.opentelemetry.io/otel/exporters/otlp` will print the final retryable error message when attempts to retry time out. (#3514) +- The instrument kind names in `go.opentelemetry.io/otel/sdk/metric` are updated to match the API. 
(#3562) + - `InstrumentKindSyncCounter` is renamed to `InstrumentKindCounter` + - `InstrumentKindSyncUpDownCounter` is renamed to `InstrumentKindUpDownCounter` + - `InstrumentKindSyncHistogram` is renamed to `InstrumentKindHistogram` + - `InstrumentKindAsyncCounter` is renamed to `InstrumentKindObservableCounter` + - `InstrumentKindAsyncUpDownCounter` is renamed to `InstrumentKindObservableUpDownCounter` + - `InstrumentKindAsyncGauge` is renamed to `InstrumentKindObservableGauge` +- The `RegisterCallback` method of the `Meter` in `go.opentelemetry.io/otel/metric` changed. + - The named `Callback` replaces the inline function parameter. (#3564) + - `Callback` is required to return an error. (#3576) + - `Callback` accepts the added `Observer` parameter added. + This new parameter is used by `Callback` implementations to observe values for asynchronous instruments instead of calling the `Observe` method of the instrument directly. (#3584) + - The slice of `instrument.Asynchronous` is now passed as a variadic argument. (#3587) +- The exporter from `go.opentelemetry.io/otel/exporters/zipkin` is updated to use the `v1.16.0` version of semantic conventions. + This means it no longer uses the removed `net.peer.ip` or `http.host` attributes to determine the remote endpoint. + Instead it uses the `net.sock.peer` attributes. (#3581) +- The `Min` and `Max` fields of the `HistogramDataPoint` in `go.opentelemetry.io/otel/sdk/metric/metricdata` are now defined with the added `Extrema` type instead of a `*float64`. (#3487) + +### Fixed + +- Asynchronous instruments that use sum aggregators and attribute filters correctly add values from equivalent attribute sets that have been filtered. (#3439, #3549) +- The `RegisterCallback` method of the `Meter` from `go.opentelemetry.io/otel/sdk/metric` only registers a callback for instruments created by that meter. + Trying to register a callback with instruments from a different meter will result in an error being returned. 
(#3584) + +### Deprecated + +- The `NewMetricExporter` in `go.opentelemetry.io/otel/bridge/opencensus` is deprecated. + Use `NewMetricProducer` instead. (#3541) +- The `go.opentelemetry.io/otel/metric/instrument/asyncfloat64` package is deprecated. + Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575) +- The `go.opentelemetry.io/otel/metric/instrument/asyncint64` package is deprecated. + Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575) +- The `go.opentelemetry.io/otel/metric/instrument/syncfloat64` package is deprecated. + Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575) +- The `go.opentelemetry.io/otel/metric/instrument/syncint64` package is deprecated. + Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575) +- The `NewWrappedTracerProvider` in `go.opentelemetry.io/otel/bridge/opentracing` is now deprecated. + Use `NewTracerProvider` instead. (#3116) + +### Removed + +- The deprecated `go.opentelemetry.io/otel/sdk/metric/view` package is removed. (#3520) +- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncint64` is removed. + Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530) + - The `Counter` method is replaced by `Meter.Int64ObservableCounter` + - The `UpDownCounter` method is replaced by `Meter.Int64ObservableUpDownCounter` + - The `Gauge` method is replaced by `Meter.Int64ObservableGauge` +- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncfloat64` is removed. + Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. 
(#3530) + - The `Counter` method is replaced by `Meter.Float64ObservableCounter` + - The `UpDownCounter` method is replaced by `Meter.Float64ObservableUpDownCounter` + - The `Gauge` method is replaced by `Meter.Float64ObservableGauge` +- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncint64` is removed. + Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530) + - The `Counter` method is replaced by `Meter.Int64Counter` + - The `UpDownCounter` method is replaced by `Meter.Int64UpDownCounter` + - The `Histogram` method is replaced by `Meter.Int64Histogram` +- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncfloat64` is removed. + Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530) + - The `Counter` method is replaced by `Meter.Float64Counter` + - The `UpDownCounter` method is replaced by `Meter.Float64UpDownCounter` + - The `Histogram` method is replaced by `Meter.Float64Histogram` + +## [1.11.2/0.34.0] 2022-12-05 + +### Added + +- The `WithView` `Option` is added to the `go.opentelemetry.io/otel/sdk/metric` package. + This option is used to configure the view(s) a `MeterProvider` will use for all `Reader`s that are registered with it. (#3387) +- Add Instrumentation Scope and Version as info metric and label in Prometheus exporter. 
+ This can be disabled using the `WithoutScopeInfo()` option added to that package.(#3273, #3357) +- OTLP exporters now recognize: (#3363) + - `OTEL_EXPORTER_OTLP_INSECURE` + - `OTEL_EXPORTER_OTLP_TRACES_INSECURE` + - `OTEL_EXPORTER_OTLP_METRICS_INSECURE` + - `OTEL_EXPORTER_OTLP_CLIENT_KEY` + - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY` + - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY` + - `OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE` + - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE` + - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE` +- The `View` type and related `NewView` function to create a view according to the OpenTelemetry specification are added to `go.opentelemetry.io/otel/sdk/metric`. + These additions are replacements for the `View` type and `New` function from `go.opentelemetry.io/otel/sdk/metric/view`. (#3459) +- The `Instrument` and `InstrumentKind` type are added to `go.opentelemetry.io/otel/sdk/metric`. + These additions are replacements for the `Instrument` and `InstrumentKind` types from `go.opentelemetry.io/otel/sdk/metric/view`. (#3459) +- The `Stream` type is added to `go.opentelemetry.io/otel/sdk/metric` to define a metric data stream a view will produce. (#3459) +- The `AssertHasAttributes` allows instrument authors to test that datapoints returned have appropriate attributes. (#3487) + +### Changed + +- The `"go.opentelemetry.io/otel/sdk/metric".WithReader` option no longer accepts views to associate with the `Reader`. + Instead, views are now registered directly with the `MeterProvider` via the new `WithView` option. + The views registered with the `MeterProvider` apply to all `Reader`s. (#3387) +- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/sdk/metric".Exporter` interface. 
(#3260) +- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric".Client` interface. (#3260) +- The `WithTemporalitySelector` and `WithAggregationSelector` `ReaderOption`s have been changed to `ManualReaderOption`s in the `go.opentelemetry.io/otel/sdk/metric` package. (#3260) +- The periodic reader in the `go.opentelemetry.io/otel/sdk/metric` package now uses the temporality and aggregation selectors from its configured exporter instead of accepting them as options. (#3260) + +### Fixed + +- The `go.opentelemetry.io/otel/exporters/prometheus` exporter fixes duplicated `_total` suffixes. (#3369) +- Remove comparable requirement for `Reader`s. (#3387) +- Cumulative metrics from the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) are defined as monotonic sums, instead of non-monotonic. (#3389) +- Asynchronous counters (`Counter` and `UpDownCounter`) from the metric SDK now produce delta sums when configured with delta temporality. (#3398) +- Exported `Status` codes in the `go.opentelemetry.io/otel/exporters/zipkin` exporter are now exported as all upper case values. (#3340) +- `Aggregation`s from `go.opentelemetry.io/otel/sdk/metric` with no data are not exported. (#3394, #3436) +- Reenabled Attribute Filters in the Metric SDK. (#3396) +- Asynchronous callbacks are only called if they are registered with at least one instrument that does not use drop aggragation. (#3408) +- Do not report empty partial-success responses in the `go.opentelemetry.io/otel/exporters/otlp` exporters. (#3438, #3432) +- Handle partial success responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` exporters. (#3162, #3440) +- Prevent duplicate Prometheus description, unit, and type. (#3469) +- Prevents panic when using incorrect `attribute.Value.As[Type]Slice()`. 
(#3489) + +### Removed + +- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.Client` interface is removed. (#3486) +- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.New` function is removed. Use the `otlpmetric[http|grpc].New` directly. (#3486) + +### Deprecated + +- The `go.opentelemetry.io/otel/sdk/metric/view` package is deprecated. + Use `Instrument`, `InstrumentKind`, `View`, and `NewView` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3476) + +## [1.11.1/0.33.0] 2022-10-19 + +### Added + +- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` registers with a Prometheus registerer on creation. + By default, it will register with the default Prometheus registerer. + A non-default registerer can be used by passing the `WithRegisterer` option. (#3239) +- Added the `WithAggregationSelector` option to the `go.opentelemetry.io/otel/exporters/prometheus` package to change the default `AggregationSelector` used. (#3341) +- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` converts the `Resource` associated with metric exports into a `target_info` metric. (#3285) + +### Changed + +- The `"go.opentelemetry.io/otel/exporters/prometheus".New` function is updated to return an error. + It will return an error if the exporter fails to register with Prometheus. (#3239) + +### Fixed + +- The URL-encoded values from the `OTEL_RESOURCE_ATTRIBUTES` environment variable are decoded. (#2963) +- The `baggage.NewMember` function decodes the `value` parameter instead of directly using it. + This fixes the implementation to be compliant with the W3C specification. (#3226) +- Slice attributes of the `attribute` package are now comparable based on their value, not instance. (#3108 #3252) +- The `Shutdown` and `ForceFlush` methods of the `"go.opentelemetry.io/otel/sdk/trace".TraceProvider` no longer return an error when no processor is registered. 
(#3268) +- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` cumulatively sums histogram buckets. (#3281) +- The sum of each histogram data point is now uniquely exported by the `go.opentelemetry.io/otel/exporters/otlpmetric` exporters. (#3284, #3293) +- Recorded values for asynchronous counters (`Counter` and `UpDownCounter`) are interpreted as exact, not incremental, sum values by the metric SDK. (#3350, #3278) +- `UpDownCounters` are now correctly output as Prometheus gauges in the `go.opentelemetry.io/otel/exporters/prometheus` exporter. (#3358) +- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` no longer describes the metrics it will send to Prometheus on startup. + Instead the exporter is defined as an "unchecked" collector for Prometheus. + This fixes the `reader is not registered` warning currently emitted on startup. (#3291 #3342) +- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now correctly adds `_total` suffixes to counter metrics. (#3360) +- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now adds a unit suffix to metric names. + This can be disabled using the `WithoutUnits()` option added to that package. (#3352) + +## [1.11.0/0.32.3] 2022-10-12 + +### Added + +- Add default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlptrace/otlptracegrpc` and `go.opentelemetry.io/otel/exporters/otlptrace/otlptracehttp`). (#3261) + +### Changed + +- `span.SetStatus` has been updated such that calls that lower the status are now no-ops. (#3214) +- Upgrade `golang.org/x/sys/unix` from `v0.0.0-20210423185535-09eb48e85fd7` to `v0.0.0-20220919091848-fb04ddd9f9c8`. + This addresses [GO-2022-0493](https://pkg.go.dev/vuln/GO-2022-0493). (#3235) + +## [0.32.2] Metric SDK (Alpha) - 2022-10-11 + +### Added + +- Added an example of using metric views to customize instruments. 
(#3177) +- Add default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlpmetric/otlpmetrichttp`). (#3261) + +### Changed + +- Flush pending measurements with the `PeriodicReader` in the `go.opentelemetry.io/otel/sdk/metric` when `ForceFlush` or `Shutdown` are called. (#3220) +- Update histogram default bounds to match the requirements of the latest specification. (#3222) +- Encode the HTTP status code in the OpenTracing bridge (`go.opentelemetry.io/otel/bridge/opentracing`) as an integer. (#3265) + +### Fixed + +- Use default view if instrument does not match any registered view of a reader. (#3224, #3237) +- Return the same instrument every time a user makes the exact same instrument creation call. (#3229, #3251) +- Return the existing instrument when a view transforms a creation call to match an existing instrument. (#3240, #3251) +- Log a warning when a conflicting instrument (e.g. description, unit, data-type) is created instead of returning an error. (#3251) +- The OpenCensus bridge no longer sends empty batches of metrics. (#3263) + +## [0.32.1] Metric SDK (Alpha) - 2022-09-22 + +### Changed + +- The Prometheus exporter sanitizes OpenTelemetry instrument names when exporting. + Invalid characters are replaced with `_`. (#3212) + +### Added + +- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been reintroduced. (#3192) +- The OpenCensus bridge example (`go.opentelemetry.io/otel/example/opencensus`) has been reintroduced. (#3206) + +### Fixed + +- Updated go.mods to point to valid versions of the sdk. (#3216) +- Set the `MeterProvider` resource on all exported metric data. (#3218) + +## [0.32.0] Revised Metric SDK (Alpha) - 2022-09-18 + +### Changed + +- The metric SDK in `go.opentelemetry.io/otel/sdk/metric` is completely refactored to comply with the OpenTelemetry specification. 
+ Please see the package documentation for how the new SDK is initialized and configured. (#3175) +- Update the minimum supported go version to go1.18. Removes support for go1.17 (#3179) + +### Removed + +- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been removed. + A new bridge compliant with the revised metric SDK will be added back in a future release. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/aggregator/histogram` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/aggregator/sum` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/aggregator` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/controller/basic` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/controller/controllertest` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/controller/time` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/export/aggregation` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/export` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/metrictest` package is removed. + A replacement package that supports the new metric SDK will be added back in a future release. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/number` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/processor/basic` package is removed, see the new metric SDK. 
(#3175) +- The `go.opentelemetry.io/otel/sdk/metric/processor/processortest` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/processor/reducer` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/registry` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/sdkapi` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/selector/simple` package is removed, see the new metric SDK. (#3175) +- The `"go.opentelemetry.io/otel/sdk/metric".ErrUninitializedInstrument` variable was removed. (#3175) +- The `"go.opentelemetry.io/otel/sdk/metric".ErrBadInstrument` variable was removed. (#3175) +- The `"go.opentelemetry.io/otel/sdk/metric".Accumulator` type was removed, see the `MeterProvider`in the new metric SDK. (#3175) +- The `"go.opentelemetry.io/otel/sdk/metric".NewAccumulator` function was removed, see `NewMeterProvider`in the new metric SDK. (#3175) +- The deprecated `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets` function was removed. (#3175) + +## [1.10.0] - 2022-09-09 + +### Added + +- Support Go 1.19. (#3077) + Include compatibility testing and document support. (#3077) +- Support the OTLP ExportTracePartialSuccess response; these are passed to the registered error handler. (#3106) +- Upgrade go.opentelemetry.io/proto/otlp from v0.18.0 to v0.19.0 (#3107) + +### Changed + +- Fix misidentification of OpenTelemetry `SpanKind` in OpenTracing bridge (`go.opentelemetry.io/otel/bridge/opentracing`). (#3096) +- Attempting to start a span with a nil `context` will no longer cause a panic. (#3110) +- All exporters will be shutdown even if one reports an error (#3091) +- Ensure valid UTF-8 when truncating over-length attribute values. 
(#3156) + +## [1.9.0/0.0.3] - 2022-08-01 + +### Added + +- Add support for Schema Files format 1.1.x (metric "split" transform) with the new `go.opentelemetry.io/otel/schema/v1.1` package. (#2999) +- Add the `go.opentelemetry.io/otel/semconv/v1.11.0` package. + The package contains semantic conventions from the `v1.11.0` version of the OpenTelemetry specification. (#3009) +- Add the `go.opentelemetry.io/otel/semconv/v1.12.0` package. + The package contains semantic conventions from the `v1.12.0` version of the OpenTelemetry specification. (#3010) +- Add the `http.method` attribute to HTTP server metric from all `go.opentelemetry.io/otel/semconv/*` packages. (#3018) + +### Fixed + +- Invalid warning for context setup being deferred in `go.opentelemetry.io/otel/bridge/opentracing` package. (#3029) + +## [1.8.0/0.31.0] - 2022-07-08 + +### Added + +- Add support for `opentracing.TextMap` format in the `Inject` and `Extract` methods +of the `"go.opentelemetry.io/otel/bridge/opentracing".BridgeTracer` type. (#2911) + +### Changed + +- The `crosslink` make target has been updated to use the `go.opentelemetry.io/build-tools/crosslink` package. (#2886) +- In the `go.opentelemetry.io/otel/sdk/instrumentation` package rename `Library` to `Scope` and alias `Library` as `Scope` (#2976) +- Move metric no-op implementation form `nonrecording` to `metric` package. (#2866) + +### Removed + +- Support for go1.16. Support is now only for go1.17 and go1.18 (#2917) + +### Deprecated + +- The `Library` struct in the `go.opentelemetry.io/otel/sdk/instrumentation` package is deprecated. + Use the equivalent `Scope` struct instead. (#2977) +- The `ReadOnlySpan.InstrumentationLibrary` method from the `go.opentelemetry.io/otel/sdk/trace` package is deprecated. + Use the equivalent `ReadOnlySpan.InstrumentationScope` method instead. (#2977) + +## [1.7.0/0.30.0] - 2022-04-28 + +### Added + +- Add the `go.opentelemetry.io/otel/semconv/v1.8.0` package. 
+ The package contains semantic conventions from the `v1.8.0` version of the OpenTelemetry specification. (#2763) +- Add the `go.opentelemetry.io/otel/semconv/v1.9.0` package. + The package contains semantic conventions from the `v1.9.0` version of the OpenTelemetry specification. (#2792) +- Add the `go.opentelemetry.io/otel/semconv/v1.10.0` package. + The package contains semantic conventions from the `v1.10.0` version of the OpenTelemetry specification. (#2842) +- Added an in-memory exporter to metrictest to aid testing with a full SDK. (#2776) + +### Fixed + +- Globally delegated instruments are unwrapped before delegating asynchronous callbacks. (#2784) +- Remove import of `testing` package in non-tests builds of the `go.opentelemetry.io/otel` package. (#2786) + +### Changed + +- The `WithLabelEncoder` option from the `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` package is renamed to `WithAttributeEncoder`. (#2790) +- The `LabelFilterSelector` interface from `go.opentelemetry.io/otel/sdk/metric/processor/reducer` is renamed to `AttributeFilterSelector`. + The method included in the renamed interface also changed from `LabelFilterFor` to `AttributeFilterFor`. (#2790) +- The `Metadata.Labels` method from the `go.opentelemetry.io/otel/sdk/metric/export` package is renamed to `Metadata.Attributes`. + Consequentially, the `Record` type from the same package also has had the embedded method renamed. (#2790) + +### Deprecated + +- The `Iterator.Label` method in the `go.opentelemetry.io/otel/attribute` package is deprecated. + Use the equivalent `Iterator.Attribute` method instead. (#2790) +- The `Iterator.IndexedLabel` method in the `go.opentelemetry.io/otel/attribute` package is deprecated. + Use the equivalent `Iterator.IndexedAttribute` method instead. (#2790) +- The `MergeIterator.Label` method in the `go.opentelemetry.io/otel/attribute` package is deprecated. + Use the equivalent `MergeIterator.Attribute` method instead. 
(#2790) + +### Removed + +- Removed the `Batch` type from the `go.opentelemetry.io/otel/sdk/metric/metrictest` package. (#2864) +- Removed the `Measurement` type from the `go.opentelemetry.io/otel/sdk/metric/metrictest` package. (#2864) + +## [0.29.0] - 2022-04-11 + +### Added + +- The metrics global package was added back into several test files. (#2764) +- The `Meter` function is added back to the `go.opentelemetry.io/otel/metric/global` package. + This function is a convenience function equivalent to calling `global.MeterProvider().Meter(...)`. (#2750) + +### Removed + +- Removed module the `go.opentelemetry.io/otel/sdk/export/metric`. + Use the `go.opentelemetry.io/otel/sdk/metric` module instead. (#2720) + +### Changed + +- Don't panic anymore when setting a global MeterProvider to itself. (#2749) +- Upgrade `go.opentelemetry.io/proto/otlp` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` from `v0.12.1` to `v0.15.0`. + This replaces the use of the now deprecated `InstrumentationLibrary` and `InstrumentationLibraryMetrics` types and fields in the proto library with the equivalent `InstrumentationScope` and `ScopeMetrics`. (#2748) + +## [1.6.3] - 2022-04-07 + +### Fixed + +- Allow non-comparable global `MeterProvider`, `TracerProvider`, and `TextMapPropagator` types to be set. (#2772, #2773) + +## [1.6.2] - 2022-04-06 + +### Changed + +- Don't panic anymore when setting a global TracerProvider or TextMapPropagator to itself. (#2749) +- Upgrade `go.opentelemetry.io/proto/otlp` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace` from `v0.12.1` to `v0.15.0`. + This replaces the use of the now deprecated `InstrumentationLibrary` and `InstrumentationLibrarySpans` types and fields in the proto library with the equivalent `InstrumentationScope` and `ScopeSpans`. (#2748) + +## [1.6.1] - 2022-03-28 + +### Fixed + +- The `go.opentelemetry.io/otel/schema/*` packages now use the correct schema URL for their `SchemaURL` constant. 
+ Instead of using `"https://opentelemetry.io/schemas/v"` they now use the correct URL without a `v` prefix, `"https://opentelemetry.io/schemas/"`. (#2743, #2744) + +### Security + +- Upgrade `go.opentelemetry.io/proto/otlp` from `v0.12.0` to `v0.12.1`. + This includes an indirect upgrade of `github.com/grpc-ecosystem/grpc-gateway` which resolves [a vulnerability](https://nvd.nist.gov/vuln/detail/CVE-2019-11254) from `gopkg.in/yaml.v2` in version `v2.2.3`. (#2724, #2728) + +## [1.6.0/0.28.0] - 2022-03-23 + +### ⚠️ Notice ⚠️ + +This update is a breaking change of the unstable Metrics API. +Code instrumented with the `go.opentelemetry.io/otel/metric` will need to be modified. + +### Added + +- Add metrics exponential histogram support. + New mapping functions have been made available in `sdk/metric/aggregator/exponential/mapping` for other OpenTelemetry projects to take dependencies on. (#2502) +- Add Go 1.18 to our compatibility tests. (#2679) +- Allow configuring the Sampler with the `OTEL_TRACES_SAMPLER` and `OTEL_TRACES_SAMPLER_ARG` environment variables. (#2305, #2517) +- Add the `metric/global` for obtaining and setting the global `MeterProvider`. (#2660) + +### Changed + +- The metrics API has been significantly changed to match the revised OpenTelemetry specification. + High-level changes include: + + - Synchronous and asynchronous instruments are now handled by independent `InstrumentProvider`s. + These `InstrumentProvider`s are managed with a `Meter`. + - Synchronous and asynchronous instruments are grouped into their own packages based on value types. + - Asynchronous callbacks can now be registered with a `Meter`. + + Be sure to check out the metric module documentation for more information on how to use the revised API. (#2587, #2660) + +### Fixed + +- Fallback to general attribute limits when span specific ones are not set in the environment. 
(#2675, #2677) + +## [1.5.0] - 2022-03-16 + +### Added + +- Log the Exporters configuration in the TracerProviders message. (#2578) +- Added support to configure the span limits with environment variables. + The following environment variables are supported. (#2606, #2637) + - `OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT` + - `OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT` + - `OTEL_SPAN_EVENT_COUNT_LIMIT` + - `OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT` + - `OTEL_SPAN_LINK_COUNT_LIMIT` + - `OTEL_LINK_ATTRIBUTE_COUNT_LIMIT` + + If the provided environment variables are invalid (negative), the default values would be used. +- Rename the `gc` runtime name to `go` (#2560) +- Add resource container ID detection. (#2418) +- Add span attribute value length limit. + The new `AttributeValueLengthLimit` field is added to the `"go.opentelemetry.io/otel/sdk/trace".SpanLimits` type to configure this limit for a `TracerProvider`. + The default limit for this resource is "unlimited". (#2637) +- Add the `WithRawSpanLimits` option to `go.opentelemetry.io/otel/sdk/trace`. + This option replaces the `WithSpanLimits` option. + Zero or negative values will not be changed to the default value like `WithSpanLimits` does. + Setting a limit to zero will effectively disable the related resource it limits and setting to a negative value will mean that resource is unlimited. + Consequentially, limits should be constructed using `NewSpanLimits` and updated accordingly. (#2637) + +### Changed + +- Drop oldest tracestate `Member` when capacity is reached. (#2592) +- Add event and link drop counts to the exported data from the `otlptrace` exporter. (#2601) +- Unify path cleaning functionality in the `otlpmetric` and `otlptrace` configuration. (#2639) +- Change the debug message from the `sdk/trace.BatchSpanProcessor` to reflect the count is cumulative. (#2640) +- Introduce new internal `envconfig` package for OTLP exporters. 
(#2608) +- If `http.Request.Host` is empty, fall back to use `URL.Host` when populating `http.host` in the `semconv` packages. (#2661) + +### Fixed + +- Remove the OTLP trace exporter limit of SpanEvents when exporting. (#2616) +- Default to port `4318` instead of `4317` for the `otlpmetrichttp` and `otlptracehttp` client. (#2614, #2625) +- Unlimited span limits are now supported (negative values). (#2636, #2637) + +### Deprecated + +- Deprecated `"go.opentelemetry.io/otel/sdk/trace".WithSpanLimits`. + Use `WithRawSpanLimits` instead. + That option allows setting unlimited and zero limits, this option does not. + This option will be kept until the next major version incremented release. (#2637) + +## [1.4.1] - 2022-02-16 + +### Fixed + +- Fix race condition in reading the dropped spans number for the `BatchSpanProcessor`. (#2615) + +## [1.4.0] - 2022-02-11 + +### Added + +- Use `OTEL_EXPORTER_ZIPKIN_ENDPOINT` environment variable to specify zipkin collector endpoint. (#2490) +- Log the configuration of `TracerProvider`s, and `Tracer`s for debugging. + To enable use a logger with Verbosity (V level) `>=1`. (#2500) +- Added support to configure the batch span-processor with environment variables. + The following environment variables are used. (#2515) + - `OTEL_BSP_SCHEDULE_DELAY` + - `OTEL_BSP_EXPORT_TIMEOUT` + - `OTEL_BSP_MAX_QUEUE_SIZE`. + - `OTEL_BSP_MAX_EXPORT_BATCH_SIZE` + +### Changed + +- Zipkin exporter exports `Resource` attributes in the `Tags` field. (#2589) + +### Deprecated + +- Deprecate module the `go.opentelemetry.io/otel/sdk/export/metric`. + Use the `go.opentelemetry.io/otel/sdk/metric` module instead. (#2382) +- Deprecate `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets`. (#2445) + +### Fixed + +- Fixed the instrument kind for noop async instruments to correctly report an implementation. (#2461) +- Fix UDP packets overflowing with Jaeger payloads. 
(#2489, #2512) +- Change the `otlpmetric.Client` interface's `UploadMetrics` method to accept a single `ResourceMetrics` instead of a slice of them. (#2491) +- Specify explicit buckets in Prometheus example, fixing issue where example only has `+inf` bucket. (#2419, #2493) +- W3C baggage will now decode urlescaped values. (#2529) +- Baggage members are now only validated once, when calling `NewMember` and not also when adding it to the baggage itself. (#2522) +- The order attributes are dropped from spans in the `go.opentelemetry.io/otel/sdk/trace` package when capacity is reached is fixed to be in compliance with the OpenTelemetry specification. + Instead of dropping the least-recently-used attribute, the last added attribute is dropped. + This drop order still only applies to attributes with unique keys not already contained in the span. + If an attribute is added with a key already contained in the span, that attribute is updated to the new value being added. (#2576) + +### Removed + +- Updated `go.opentelemetry.io/proto/otlp` from `v0.11.0` to `v0.12.0`. This version removes a number of deprecated methods. (#2546) + - [`Metric.GetIntGauge()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntGauge) + - [`Metric.GetIntHistogram()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntHistogram) + - [`Metric.GetIntSum()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntSum) + +## [1.3.0] - 2021-12-10 + +### ⚠️ Notice ⚠️ + +We have updated the project minimum supported Go version to 1.16 + +### Added + +- Added an internal Logger. + This can be used by the SDK and API to provide users with feedback of the internal state. + To enable verbose logs configure the logger which will print V(1) logs. For debugging information configure to print V(5) logs. 
(#2343) +- Add the `WithRetry` `Option` and the `RetryConfig` type to the `go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp` package to specify retry behavior consistently. (#2425) +- Add `SpanStatusFromHTTPStatusCodeAndSpanKind` to all `semconv` packages to return a span status code similar to `SpanStatusFromHTTPStatusCode`, but exclude `4XX` HTTP errors as span errors if the span is of server kind. (#2296) + +### Changed + +- The `"go.opentelemetry.io/otel/exporter/otel/otlptrace/otlptracegrpc".Client` now uses the underlying gRPC `ClientConn` to handle name resolution, TCP connection establishment (with retries and backoff) and TLS handshakes, and handling errors on established connections by re-resolving the name and reconnecting. (#2329) +- The `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetricgrpc".Client` now uses the underlying gRPC `ClientConn` to handle name resolution, TCP connection establishment (with retries and backoff) and TLS handshakes, and handling errors on established connections by re-resolving the name and reconnecting. (#2425) +- The `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetricgrpc".RetrySettings` type is renamed to `RetryConfig`. (#2425) +- The `go.opentelemetry.io/otel/exporter/otel/*` gRPC exporters now default to using the host's root CA set if none are provided by the user and `WithInsecure` is not specified. (#2432) +- Change `resource.Default` to be evaluated the first time it is called, rather than on import. This allows the caller the option to update `OTEL_RESOURCE_ATTRIBUTES` first, such as with `os.Setenv`. (#2371) + +### Fixed + +- The `go.opentelemetry.io/otel/exporter/otel/*` exporters are updated to handle per-signal and universal endpoints according to the OpenTelemetry specification. + Any per-signal endpoint set via an `OTEL_EXPORTER_OTLP_<signal>_ENDPOINT` environment variable is now used without modification of the path. 
+ When `OTEL_EXPORTER_OTLP_ENDPOINT` is set, if it contains a path, that path is used as a base path which per-signal paths are appended to. (#2433) +- Basic metric controller updated to use sync.Map to avoid blocking calls (#2381) +- The `go.opentelemetry.io/otel/exporter/jaeger` correctly sets the `otel.status_code` value to be a string of `ERROR` or `OK` instead of an integer code. (#2439, #2440) + +### Deprecated + +- Deprecated the `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp".WithMaxAttempts` `Option`, use the new `WithRetry` `Option` instead. (#2425) +- Deprecated the `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp".WithBackoff` `Option`, use the new `WithRetry` `Option` instead. (#2425) + +### Removed + +- Remove the metric Processor's ability to convert cumulative to delta aggregation temporality. (#2350) +- Remove the metric Bound Instruments interface and implementations. (#2399) +- Remove the metric MinMaxSumCount kind aggregation and the corresponding OTLP export path. (#2423) +- Metric SDK removes the "exact" aggregator for histogram instruments, as it performed a non-standard aggregation for OTLP export (creating repeated Gauge points) and worked its way into a number of confusing examples. (#2348) + +## [1.2.0] - 2021-11-12 + +### Changed + +- Metric SDK `export.ExportKind`, `export.ExportKindSelector` types have been renamed to `aggregation.Temporality` and `aggregation.TemporalitySelector` respectively to keep in line with current specification and protocol along with built-in selectors (e.g., `aggregation.CumulativeTemporalitySelector`, ...). (#2274) +- The Metric `Exporter` interface now requires a `TemporalitySelector` method instead of an `ExportKindSelector`. (#2274) +- Metrics API cleanup. 
The `metric/sdkapi` package has been created to relocate the API-to-SDK interface: + - The following interface types simply moved from `metric` to `metric/sdkapi`: `Descriptor`, `MeterImpl`, `InstrumentImpl`, `SyncImpl`, `BoundSyncImpl`, `AsyncImpl`, `AsyncRunner`, `AsyncSingleRunner`, and `AsyncBatchRunner` + - The following struct types moved and are replaced with type aliases, since they are exposed to the user: `Observation`, `Measurement`. + - The No-op implementations of sync and async instruments are no longer exported, new functions `sdkapi.NewNoopAsyncInstrument()` and `sdkapi.NewNoopSyncInstrument()` are provided instead. (#2271) +- Update the SDK `BatchSpanProcessor` to export all queued spans when `ForceFlush` is called. (#2080, #2335) + +### Added + +- Add the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc".WithGRPCConn` option so the exporter can reuse an existing gRPC connection. (#2002) +- Added a new `schema` module to help parse Schema Files in OTEP 0152 format. (#2267) +- Added a new `MapCarrier` to the `go.opentelemetry.io/otel/propagation` package to hold propagated cross-cutting concerns as a `map[string]string` held in memory. (#2334) + +## [1.1.0] - 2021-10-27 + +### Added + +- Add the `"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc".WithGRPCConn` option so the exporter can reuse an existing gRPC connection. (#2002) +- Add the `go.opentelemetry.io/otel/semconv/v1.7.0` package. + The package contains semantic conventions from the `v1.7.0` version of the OpenTelemetry specification. (#2320) +- Add the `go.opentelemetry.io/otel/semconv/v1.6.1` package. + The package contains semantic conventions from the `v1.6.1` version of the OpenTelemetry specification. (#2321) +- Add the `go.opentelemetry.io/otel/semconv/v1.5.0` package. + The package contains semantic conventions from the `v1.5.0` version of the OpenTelemetry specification. 
(#2322) + - When upgrading from the `semconv/v1.4.0` package note the following name changes: + - `K8SReplicasetUIDKey` -> `K8SReplicaSetUIDKey` + - `K8SReplicasetNameKey` -> `K8SReplicaSetNameKey` + - `K8SStatefulsetUIDKey` -> `K8SStatefulSetUIDKey` + - `K8SStatefulsetNameKey` -> `K8SStatefulSetNameKey` + - `K8SDaemonsetUIDKey` -> `K8SDaemonSetUIDKey` + - `K8SDaemonsetNameKey` -> `K8SDaemonSetNameKey` + +### Changed + +- Links added to a span will be dropped by the SDK if they contain an invalid span context (#2275). + +### Fixed + +- The `"go.opentelemetry.io/otel/semconv/v1.4.0".HTTPServerAttributesFromHTTPRequest` now correctly only sets the HTTP client IP attribute even if the connection was routed with proxies and there are multiple addresses in the `X-Forwarded-For` header. (#2282, #2284) +- The `"go.opentelemetry.io/otel/semconv/v1.4.0".NetAttributesFromHTTPRequest` function correctly handles IPv6 addresses as IP addresses and sets the correct net peer IP instead of the net peer hostname attribute. (#2283, #2285) +- The simple span processor shutdown method deterministically returns the exporter error status if it simultaneously finishes when the deadline is reached. (#2290, #2289) + +## [1.0.1] - 2021-10-01 + +### Fixed + +- json stdout exporter no longer crashes due to concurrency bug. (#2265) + +## [Metrics 0.24.0] - 2021-10-01 + +### Changed + +- NoopMeterProvider is now private and NewNoopMeterProvider must be used to obtain a noopMeterProvider. (#2237) +- The Metric SDK `Export()` function takes a new two-level reader interface for iterating over results one instrumentation library at a time. (#2197) + - The former `"go.opentelemetry.io/otel/sdk/export/metric".CheckpointSet` is renamed `Reader`. + - The new interface is named `"go.opentelemetry.io/otel/sdk/export/metric".InstrumentationLibraryReader`. + +## [1.0.0] - 2021-09-20 + +This is the first stable release for the project. 
+This release includes an API and SDK for the tracing signal that will comply with the stability guarantees defined by the projects [versioning policy](./VERSIONING.md). + +### Added + +- OTLP trace exporter now sets the `SchemaURL` field in the exported telemetry if the Tracer has `WithSchemaURL` option. (#2242) + +### Fixed + +- Slice-valued attributes can correctly be used as map keys. (#2223) + +### Removed + +- Removed the `"go.opentelemetry.io/otel/exporters/zipkin".WithSDKOptions` function. (#2248) +- Removed the deprecated package `go.opentelemetry.io/otel/oteltest`. (#2234) +- Removed the deprecated package `go.opentelemetry.io/otel/bridge/opencensus/utils`. (#2233) +- Removed deprecated functions, types, and methods from `go.opentelemetry.io/otel/attribute` package. + Use the typed functions and methods added to the package instead. (#2235) + - The `Key.Array` method is removed. + - The `Array` function is removed. + - The `Any` function is removed. + - The `ArrayValue` function is removed. + - The `AsArray` function is removed. + +## [1.0.0-RC3] - 2021-09-02 + +### Added + +- Added `ErrorHandlerFunc` to use a function as an `"go.opentelemetry.io/otel".ErrorHandler`. (#2149) +- Added `"go.opentelemetry.io/otel/trace".WithStackTrace` option to add a stack trace when using `span.RecordError` or when panic is handled in `span.End`. (#2163) +- Added typed slice attribute types and functionality to the `go.opentelemetry.io/otel/attribute` package to replace the existing array type and functions. (#2162) + - `BoolSlice`, `IntSlice`, `Int64Slice`, `Float64Slice`, and `StringSlice` replace the use of the `Array` function in the package. +- Added the `go.opentelemetry.io/otel/example/fib` example package. + Included is an example application that computes Fibonacci numbers. 
(#2203) + +### Changed + +- Metric instruments have been renamed to match the (feature-frozen) metric API specification: + - ValueRecorder becomes Histogram + - ValueObserver becomes Gauge + - SumObserver becomes CounterObserver + - UpDownSumObserver becomes UpDownCounterObserver + The API exported from this project is still considered experimental. (#2202) +- Metric SDK/API implementation type `InstrumentKind` moves into `sdkapi` sub-package. (#2091) +- The Metrics SDK export record no longer contains a Resource pointer, the SDK `"go.opentelemetry.io/otel/sdk/trace/export/metric".Exporter.Export()` function for push-based exporters now takes a single Resource argument, pull-based exporters use `"go.opentelemetry.io/otel/sdk/metric/controller/basic".Controller.Resource()`. (#2120) +- The JSON output of the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` is harmonized now such that the output is "plain" JSON objects after each other of the form `{ ... } { ... } { ... }`. Earlier the JSON objects describing a span were wrapped in a slice for each `Exporter.ExportSpans` call, like `[ { ... } ][ { ... } { ... } ]`. Outputting JSON object directly after each other is consistent with JSON loggers, and a bit easier to parse and read. (#2196) +- Update the `NewTracerConfig`, `NewSpanStartConfig`, `NewSpanEndConfig`, and `NewEventConfig` function in the `go.opentelemetry.io/otel/trace` package to return their respective configurations as structs instead of pointers to the struct. (#2212) + +### Deprecated + +- The `go.opentelemetry.io/otel/bridge/opencensus/utils` package is deprecated. + All functionality from this package now exists in the `go.opentelemetry.io/otel/bridge/opencensus` package. + The functions from that package should be used instead. (#2166) +- The `"go.opentelemetry.io/otel/attribute".Array` function and the related `ARRAY` value type is deprecated. + Use the typed `*Slice` functions and types added to the package instead. 
(#2162) +- The `"go.opentelemetry.io/otel/attribute".Any` function is deprecated. + Use the typed functions instead. (#2181) +- The `go.opentelemetry.io/otel/oteltest` package is deprecated. + The `"go.opentelemetry.io/otel/sdk/trace/tracetest".SpanRecorder` can be registered with the default SDK (`go.opentelemetry.io/otel/sdk/trace`) as a `SpanProcessor` and used as a replacement for this deprecated package. (#2188) + +### Removed + +- Removed metrics test package `go.opentelemetry.io/otel/sdk/export/metric/metrictest`. (#2105) + +### Fixed + +- The `fromEnv` detector no longer throws an error when `OTEL_RESOURCE_ATTRIBUTES` environment variable is not set or empty. (#2138) +- Setting the global `ErrorHandler` with `"go.opentelemetry.io/otel".SetErrorHandler` multiple times is now supported. (#2160, #2140) +- The `"go.opentelemetry.io/otel/attribute".Any` function now supports `int32` values. (#2169) +- Multiple calls to `"go.opentelemetry.io/otel/sdk/metric/controller/basic".WithResource()` are handled correctly, and when no resources are provided `"go.opentelemetry.io/otel/sdk/resource".Default()` is used. (#2120) +- The `WithoutTimestamps` option for the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter causes the exporter to correctly omit timestamps. (#2195) +- Fixed typos in resources.go. (#2201) + +## [1.0.0-RC2] - 2021-07-26 + +### Added + +- Added `WithOSDescription` resource configuration option to set OS (Operating System) description resource attribute (`os.description`). (#1840) +- Added `WithOS` resource configuration option to set all OS (Operating System) resource attributes at once. (#1840) +- Added the `WithRetry` option to the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` package. + This option is a replacement for the removed `WithMaxAttempts` and `WithBackoff` options. (#2095) +- Added API `LinkFromContext` to return Link which encapsulates SpanContext from provided context and also encapsulates attributes. 
(#2115) +- Added a new `Link` type under the SDK `otel/sdk/trace` package that counts the number of attributes that were dropped for surpassing the `AttributePerLinkCountLimit` configured in the Span's `SpanLimits`. + This new type replaces the equal-named API `Link` type found in the `otel/trace` package for most usages within the SDK. + For example, instances of this type are now returned by the `Links()` function of `ReadOnlySpan`s provided in places like the `OnEnd` function of `SpanProcessor` implementations. (#2118) +- Added the `SpanRecorder` type to the `go.opentelemetry.io/otel/sdk/trace/tracetest` package. + This type can be used with the default SDK as a `SpanProcessor` during testing. (#2132) + +### Changed + +- The `SpanModels` function is now exported from the `go.opentelemetry.io/otel/exporters/zipkin` package to convert OpenTelemetry spans into Zipkin model spans. (#2027) +- Rename the `"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc".RetrySettings` to `RetryConfig`. (#2095) + +### Deprecated + +- The `TextMapCarrier` and `TextMapPropagator` from the `go.opentelemetry.io/otel/oteltest` package and their associated creation functions (`TextMapCarrier`, `NewTextMapPropagator`) are deprecated. (#2114) +- The `Harness` type from the `go.opentelemetry.io/otel/oteltest` package and its associated creation function, `NewHarness` are deprecated and will be removed in the next release. (#2123) +- The `TraceStateFromKeyValues` function from the `go.opentelemetry.io/otel/oteltest` package is deprecated. + Use the `trace.ParseTraceState` function instead. (#2122) + +### Removed + +- Removed the deprecated package `go.opentelemetry.io/otel/exporters/trace/jaeger`. (#2020) +- Removed the deprecated package `go.opentelemetry.io/otel/exporters/trace/zipkin`. (#2020) +- Removed the `"go.opentelemetry.io/otel/sdk/resource".WithBuiltinDetectors` function. + The explicit `With*` options for every built-in detector should be used instead. 
(#2026 #2097) +- Removed the `WithMaxAttempts` and `WithBackoff` options from the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` package. + The retry logic of the package has been updated to match the `otlptracegrpc` package and accordingly a `WithRetry` option is added that should be used instead. (#2095) +- Removed `DroppedAttributeCount` field from `otel/trace.Link` struct. (#2118) + +### Fixed + +- When using WithNewRoot, don't use the parent context for making sampling decisions. (#2032) +- `oteltest.Tracer` now creates a valid `SpanContext` when using `WithNewRoot`. (#2073) +- OS type detector now sets the correct `dragonflybsd` value for DragonFly BSD. (#2092) +- The OTel span status is correctly transformed into the OTLP status in the `go.opentelemetry.io/otel/exporters/otlp/otlptrace` package. + This fix will by default set the status to `Unset` if it is not explicitly set to `Ok` or `Error`. (#2099 #2102) +- The `Inject` method for the `"go.opentelemetry.io/otel/propagation".TraceContext` type no longer injects empty `tracestate` values. (#2108) +- Use `6831` as default Jaeger agent port instead of `6832`. (#2131) + +## [Experimental Metrics v0.22.0] - 2021-07-19 + +### Added + +- Adds HTTP support for OTLP metrics exporter. (#2022) + +### Removed + +- Removed the deprecated package `go.opentelemetry.io/otel/exporters/metric/prometheus`. (#2020) + +## [1.0.0-RC1] / 0.21.0 - 2021-06-18 + +With this release we are introducing a split in module versions. The tracing API and SDK are entering the `v1.0.0` Release Candidate phase with `v1.0.0-RC1` +while the experimental metrics API and SDK continue with `v0.x` releases at `v0.21.0`. Modules at major version 1 or greater will not depend on modules +with major version 0. + +### Added + +- Adds `otlpgrpc.WithRetry` option for configuring the retry policy for transient errors on the otlp/gRPC exporter. 
(#1832) + - The following status codes are defined as transient errors: + | gRPC Status Code | Description | + | ---------------- | ----------- | + | 1 | Cancelled | + | 4 | Deadline Exceeded | + | 8 | Resource Exhausted | + | 10 | Aborted | + | 11 | Out of Range | + | 14 | Unavailable | + | 15 | Data Loss | +- Added `Status` type to the `go.opentelemetry.io/otel/sdk/trace` package to represent the status of a span. (#1874) +- Added `SpanStub` type and its associated functions to the `go.opentelemetry.io/otel/sdk/trace/tracetest` package. + This type can be used as a testing replacement for the `SpanSnapshot` that was removed from the `go.opentelemetry.io/otel/sdk/trace` package. (#1873) +- Adds support for scheme in `OTEL_EXPORTER_OTLP_ENDPOINT` according to the spec. (#1886) +- Adds `trace.WithSchemaURL` option for configuring the tracer with a Schema URL. (#1889) +- Added an example of using OpenTelemetry Go as a trace context forwarder. (#1912) +- `ParseTraceState` is added to the `go.opentelemetry.io/otel/trace` package. + It can be used to decode a `TraceState` from a `tracestate` header string value. (#1937) +- Added `Len` method to the `TraceState` type in the `go.opentelemetry.io/otel/trace` package. + This method returns the number of list-members the `TraceState` holds. (#1937) +- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace` that defines a trace exporter that uses a `otlptrace.Client` to send data. + Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` implementing a gRPC `otlptrace.Client` and offers convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to setup and install a `otlptrace.Exporter` in tracing. (#1922) +- Added `Baggage`, `Member`, and `Property` types to the `go.opentelemetry.io/otel/baggage` package along with their related functions. 
(#1967) +- Added `ContextWithBaggage`, `ContextWithoutBaggage`, and `FromContext` functions to the `go.opentelemetry.io/otel/baggage` package. + These functions replace the `Set`, `Value`, `ContextWithValue`, `ContextWithoutValue`, and `ContextWithEmpty` functions from that package and directly work with the new `Baggage` type. (#1967) +- The `OTEL_SERVICE_NAME` environment variable is the preferred source for `service.name`, used by the environment resource detector if a service name is present both there and in `OTEL_RESOURCE_ATTRIBUTES`. (#1969) +- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` implementing an HTTP `otlptrace.Client` and offers convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to setup and install a `otlptrace.Exporter` in tracing. (#1963) +- Changes `go.opentelemetry.io/otel/sdk/resource.NewWithAttributes` to require a schema URL. The old function is still available as `resource.NewSchemaless`. This is a breaking change. (#1938) +- Several builtin resource detectors now correctly populate the schema URL. (#1938) +- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` that defines a metrics exporter that uses a `otlpmetric.Client` to send data. +- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` implementing a gRPC `otlpmetric.Client` and offers convenience functions, `New` and `NewUnstarted`, to create an `otlpmetric.Exporter`. (#1991) +- Added a `TracerProvider()` method to the `"go.opentelemetry.io/otel/trace".Span` interface. This can be used to obtain a `TracerProvider` from a given span that utilizes the same trace processing pipeline. 
(#2009) + +### Changed + +- Make `NewSplitDriver` from `go.opentelemetry.io/otel/exporters/otlp` take variadic arguments instead of a `SplitConfig` item. + `NewSplitDriver` now automatically implements an internal `noopDriver` for `SplitConfig` fields that are not initialized. (#1798) +- `resource.New()` now creates a Resource without builtin detectors. Previous behavior is now achieved by using `WithBuiltinDetectors` Option. (#1810) +- Move the `Event` type from the `go.opentelemetry.io/otel` package to the `go.opentelemetry.io/otel/sdk/trace` package. (#1846) +- CI builds validate against last two versions of Go, dropping 1.14 and adding 1.16. (#1865) +- BatchSpanProcessor now report export failures when calling `ForceFlush()` method. (#1860) +- `Set.Encoded(Encoder)` no longer caches the result of an encoding. (#1855) +- Renamed `CloudZoneKey` to `CloudAvailabilityZoneKey` in Resource semantic conventions according to spec. (#1871) +- The `StatusCode` and `StatusMessage` methods of the `ReadOnlySpan` interface and the `Span` produced by the `go.opentelemetry.io/otel/sdk/trace` package have been replaced with a single `Status` method. + This method returns the status of a span using the new `Status` type. (#1874) +- Updated `ExportSpans` method of the`SpanExporter` interface type to accept `ReadOnlySpan`s instead of the removed `SpanSnapshot`. + This brings the export interface into compliance with the specification in that it now accepts an explicitly immutable type instead of just an implied one. (#1873) +- Unembed `SpanContext` in `Link`. (#1877) +- Generate Semantic conventions from the specification YAML. (#1891) +- Spans created by the global `Tracer` obtained from `go.opentelemetry.io/otel`, prior to a functioning `TracerProvider` being set, now propagate the span context from their parent if one exists. (#1901) +- The `"go.opentelemetry.io/otel".Tracer` function now accepts tracer options. 
(#1902) +- Move the `go.opentelemetry.io/otel/unit` package to `go.opentelemetry.io/otel/metric/unit`. (#1903) +- Changed `go.opentelemetry.io/otel/trace.TracerConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921) +- Changed `go.opentelemetry.io/otel/trace.SpanConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921) +- Changed `span.End()` now only accepts Options that are allowed at `End()`. (#1921) +- Changed `go.opentelemetry.io/otel/metric.InstrumentConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921) +- Changed `go.opentelemetry.io/otel/metric.MeterConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921) +- Refactored option types according to the contribution style guide. (#1882) +- Move the `go.opentelemetry.io/otel/trace.TraceStateFromKeyValues` function to the `go.opentelemetry.io/otel/oteltest` package. + This function is preserved for testing purposes where it may be useful to create a `TraceState` from `attribute.KeyValue`s, but it is not intended for production use. + The new `ParseTraceState` function should be used to create a `TraceState`. (#1931) +- Updated `MarshalJSON` method of the `go.opentelemetry.io/otel/trace.TraceState` type to marshal the type into the string representation of the `TraceState`. (#1931) +- The `TraceState.Delete` method from the `go.opentelemetry.io/otel/trace` package no longer returns an error in addition to a `TraceState`. (#1931) +- Updated `Get` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a `string` instead of an `attribute.Key` type. (#1931) +- Updated `Insert` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a pair of `string`s instead of an `attribute.KeyValue` type. 
(#1931) +- Updated `Delete` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a `string` instead of an `attribute.Key` type. (#1931) +- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/stdout` package. (#1985) +- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/metric/prometheus` package. (#1985) +- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/trace/jaeger` package. (#1985) +- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/trace/zipkin` package. (#1985) +- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/otlp` package. (#1985) +- Renamed `NewUnstartedExporter` to `NewUnstarted` in the `go.opentelemetry.io/otel/exporters/otlp` package. (#1985) +- The `go.opentelemetry.io/otel/semconv` package has been moved to `go.opentelemetry.io/otel/semconv/v1.4.0` to allow for multiple [telemetry schema](https://github.com/open-telemetry/oteps/blob/main/text/0152-telemetry-schemas.md) versions to be used concurrently. (#1987) +- Metrics test helpers in `go.opentelemetry.io/otel/oteltest` have been moved to `go.opentelemetry.io/otel/metric/metrictest`. (#1988) + +### Deprecated + +- The `go.opentelemetry.io/otel/exporters/metric/prometheus` is deprecated, use `go.opentelemetry.io/otel/exporters/prometheus` instead. (#1993) +- The `go.opentelemetry.io/otel/exporters/trace/jaeger` is deprecated, use `go.opentelemetry.io/otel/exporters/jaeger` instead. (#1993) +- The `go.opentelemetry.io/otel/exporters/trace/zipkin` is deprecated, use `go.opentelemetry.io/otel/exporters/zipkin` instead. (#1993) + +### Removed + +- Removed `resource.WithoutBuiltin()`. Use `resource.New()`. (#1810) +- Unexported types `resource.FromEnv`, `resource.Host`, and `resource.TelemetrySDK`, Use the corresponding `With*()` to use individually. 
(#1810) +- Removed the `Tracer` and `IsRecording` method from the `ReadOnlySpan` in the `go.opentelemetry.io/otel/sdk/trace`. + The `Tracer` method is not a required to be included in this interface and given the mutable nature of the tracer that is associated with a span, this method is not appropriate. + The `IsRecording` method returns if the span is recording or not. + A read-only span value does not need to know if updates to it will be recorded or not. + By definition, it cannot be updated so there is no point in communicating if an update is recorded. (#1873) +- Removed the `SpanSnapshot` type from the `go.opentelemetry.io/otel/sdk/trace` package. + The use of this type has been replaced with the use of the explicitly immutable `ReadOnlySpan` type. + When a concrete representation of a read-only span is needed for testing, the newly added `SpanStub` in the `go.opentelemetry.io/otel/sdk/trace/tracetest` package should be used. (#1873) +- Removed the `Tracer` method from the `Span` interface in the `go.opentelemetry.io/otel/trace` package. + Using the same tracer that created a span introduces the error where an instrumentation library's `Tracer` is used by other code instead of their own. + The `"go.opentelemetry.io/otel".Tracer` function or a `TracerProvider` should be used to acquire a library specific `Tracer` instead. (#1900) + - The `TracerProvider()` method on the `Span` interface may also be used to obtain a `TracerProvider` using the same trace processing pipeline. (#2009) +- The `http.url` attribute generated by `HTTPClientAttributesFromHTTPRequest` will no longer include username or password information. (#1919) +- Removed `IsEmpty` method of the `TraceState` type in the `go.opentelemetry.io/otel/trace` package in favor of using the added `TraceState.Len` method. (#1931) +- Removed `Set`, `Value`, `ContextWithValue`, `ContextWithoutValue`, and `ContextWithEmpty` functions in the `go.opentelemetry.io/otel/baggage` package. 
+ Handling of baggage is now done using the added `Baggage` type and related context functions (`ContextWithBaggage`, `ContextWithoutBaggage`, and `FromContext`) in that package. (#1967) +- The `InstallNewPipeline` and `NewExportPipeline` creation functions in all the exporters (prometheus, otlp, stdout, jaeger, and zipkin) have been removed. + These functions were deemed premature attempts to provide convenience that did not achieve this aim. (#1985) +- The `go.opentelemetry.io/otel/exporters/otlp` exporter has been removed. Use `go.opentelemetry.io/otel/exporters/otlp/otlptrace` instead. (#1990) +- The `go.opentelemetry.io/otel/exporters/stdout` exporter has been removed. Use `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` or `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` instead. (#2005) + +### Fixed + +- Only report errors from the `"go.opentelemetry.io/otel/sdk/resource".Environment` function when they are not `nil`. (#1850, #1851) +- The `Shutdown` method of the simple `SpanProcessor` in the `go.opentelemetry.io/otel/sdk/trace` package now honors the context deadline or cancellation. (#1616, #1856) +- BatchSpanProcessor now drops span batches that failed to be exported. (#1860) +- Use `http://localhost:14268/api/traces` as default Jaeger collector endpoint instead of `http://localhost:14250`. (#1898) +- Allow trailing and leading whitespace in the parsing of a `tracestate` header. (#1931) +- Add logic to determine if the channel is closed to fix Jaeger exporter test panic with close closed channel. (#1870, #1973) +- Avoid transport security when OTLP endpoint is a Unix socket. (#2001) + +### Security + +## [0.20.0] - 2021-04-23 + +### Added + +- The OTLP exporter now has two new convenience functions, `NewExportPipeline` and `InstallNewPipeline`, setup and install the exporter in tracing and metrics pipelines. (#1373) +- Adds semantic conventions for exceptions. 
(#1492) +- Added Jaeger Environment variables: `OTEL_EXPORTER_JAEGER_AGENT_HOST`, `OTEL_EXPORTER_JAEGER_AGENT_PORT` + These environment variables can be used to override Jaeger agent hostname and port (#1752) +- Option `ExportTimeout` was added to batch span processor. (#1755) +- `trace.TraceFlags` is now a defined type over `byte` and `WithSampled(bool) TraceFlags` and `IsSampled() bool` methods have been added to it. (#1770) +- The `Event` and `Link` struct types from the `go.opentelemetry.io/otel` package now include a `DroppedAttributeCount` field to record the number of attributes that were not recorded due to configured limits being reached. (#1771) +- The Jaeger exporter now reports dropped attributes for a Span event in the exported log. (#1771) +- Adds test to check BatchSpanProcessor ignores `OnEnd` and `ForceFlush` post `Shutdown`. (#1772) +- Extract resource attributes from the `OTEL_RESOURCE_ATTRIBUTES` environment variable and merge them with the `resource.Default` resource as well as resources provided to the `TracerProvider` and metric `Controller`. (#1785) +- Added `WithOSType` resource configuration option to set OS (Operating System) type resource attribute (`os.type`). (#1788) +- Added `WithProcess*` resource configuration options to set Process resource attributes. (#1788) + - `process.pid` + - `process.executable.name` + - `process.executable.path` + - `process.command_args` + - `process.owner` + - `process.runtime.name` + - `process.runtime.version` + - `process.runtime.description` +- Adds `k8s.node.name` and `k8s.node.uid` attribute keys to the `semconv` package. (#1789) +- Added support for configuring OTLP/HTTP and OTLP/gRPC Endpoints, TLS Certificates, Headers, Compression and Timeout via Environment Variables. 
(#1758, #1769 and #1811) + - `OTEL_EXPORTER_OTLP_ENDPOINT` + - `OTEL_EXPORTER_OTLP_TRACES_ENDPOINT` + - `OTEL_EXPORTER_OTLP_METRICS_ENDPOINT` + - `OTEL_EXPORTER_OTLP_HEADERS` + - `OTEL_EXPORTER_OTLP_TRACES_HEADERS` + - `OTEL_EXPORTER_OTLP_METRICS_HEADERS` + - `OTEL_EXPORTER_OTLP_COMPRESSION` + - `OTEL_EXPORTER_OTLP_TRACES_COMPRESSION` + - `OTEL_EXPORTER_OTLP_METRICS_COMPRESSION` + - `OTEL_EXPORTER_OTLP_TIMEOUT` + - `OTEL_EXPORTER_OTLP_TRACES_TIMEOUT` + - `OTEL_EXPORTER_OTLP_METRICS_TIMEOUT` + - `OTEL_EXPORTER_OTLP_CERTIFICATE` + - `OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE` + - `OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE` +- Adds `otlpgrpc.WithTimeout` option for configuring timeout to the otlp/gRPC exporter. (#1821) +- Adds `jaeger.WithMaxPacketSize` option for configuring maximum UDP packet size used when connecting to the Jaeger agent. (#1853) + +### Fixed + +- The `Span.IsRecording` implementation from `go.opentelemetry.io/otel/sdk/trace` always returns false when not being sampled. (#1750) +- The Jaeger exporter now correctly sets tags for the Span status code and message. + This means it uses the correct tag keys (`"otel.status_code"`, `"otel.status_description"`) and does not set the status message as a tag unless it is set on the span. (#1761) +- The Jaeger exporter now correctly records Span event's names using the `"event"` key for a tag. + Additionally, this tag is overridden, as specified in the OTel specification, if the event contains an attribute with that key. (#1768) +- Zipkin Exporter: Ensure mapping between OTel and Zipkin span data complies with the specification. (#1688) +- Fixed typo for default service name in Jaeger Exporter. (#1797) +- Fix flaky OTLP for the reconnection of the client connection. (#1527, #1814) +- Fix Jaeger exporter dropping of span batches that exceed the UDP packet size limit. + Instead, the exporter now splits the batch into smaller sendable batches. 
(#1828) + +### Changed + +- Span `RecordError` now records an `exception` event to comply with the semantic convention specification. (#1492) +- Jaeger exporter was updated to use thrift v0.14.1. (#1712) +- Migrate from using internally built and maintained version of the OTLP to the one hosted at `go.opentelemetry.io/proto/otlp`. (#1713) +- Migrate from using `github.com/gogo/protobuf` to `google.golang.org/protobuf` to match `go.opentelemetry.io/proto/otlp`. (#1713) +- The storage of a local or remote Span in a `context.Context` using its SpanContext is unified to store just the current Span. + The Span's SpanContext can now self-identify as being remote or not. + This means that `"go.opentelemetry.io/otel/trace".ContextWithRemoteSpanContext` will now overwrite any existing current Span, not just existing remote Spans, and make it the current Span in a `context.Context`. (#1731) +- Improve OTLP/gRPC exporter connection errors. (#1737) +- Information about a parent span context in a `"go.opentelemetry.io/otel/export/trace".SpanSnapshot` is unified in a new `Parent` field. + The existing `ParentSpanID` and `HasRemoteParent` fields are removed in favor of this. (#1748) +- The `ParentContext` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is updated to hold a `context.Context` containing the parent span. + This changes it to make `SamplingParameters` conform with the OpenTelemetry specification. (#1749) +- Updated Jaeger Environment Variables: `JAEGER_ENDPOINT`, `JAEGER_USER`, `JAEGER_PASSWORD` + to `OTEL_EXPORTER_JAEGER_ENDPOINT`, `OTEL_EXPORTER_JAEGER_USER`, `OTEL_EXPORTER_JAEGER_PASSWORD` in compliance with OTel specification. (#1752) +- Modify `BatchSpanProcessor.ForceFlush` to abort after timeout/cancellation. (#1757) +- The `DroppedAttributeCount` field of the `Span` in the `go.opentelemetry.io/otel` package now only represents the number of attributes dropped for the span itself. 
+ It no longer is a conglomerate of itself, events, and link attributes that have been dropped. (#1771) +- Make `ExportSpans` in Jaeger Exporter honor context deadline. (#1773) +- Modify Zipkin Exporter default service name, use default resource's serviceName instead of empty. (#1777) +- The `go.opentelemetry.io/otel/sdk/export/trace` package is merged into the `go.opentelemetry.io/otel/sdk/trace` package. (#1778) +- The prometheus.InstallNewPipeline example is moved from comment to example test (#1796) +- The convenience functions for the stdout exporter have been updated to return the `TracerProvider` implementation and enable the shutdown of the exporter. (#1800) +- Replace the flush function returned from the Jaeger exporter's convenience creation functions (`InstallNewPipeline` and `NewExportPipeline`) with the `TracerProvider` implementation they create. + This enables the caller to shutdown and flush using the related `TracerProvider` methods. (#1822) +- Updated the Jaeger exporter to have a default endpoint, `http://localhost:14250`, for the collector. (#1824) +- Changed the function `WithCollectorEndpoint` in the Jaeger exporter to no longer accept an endpoint as an argument. + The endpoint can be passed with the `CollectorEndpointOption` using the `WithEndpoint` function or by setting the `OTEL_EXPORTER_JAEGER_ENDPOINT` environment variable value appropriately. (#1824) +- The Jaeger exporter no longer batches exported spans itself, instead it relies on the SDK's `BatchSpanProcessor` for this functionality. (#1830) +- The Jaeger exporter creation functions (`NewRawExporter`, `NewExportPipeline`, and `InstallNewPipeline`) no longer accept the removed `Option` type as a variadic argument. 
(#1830) + +### Removed + +- Removed Jaeger Environment variables: `JAEGER_SERVICE_NAME`, `JAEGER_DISABLED`, `JAEGER_TAGS` + These environment variables will no longer be used to override values of the Jaeger exporter (#1752) +- No longer set the links for a `Span` in `go.opentelemetry.io/otel/sdk/trace` that is configured to be a new root. + This is unspecified behavior that the OpenTelemetry community plans to standardize in the future. + To prevent backwards incompatible changes when it is specified, these links are removed. (#1726) +- Setting error status while recording error with Span from oteltest package. (#1729) +- The concept of a remote and local Span stored in a context is unified to just the current Span. + Because of this `"go.opentelemetry.io/otel/trace".RemoteSpanContextFromContext` is removed as it is no longer needed. + Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContext` can be used to return the current Span. + If needed, that Span's `SpanContext.IsRemote()` can then be used to determine if it is remote or not. (#1731) +- The `HasRemoteParent` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is removed. + This field is redundant to the information returned from the `Remote` method of the `SpanContext` held in the `ParentContext` field. (#1749) +- The `trace.FlagsDebug` and `trace.FlagsDeferred` constants have been removed and will be localized to the B3 propagator. (#1770) +- Remove `Process` configuration, `WithProcessFromEnv` and `ProcessFromEnv`, and type from the Jaeger exporter package. + The information that could be configured in the `Process` struct should be configured in a `Resource` instead. (#1776, #1804) +- Remove the `WithDisabled` option from the Jaeger exporter. + To disable the exporter unregister it from the `TracerProvider` or use a no-operation `TracerProvider`. (#1806) +- Removed the functions `CollectorEndpointFromEnv` and `WithCollectorEndpointOptionFromEnv` from the Jaeger exporter. 
+ These functions for retrieving specific environment variable values are redundant of other internal functions and + are not intended for end user use. (#1824) +- Removed the Jaeger exporter `WithSDKOptions` `Option`. + This option was used to set SDK options for the exporter creation convenience functions. + These functions are provided as a way to easily setup or install the exporter with what are deemed reasonable SDK settings for common use cases. + If the SDK needs to be configured differently, the `NewRawExporter` function and direct setup of the SDK with the desired settings should be used. (#1825) +- The `WithBufferMaxCount` and `WithBatchMaxCount` `Option`s from the Jaeger exporter are removed. + The exporter no longer batches exports, instead relying on the SDK's `BatchSpanProcessor` for this functionality. (#1830) +- The Jaeger exporter `Option` type is removed. + The type is no longer used by the exporter to configure anything. + All the previous configurations these options provided were duplicates of SDK configuration. + They have been removed in favor of using the SDK configuration and focuses the exporter configuration to be only about the endpoints it will send telemetry to. (#1830) + +## [0.19.0] - 2021-03-18 + +### Added + +- Added `Marshaler` config option to `otlphttp` to enable otlp over json or protobufs. (#1586) +- A `ForceFlush` method to the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` to flush all registered `SpanProcessor`s. (#1608) +- Added `WithSampler` and `WithSpanLimits` to tracer provider. (#1633, #1702) +- `"go.opentelemetry.io/otel/trace".SpanContext` now has a `remote` property, and `IsRemote()` predicate, that is true when the `SpanContext` has been extracted from remote context data. (#1701) +- A `Valid` method to the `"go.opentelemetry.io/otel/attribute".KeyValue` type. (#1703) + +### Changed + +- `trace.SpanContext` is now immutable and has no exported fields. 
(#1573) + - `trace.NewSpanContext()` can be used in conjunction with the `trace.SpanContextConfig` struct to initialize a new `SpanContext` where all values are known. +- Update the `ForceFlush` method signature to the `"go.opentelemetry.io/otel/sdk/trace".SpanProcessor` to accept a `context.Context` and return an error. (#1608) +- Update the `Shutdown` method to the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` return an error on shutdown failure. (#1608) +- The SimpleSpanProcessor will now shut down the enclosed `SpanExporter` and gracefully ignore subsequent calls to `OnEnd` after `Shutdown` is called. (#1612) +- `"go.opentelemetry.io/sdk/metric/controller.basic".WithPusher` is replaced with `WithExporter` to provide consistent naming across project. (#1656) +- Added non-empty string check for trace `Attribute` keys. (#1659) +- Add `description` to SpanStatus only when `StatusCode` is set to error. (#1662) +- Jaeger exporter falls back to `resource.Default`'s `service.name` if the exported Span does not have one. (#1673) +- Jaeger exporter populates Jaeger's Span Process from Resource. (#1673) +- Renamed the `LabelSet` method of `"go.opentelemetry.io/otel/sdk/resource".Resource` to `Set`. (#1692) +- Changed `WithSDK` to `WithSDKOptions` to accept variadic arguments of `TracerProviderOption` type in `go.opentelemetry.io/otel/exporters/trace/jaeger` package. (#1693) +- Changed `WithSDK` to `WithSDKOptions` to accept variadic arguments of `TracerProviderOption` type in `go.opentelemetry.io/otel/exporters/trace/zipkin` package. (#1693) + +### Removed + +- Removed `serviceName` parameter from Zipkin exporter and uses resource instead. (#1549) +- Removed `WithConfig` from tracer provider to avoid overriding configuration. (#1633) +- Removed the exported `SimpleSpanProcessor` and `BatchSpanProcessor` structs. + These are now returned as a SpanProcessor interface from their respective constructors. 
(#1638) +- Removed `WithRecord()` from `trace.SpanOption` when creating a span. (#1660) +- Removed setting status to `Error` while recording an error as a span event in `RecordError`. (#1663) +- Removed `jaeger.WithProcess` configuration option. (#1673) +- Removed `ApplyConfig` method from `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` and the now unneeded `Config` struct. (#1693) + +### Fixed + +- Jaeger Exporter: Ensure mapping between OTEL and Jaeger span data complies with the specification. (#1626) +- `SamplingResult.TraceState` is correctly propagated to a newly created span's `SpanContext`. (#1655) +- The `otel-collector` example now correctly flushes metric events prior to shutting down the exporter. (#1678) +- Do not set span status message in `SpanStatusFromHTTPStatusCode` if it can be inferred from `http.status_code`. (#1681) +- Synchronization issues in global trace delegate implementation. (#1686) +- Reduced excess memory usage by global `TracerProvider`. (#1687) + +## [0.18.0] - 2021-03-03 + +### Added + +- Added `resource.Default()` for use with meter and tracer providers. (#1507) +- `AttributePerEventCountLimit` and `AttributePerLinkCountLimit` for `SpanLimits`. (#1535) +- Added `Keys()` method to `propagation.TextMapCarrier` and `propagation.HeaderCarrier` to adapt `http.Header` to this interface. (#1544) +- Added `code` attributes to `go.opentelemetry.io/otel/semconv` package. (#1558) +- Compatibility testing suite in the CI system for the following systems. (#1567) + | OS | Go Version | Architecture | + | ------- | ---------- | ------------ | + | Ubuntu | 1.15 | amd64 | + | Ubuntu | 1.14 | amd64 | + | Ubuntu | 1.15 | 386 | + | Ubuntu | 1.14 | 386 | + | MacOS | 1.15 | amd64 | + | MacOS | 1.14 | amd64 | + | Windows | 1.15 | amd64 | + | Windows | 1.14 | amd64 | + | Windows | 1.15 | 386 | + | Windows | 1.14 | 386 | + +### Changed + +- Replaced interface `oteltest.SpanRecorder` with its existing implementation + `StandardSpanRecorder`. 
(#1542) +- Default span limit values to 128. (#1535) +- Rename `MaxEventsPerSpan`, `MaxAttributesPerSpan` and `MaxLinksPerSpan` to `EventCountLimit`, `AttributeCountLimit` and `LinkCountLimit`, and move these fields into `SpanLimits`. (#1535) +- Renamed the `otel/label` package to `otel/attribute`. (#1541) +- Vendor the Jaeger exporter's dependency on Apache Thrift. (#1551) +- Parallelize the CI linting and testing. (#1567) +- Stagger timestamps in exact aggregator tests. (#1569) +- Changed all examples to use `WithBatchTimeout(5 * time.Second)` rather than `WithBatchTimeout(5)`. (#1621) +- Prevent end-users from implementing some interfaces (#1575) + + ``` + "otel/exporters/otlp/otlphttp".Option + "otel/exporters/stdout".Option + "otel/oteltest".Option + "otel/trace".TracerOption + "otel/trace".SpanOption + "otel/trace".EventOption + "otel/trace".LifeCycleOption + "otel/trace".InstrumentationOption + "otel/sdk/resource".Option + "otel/sdk/trace".ParentBasedSamplerOption + "otel/sdk/trace".ReadOnlySpan + "otel/sdk/trace".ReadWriteSpan + ``` + +### Removed + +- Removed attempt to resample spans upon changing the span name with `span.SetName()`. (#1545) +- The `test-benchmark` is no longer a dependency of the `precommit` make target. (#1567) +- Removed the `test-386` make target. + This was replaced with a full compatibility testing suite (i.e. multi OS/arch) in the CI system. (#1567) + +### Fixed + +- The sequential timing check of timestamps in the stdout exporter are now setup explicitly to be sequential (#1571). (#1572) +- Windows build of Jaeger tests now compiles with OS specific functions (#1576). (#1577) +- The sequential timing check of timestamps of go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue are now setup explicitly to be sequential (#1578). (#1579) +- Validate tracestate header keys with vendors according to the W3C TraceContext specification (#1475). 
(#1581) +- The OTLP exporter includes related labels for translations of a GaugeArray (#1563). (#1570) + +## [0.17.0] - 2021-02-12 + +### Changed + +- Rename project default branch from `master` to `main`. (#1505) +- Reverse order in which `Resource` attributes are merged, per change in spec. (#1501) +- Add tooling to maintain "replace" directives in go.mod files automatically. (#1528) +- Create new modules: otel/metric, otel/trace, otel/oteltest, otel/sdk/export/metric, otel/sdk/metric (#1528) +- Move metric-related public global APIs from otel to otel/metric/global. (#1528) + +### Fixed + +- Fixed otlpgrpc reconnection issue. +- The example code in the README.md of `go.opentelemetry.io/otel/exporters/otlp` is moved to a compiled example test and used the new `WithAddress` instead of `WithEndpoint`. (#1513) +- The otel-collector example now uses the default OTLP receiver port of the collector. + +## [0.16.0] - 2021-01-13 + +### Added + +- Add the `ReadOnlySpan` and `ReadWriteSpan` interfaces to provide better control for accessing span data. (#1360) +- `NewGRPCDriver` function returns a `ProtocolDriver` that maintains a single gRPC connection to the collector. (#1369) +- Added documentation about the project's versioning policy. (#1388) +- Added `NewSplitDriver` for OTLP exporter that allows sending traces and metrics to different endpoints. (#1418) +- Added codeql workflow to GitHub Actions (#1428) +- Added Gosec workflow to GitHub Actions (#1429) +- Add new HTTP driver for OTLP exporter in `exporters/otlp/otlphttp`. Currently it only supports the binary protobuf payloads. (#1420) +- Add an OpenCensus exporter bridge. (#1444) + +### Changed + +- Rename `internal/testing` to `internal/internaltest`. (#1449) +- Rename `export.SpanData` to `export.SpanSnapshot` and use it only for exporting spans. (#1360) +- Store the parent's full `SpanContext` rather than just its span ID in the `span` struct. (#1360) +- Improve span duration accuracy. 
(#1360) +- Migrated CI/CD from CircleCI to GitHub Actions (#1382) +- Remove duplicate checkout from GitHub Actions workflow (#1407) +- Metric `array` aggregator renamed `exact` to match its `aggregation.Kind` (#1412) +- Metric `exact` aggregator includes per-point timestamps (#1412) +- Metric stdout exporter uses MinMaxSumCount aggregator for ValueRecorder instruments (#1412) +- `NewExporter` from `exporters/otlp` now takes a `ProtocolDriver` as a parameter. (#1369) +- Many OTLP Exporter options became gRPC ProtocolDriver options. (#1369) +- Unify endpoint API that related to OTel exporter. (#1401) +- Optimize metric histogram aggregator to re-use its slice of buckets. (#1435) +- Metric aggregator Count() and histogram Bucket.Counts are consistently `uint64`. (1430) +- Histogram aggregator accepts functional options, uses default boundaries if none given. (#1434) +- `SamplingResult` now passed a `Tracestate` from the parent `SpanContext` (#1432) +- Moved gRPC driver for OTLP exporter to `exporters/otlp/otlpgrpc`. (#1420) +- The `TraceContext` propagator now correctly propagates `TraceState` through the `SpanContext`. (#1447) +- Metric Push and Pull Controller components are combined into a single "basic" Controller: + - `WithExporter()` and `Start()` to configure Push behavior + - `Start()` is optional; use `Collect()` and `ForEach()` for Pull behavior + - `Start()` and `Stop()` accept Context. (#1378) +- The `Event` type is moved from the `otel/sdk/export/trace` package to the `otel/trace` API package. (#1452) + +### Removed + +- Remove `errUninitializedSpan` as its only usage is now obsolete. 
(#1360) +- Remove Metric export functionality related to quantiles and summary data points: this is not specified (#1412) +- Remove DDSketch metric aggregator; our intention is to re-introduce this as an option of the histogram aggregator after [new OTLP histogram data types](https://github.com/open-telemetry/opentelemetry-proto/pull/226) are released (#1412) + +### Fixed + +- `BatchSpanProcessor.Shutdown()` will now shutdown underlying `export.SpanExporter`. (#1443) + +## [0.15.0] - 2020-12-10 + +### Added + +- The `WithIDGenerator` `TracerProviderOption` is added to the `go.opentelemetry.io/otel/trace` package to configure an `IDGenerator` for the `TracerProvider`. (#1363) + +### Changed + +- The Zipkin exporter now uses the Span status code to determine. (#1328) +- `NewExporter` and `Start` functions in `go.opentelemetry.io/otel/exporters/otlp` now receive `context.Context` as a first parameter. (#1357) +- Move the OpenCensus example into `example` directory. (#1359) +- Moved the SDK's `internal.IDGenerator` interface in to the `sdk/trace` package to enable support for externally-defined ID generators. (#1363) +- Bump `github.com/google/go-cmp` from 0.5.3 to 0.5.4 (#1374) +- Bump `github.com/golangci/golangci-lint` in `/internal/tools` (#1375) + +### Fixed + +- Metric SDK `SumObserver` and `UpDownSumObserver` instruments correctness fixes. (#1381) + +## [0.14.0] - 2020-11-19 + +### Added + +- An `EventOption` and the related `NewEventConfig` function are added to the `go.opentelemetry.io/otel` package to configure Span events. (#1254) +- A `TextMapPropagator` and associated `TextMapCarrier` are added to the `go.opentelemetry.io/otel/oteltest` package to test `TextMap` type propagators and their use. (#1259) +- `SpanContextFromContext` returns `SpanContext` from context. (#1255) +- `TraceState` has been added to `SpanContext`. (#1340) +- `DeploymentEnvironmentKey` added to `go.opentelemetry.io/otel/semconv` package. 
(#1323) +- Add an OpenCensus to OpenTelemetry tracing bridge. (#1305) +- Add a parent context argument to `SpanProcessor.OnStart` to follow the specification. (#1333) +- Add missing tests for `sdk/trace/attributes_map.go`. (#1337) + +### Changed + +- Move the `go.opentelemetry.io/otel/api/trace` package into `go.opentelemetry.io/otel/trace` with the following changes. (#1229) (#1307) + - `ID` has been renamed to `TraceID`. + - `IDFromHex` has been renamed to `TraceIDFromHex`. + - `EmptySpanContext` is removed. +- Move the `go.opentelemetry.io/otel/api/trace/tracetest` package into `go.opentelemetry.io/otel/oteltest`. (#1229) +- OTLP Exporter updates: + - supports OTLP v0.6.0 (#1230, #1354) + - supports configurable aggregation temporality (default: Cumulative, optional: Stateless). (#1296) +- The Sampler is now called on local child spans. (#1233) +- The `Kind` type from the `go.opentelemetry.io/otel/api/metric` package was renamed to `InstrumentKind` to more specifically describe what it is and avoid semantic ambiguity. (#1240) +- The `MetricKind` method of the `Descriptor` type in the `go.opentelemetry.io/otel/api/metric` package was renamed to `Descriptor.InstrumentKind`. + This matches the returned type and fixes misuse of the term metric. (#1240) +- Move test harness from the `go.opentelemetry.io/otel/api/apitest` package into `go.opentelemetry.io/otel/oteltest`. (#1241) +- Move the `go.opentelemetry.io/otel/api/metric/metrictest` package into `go.opentelemetry.io/oteltest` as part of #964. (#1252) +- Move the `go.opentelemetry.io/otel/api/metric` package into `go.opentelemetry.io/otel/metric` as part of #1303. (#1321) +- Move the `go.opentelemetry.io/otel/api/metric/registry` package into `go.opentelemetry.io/otel/metric/registry` as a part of #1303. (#1316) +- Move the `Number` type (together with related functions) from `go.opentelemetry.io/otel/api/metric` package into `go.opentelemetry.io/otel/metric/number` as a part of #1303. 
(#1316) +- The function signature of the Span `AddEvent` method in `go.opentelemetry.io/otel` is updated to no longer take an unused context and instead take a required name and a variable number of `EventOption`s. (#1254) +- The function signature of the Span `RecordError` method in `go.opentelemetry.io/otel` is updated to no longer take an unused context and instead take a required error value and a variable number of `EventOption`s. (#1254) +- Move the `go.opentelemetry.io/otel/api/global` package to `go.opentelemetry.io/otel`. (#1262) (#1330) +- Move the `Version` function from `go.opentelemetry.io/otel/sdk` to `go.opentelemetry.io/otel`. (#1330) +- Rename correlation context header from `"otcorrelations"` to `"baggage"` to match the OpenTelemetry specification. (#1267) +- Fix `Code.UnmarshalJSON` to work with valid JSON only. (#1276) +- The `resource.New()` method changes signature to support builtin attributes and functional options, including `telemetry.sdk.*` and + `host.name` semantic conventions; the former method is renamed `resource.NewWithAttributes`. (#1235) +- The Prometheus exporter now exports non-monotonic counters (i.e. `UpDownCounter`s) as gauges. (#1210) +- Correct the `Span.End` method documentation in the `otel` API to state updates are not allowed on a span after it has ended. (#1310) +- Updated span collection limits for attribute, event and link counts to 1000 (#1318) +- Renamed `semconv.HTTPUrlKey` to `semconv.HTTPURLKey`. (#1338) + +### Removed + +- The `ErrInvalidHexID`, `ErrInvalidTraceIDLength`, `ErrInvalidSpanIDLength`, or `ErrNilSpanID` from the `go.opentelemetry.io/otel` package are unexported now. (#1243) +- The `AddEventWithTimestamp` method on the `Span` interface in `go.opentelemetry.io/otel` is removed due to its redundancy. + It is replaced by using the `AddEvent` method with a `WithTimestamp` option. 
(#1254) +- The `MockSpan` and `MockTracer` types are removed from `go.opentelemetry.io/otel/oteltest`. + `Tracer` and `Span` from the same module should be used in their place instead. (#1306) +- `WorkerCount` option is removed from `go.opentelemetry.io/otel/exporters/otlp`. (#1350) +- Remove the following labels types: INT32, UINT32, UINT64 and FLOAT32. (#1314) + +### Fixed + +- Rename `MergeItererator` to `MergeIterator` in the `go.opentelemetry.io/otel/label` package. (#1244) +- The `go.opentelemetry.io/otel/api/global` packages global TextMapPropagator now delegates functionality to a globally set delegate for all previously returned propagators. (#1258) +- Fix condition in `label.Any`. (#1299) +- Fix global `TracerProvider` to pass options to its configured provider. (#1329) +- Fix missing handler for `ExactKind` aggregator in OTLP metrics transformer (#1309) + +## [0.13.0] - 2020-10-08 + +### Added + +- OTLP Metric exporter supports Histogram aggregation. (#1209) +- The `Code` struct from the `go.opentelemetry.io/otel/codes` package now supports JSON marshaling and unmarshaling as well as implements the `Stringer` interface. (#1214) +- A Baggage API to implement the OpenTelemetry specification. (#1217) +- Add Shutdown method to sdk/trace/provider, shutdown processors in the order they were registered. (#1227) + +### Changed + +- Set default propagator to no-op propagator. (#1184) +- The `HTTPSupplier`, `HTTPExtractor`, `HTTPInjector`, and `HTTPPropagator` from the `go.opentelemetry.io/otel/api/propagation` package were replaced with unified `TextMapCarrier` and `TextMapPropagator` in the `go.opentelemetry.io/otel/propagation` package. (#1212) (#1325) +- The `New` function from the `go.opentelemetry.io/otel/api/propagation` package was replaced with `NewCompositeTextMapPropagator` in the `go.opentelemetry.io/otel` package. 
(#1212) +- The status codes of the `go.opentelemetry.io/otel/codes` package have been updated to match the latest OpenTelemetry specification. + They now are `Unset`, `Error`, and `Ok`. + They no longer track the gRPC codes. (#1214) +- The `StatusCode` field of the `SpanData` struct in the `go.opentelemetry.io/otel/sdk/export/trace` package now uses the codes package from this package instead of the gRPC project. (#1214) +- Move the `go.opentelemetry.io/otel/api/baggage` package into `go.opentelemetry.io/otel/baggage`. (#1217) (#1325) +- A `Shutdown` method of `SpanProcessor` and all its implementations receives a context and returns an error. (#1264) + +### Fixed + +- Copies of data from arrays and slices passed to `go.opentelemetry.io/otel/label.ArrayValue()` are now used in the returned `Value` instead of using the mutable data itself. (#1226) + +### Removed + +- The `ExtractHTTP` and `InjectHTTP` functions from the `go.opentelemetry.io/otel/api/propagation` package were removed. (#1212) +- The `Propagators` interface from the `go.opentelemetry.io/otel/api/propagation` package was removed to conform to the OpenTelemetry specification. + The explicit `TextMapPropagator` type can be used in its place as this is the `Propagator` type the specification defines. (#1212) +- The `SetAttribute` method of the `Span` from the `go.opentelemetry.io/otel/api/trace` package was removed given its redundancy with the `SetAttributes` method. (#1216) +- The internal implementation of Baggage storage is removed in favor of using the new Baggage API functionality. (#1217) +- Remove duplicate hostname key `HostHostNameKey` in Resource semantic conventions. (#1219) +- Nested array/slice support has been removed. (#1226) + +## [0.12.0] - 2020-09-24 + +### Added + +- A `SpanConfigure` function in `go.opentelemetry.io/otel/api/trace` to create a new `SpanConfig` from `SpanOption`s. 
(#1108) +- In the `go.opentelemetry.io/otel/api/trace` package, `NewTracerConfig` was added to construct new `TracerConfig`s. + This addition was made to conform with our project option conventions. (#1155) +- Instrumentation library information was added to the Zipkin exporter. (#1119) +- The `SpanProcessor` interface now has a `ForceFlush()` method. (#1166) +- More semantic conventions for k8s as resource attributes. (#1167) + +### Changed + +- Add reconnecting udp connection type to Jaeger exporter. + This change adds a new optional implementation of the udp conn interface used to detect changes to an agent's host dns record. + It then adopts the new destination address to ensure the exporter doesn't get stuck. This change was ported from jaegertracing/jaeger-client-go#520. (#1063) +- Replace `StartOption` and `EndOption` in `go.opentelemetry.io/otel/api/trace` with `SpanOption`. + This change is matched by replacing the `StartConfig` and `EndConfig` with a unified `SpanConfig`. (#1108) +- Replace the `LinkedTo` span option in `go.opentelemetry.io/otel/api/trace` with `WithLinks`. + This is to be more consistent with our other option patterns, i.e. passing the item to be configured directly instead of its component parts, and provides a cleaner function signature. (#1108) +- The `go.opentelemetry.io/otel/api/trace` `TracerOption` was changed to an interface to conform to project option conventions. (#1109) +- Move the `B3` and `TraceContext` from within the `go.opentelemetry.io/otel/api/trace` package to their own `go.opentelemetry.io/otel/propagators` package. + This removal of the propagators is reflective of the OpenTelemetry specification for these propagators as well as cleans up the `go.opentelemetry.io/otel/api/trace` API. (#1118) +- Rename Jaeger tags used for instrumentation library information to reflect changes in OpenTelemetry specification. 
(#1119) +- Rename `ProbabilitySampler` to `TraceIDRatioBased` and change semantics to ignore parent span sampling status. (#1115) +- Move `tools` package under `internal`. (#1141) +- Move `go.opentelemetry.io/otel/api/correlation` package to `go.opentelemetry.io/otel/api/baggage`. (#1142) + The `correlation.CorrelationContext` propagator has been renamed `baggage.Baggage`. Other exported functions and types are unchanged. +- Rename `ParentOrElse` sampler to `ParentBased` and allow setting samplers depending on parent span. (#1153) +- In the `go.opentelemetry.io/otel/api/trace` package, `SpanConfigure` was renamed to `NewSpanConfig`. (#1155) +- Change `dependabot.yml` to add a `Skip Changelog` label to dependabot-sourced PRs. (#1161) +- The [configuration style guide](https://github.com/open-telemetry/opentelemetry-go/blob/master/CONTRIBUTING.md#config) has been updated to + recommend the use of `newConfig()` instead of `configure()`. (#1163) +- The `otlp.Config` type has been unexported and changed to `otlp.config`, along with its initializer. (#1163) +- Ensure exported interface types include parameter names and update the + Style Guide to reflect this styling rule. (#1172) +- Don't consider unset environment variable for resource detection to be an error. (#1170) +- Rename `go.opentelemetry.io/otel/api/metric.ConfigureInstrument` to `NewInstrumentConfig` and + `go.opentelemetry.io/otel/api/metric.ConfigureMeter` to `NewMeterConfig`. +- ValueObserver instruments use LastValue aggregator by default. (#1165) +- OTLP Metric exporter supports LastValue aggregation. (#1165) +- Move the `go.opentelemetry.io/otel/api/unit` package to `go.opentelemetry.io/otel/unit`. (#1185) +- Rename `Provider` to `MeterProvider` in the `go.opentelemetry.io/otel/api/metric` package. (#1190) +- Rename `NoopProvider` to `NoopMeterProvider` in the `go.opentelemetry.io/otel/api/metric` package. 
(#1190) +- Rename `NewProvider` to `NewMeterProvider` in the `go.opentelemetry.io/otel/api/metric/metrictest` package. (#1190) +- Rename `Provider` to `MeterProvider` in the `go.opentelemetry.io/otel/api/metric/registry` package. (#1190) +- Rename `NewProvider` to `NewMeterProvider` in the `go.opentelemetry.io/otel/api/metric/registry` package. (#1190) +- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/api/trace` package. (#1190) +- Rename `NoopProvider` to `NoopTracerProvider` in the `go.opentelemetry.io/otel/api/trace` package. (#1190) +- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/api/trace/tracetest` package. (#1190) +- Rename `NewProvider` to `NewTracerProvider` in the `go.opentelemetry.io/otel/api/trace/tracetest` package. (#1190) +- Rename `WrapperProvider` to `WrapperTracerProvider` in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#1190) +- Rename `NewWrapperProvider` to `NewWrapperTracerProvider` in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#1190) +- Rename `Provider` method of the pull controller to `MeterProvider` in the `go.opentelemetry.io/otel/sdk/metric/controller/pull` package. (#1190) +- Rename `Provider` method of the push controller to `MeterProvider` in the `go.opentelemetry.io/otel/sdk/metric/controller/push` package. (#1190) +- Rename `ProviderOptions` to `TracerProviderConfig` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190) +- Rename `ProviderOption` to `TracerProviderOption` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190) +- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190) +- Rename `NewProvider` to `NewTracerProvider` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190) +- Renamed `SamplingDecision` values to comply with OpenTelemetry specification change. 
(#1192) +- Renamed Zipkin attribute names from `ot.status_code & ot.status_description` to `otel.status_code & otel.status_description`. (#1201) +- The default SDK now invokes registered `SpanProcessor`s in the order they were registered with the `TracerProvider`. (#1195) +- Add test of spans being processed by the `SpanProcessor`s in the order they were registered. (#1203) + +### Removed + +- Remove the B3 propagator from `go.opentelemetry.io/otel/propagators`. It is now located in the + `go.opentelemetry.io/contrib/propagators/` module. (#1191) +- Remove the semantic convention for HTTP status text, `HTTPStatusTextKey` from package `go.opentelemetry.io/otel/semconv`. (#1194) + +### Fixed + +- Zipkin example no longer mentions `ParentSampler`, corrected to `ParentBased`. (#1171) +- Fix missing shutdown processor in otel-collector example. (#1186) +- Fix missing shutdown processor in basic and namedtracer examples. (#1197) + +## [0.11.0] - 2020-08-24 + +### Added + +- Support for exporting array-valued attributes via OTLP. (#992) +- `Noop` and `InMemory` `SpanBatcher` implementations to help with testing integrations. (#994) +- Support for filtering metric label sets. (#1047) +- A dimensionality-reducing metric Processor. (#1057) +- Integration tests for more OTel Collector Attribute types. (#1062) +- A new `WithSpanProcessor` `ProviderOption` is added to the `go.opentelemetry.io/otel/sdk/trace` package to create a `Provider` and automatically register the `SpanProcessor`. (#1078) + +### Changed + +- Rename `sdk/metric/processor/test` to `sdk/metric/processor/processortest`. (#1049) +- Rename `sdk/metric/controller/test` to `sdk/metric/controller/controllertest`. (#1049) +- Rename `api/testharness` to `api/apitest`. (#1049) +- Rename `api/trace/testtrace` to `api/trace/tracetest`. (#1049) +- Change Metric Processor to merge multiple observations. (#1024) +- The `go.opentelemetry.io/otel/bridge/opentracing` bridge package has been made into its own module. 
+ This removes the package dependencies of this bridge from the rest of the OpenTelemetry based project. (#1038) +- Renamed `go.opentelemetry.io/otel/api/standard` package to `go.opentelemetry.io/otel/semconv` to avoid the ambiguous and generic name `standard` and better describe the package as containing OpenTelemetry semantic conventions. (#1016) +- The environment variable used for resource detection has been changed from `OTEL_RESOURCE_LABELS` to `OTEL_RESOURCE_ATTRIBUTES` (#1042) +- Replace `WithSyncer` with `WithBatcher` in examples. (#1044) +- Replace the `google.golang.org/grpc/codes` dependency in the API with an equivalent `go.opentelemetry.io/otel/codes` package. (#1046) +- Merge the `go.opentelemetry.io/otel/api/label` and `go.opentelemetry.io/otel/api/kv` into the new `go.opentelemetry.io/otel/label` package. (#1060) +- Unify Callback Function Naming. + Rename `*Callback` with `*Func`. (#1061) +- CI builds validate against last two versions of Go, dropping 1.13 and adding 1.15. (#1064) +- The `go.opentelemetry.io/otel/sdk/export/trace` interfaces `SpanSyncer` and `SpanBatcher` have been replaced with a specification compliant `Exporter` interface. + This interface still supports the export of `SpanData`, but only as a slice. + Implementation are also required now to return any error from `ExportSpans` if one occurs as well as implement a `Shutdown` method for exporter clean-up. (#1078) +- The `go.opentelemetry.io/otel/sdk/trace` `NewBatchSpanProcessor` function no longer returns an error. + If a `nil` exporter is passed as an argument to this function, instead of it returning an error, it now returns a `BatchSpanProcessor` that handles the export of `SpanData` by not taking any action. (#1078) +- The `go.opentelemetry.io/otel/sdk/trace` `NewProvider` function to create a `Provider` no longer returns an error, instead only a `*Provider`. 
+ This change is related to `NewBatchSpanProcessor` not returning an error which was the only error this function would return. (#1078) + +### Removed + +- Duplicate, unused API sampler interface. (#999) + Use the [`Sampler` interface](https://github.com/open-telemetry/opentelemetry-go/blob/v0.11.0/sdk/trace/sampling.go) provided by the SDK instead. +- The `grpctrace` instrumentation was moved to the `go.opentelemetry.io/contrib` repository and out of this repository. + This move includes moving the `grpc` example to the `go.opentelemetry.io/contrib` as well. (#1027) +- The `WithSpan` method of the `Tracer` interface. + The functionality this method provided was limited compared to what a user can provide themselves. + It was removed with the understanding that if there is sufficient user need it can be added back based on actual user usage. (#1043) +- The `RegisterSpanProcessor` and `UnregisterSpanProcessor` functions. + These were holdovers from an approach prior to the TracerProvider design. They were not used anymore. (#1077) +- The `oterror` package. (#1026) +- The `othttp` and `httptrace` instrumentations were moved to `go.opentelemetry.io/contrib`. (#1032) + +### Fixed + +- The `semconv.HTTPServerMetricAttributesFromHTTPRequest()` function no longer generates the high-cardinality `http.request.content.length` label. (#1031) +- Correct instrumentation version tag in Jaeger exporter. (#1037) +- The SDK span will now set an error event if the `End` method is called during a panic (i.e. it was deferred). (#1043) +- Move internally generated protobuf code from the `go.opentelemetry.io/otel` to the OTLP exporter to reduce dependency overhead. (#1050) +- The `otel-collector` example referenced outdated collector processors. (#1006) + +## [0.10.0] - 2020-07-29 + +This release migrates the default OpenTelemetry SDK into its own Go module, decoupling the SDK from the API and reducing dependencies for instrumentation packages. 
+ +### Added + +- The Zipkin exporter now has `NewExportPipeline` and `InstallNewPipeline` constructor functions to match the common pattern. + These functions build a new exporter with default SDK options and register the exporter with the `global` package respectively. (#944) +- Add propagator option for gRPC instrumentation. (#986) +- The `testtrace` package now tracks the `trace.SpanKind` for each span. (#987) + +### Changed + +- Replace the `RegisterGlobal` `Option` in the Jaeger exporter with an `InstallNewPipeline` constructor function. + This matches the other exporter constructor patterns and will register a new exporter after building it with default configuration. (#944) +- The trace (`go.opentelemetry.io/otel/exporters/trace/stdout`) and metric (`go.opentelemetry.io/otel/exporters/metric/stdout`) `stdout` exporters are now merged into a single exporter at `go.opentelemetry.io/otel/exporters/stdout`. + This new exporter was made into its own Go module to follow the pattern of all exporters and decouple it from the `go.opentelemetry.io/otel` module. (#956, #963) +- Move the `go.opentelemetry.io/otel/exporters/test` test package to `go.opentelemetry.io/otel/sdk/export/metric/metrictest`. (#962) +- The `go.opentelemetry.io/otel/api/kv/value` package was merged into the parent `go.opentelemetry.io/otel/api/kv` package. (#968) + - `value.Bool` was replaced with `kv.BoolValue`. + - `value.Int64` was replaced with `kv.Int64Value`. + - `value.Uint64` was replaced with `kv.Uint64Value`. + - `value.Float64` was replaced with `kv.Float64Value`. + - `value.Int32` was replaced with `kv.Int32Value`. + - `value.Uint32` was replaced with `kv.Uint32Value`. + - `value.Float32` was replaced with `kv.Float32Value`. + - `value.String` was replaced with `kv.StringValue`. + - `value.Int` was replaced with `kv.IntValue`. + - `value.Uint` was replaced with `kv.UintValue`. + - `value.Array` was replaced with `kv.ArrayValue`. 
+- Rename `Infer` to `Any` in the `go.opentelemetry.io/otel/api/kv` package. (#972) +- Change `othttp` to use the `httpsnoop` package to wrap the `ResponseWriter` so that optional interfaces (`http.Hijacker`, `http.Flusher`, etc.) that are implemented by the original `ResponseWriter`are also implemented by the wrapped `ResponseWriter`. (#979) +- Rename `go.opentelemetry.io/otel/sdk/metric/aggregator/test` package to `go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest`. (#980) +- Make the SDK into its own Go module called `go.opentelemetry.io/otel/sdk`. (#985) +- Changed the default trace `Sampler` from `AlwaysOn` to `ParentOrElse(AlwaysOn)`. (#989) + +### Removed + +- The `IndexedAttribute` function from the `go.opentelemetry.io/otel/api/label` package was removed in favor of `IndexedLabel` which it was synonymous with. (#970) + +### Fixed + +- Bump github.com/golangci/golangci-lint from 1.28.3 to 1.29.0 in /tools. (#953) +- Bump github.com/google/go-cmp from 0.5.0 to 0.5.1. (#957) +- Use `global.Handle` for span export errors in the OTLP exporter. (#946) +- Correct Go language formatting in the README documentation. (#961) +- Remove default SDK dependencies from the `go.opentelemetry.io/otel/api` package. (#977) +- Remove default SDK dependencies from the `go.opentelemetry.io/otel/instrumentation` package. (#983) +- Move documented examples for `go.opentelemetry.io/otel/instrumentation/grpctrace` interceptors into Go example tests. (#984) + +## [0.9.0] - 2020-07-20 + +### Added + +- A new Resource Detector interface is included to allow resources to be automatically detected and included. (#939) +- A Detector to automatically detect resources from an environment variable. (#939) +- Github action to generate protobuf Go bindings locally in `internal/opentelemetry-proto-gen`. (#938) +- OTLP .proto files from `open-telemetry/opentelemetry-proto` imported as a git submodule under `internal/opentelemetry-proto`. 
+ References to `github.com/open-telemetry/opentelemetry-proto` changed to `go.opentelemetry.io/otel/internal/opentelemetry-proto-gen`. (#942) + +### Changed + +- Non-nil value `struct`s for key-value pairs will be marshalled using JSON rather than `Sprintf`. (#948) + +### Removed + +- Removed dependency on `github.com/open-telemetry/opentelemetry-collector`. (#943) + +## [0.8.0] - 2020-07-09 + +### Added + +- The `B3Encoding` type to represent the B3 encoding(s) the B3 propagator can inject. + A value for HTTP supported encodings (Multiple Header: `MultipleHeader`, Single Header: `SingleHeader`) are included. (#882) +- The `FlagsDeferred` trace flag to indicate if the trace sampling decision has been deferred. (#882) +- The `FlagsDebug` trace flag to indicate if the trace is a debug trace. (#882) +- Add `peer.service` semantic attribute. (#898) +- Add database-specific semantic attributes. (#899) +- Add semantic convention for `faas.coldstart` and `container.id`. (#909) +- Add http content size semantic conventions. (#905) +- Include `http.request_content_length` in HTTP request basic attributes. (#905) +- Add semantic conventions for operating system process resource attribute keys. (#919) +- The Jaeger exporter now has a `WithBatchMaxCount` option to specify the maximum number of spans sent in a batch. (#931) + +### Changed + +- Update `CONTRIBUTING.md` to ask for updates to `CHANGELOG.md` with each pull request. (#879) +- Use lowercase header names for B3 Multiple Headers. (#881) +- The B3 propagator `SingleHeader` field has been replaced with `InjectEncoding`. + This new field can be set to combinations of the `B3Encoding` bitmasks and will inject trace information in these encodings. + If no encoding is set, the propagator will default to `MultipleHeader` encoding. (#882) +- The B3 propagator now extracts from either HTTP encoding of B3 (Single Header or Multiple Header) based on what is contained in the header. 
+ Preference is given to Single Header encoding with Multiple Header being the fallback if Single Header is not found or is invalid. + This behavior change is made to dynamically support all correctly encoded traces received instead of having to guess the expected encoding prior to receiving. (#882) +- Extend semantic conventions for RPC. (#900) +- To match constant naming conventions in the `api/standard` package, the `FaaS*` key names are appended with a suffix of `Key`. (#920) + - `"api/standard".FaaSName` -> `FaaSNameKey` + - `"api/standard".FaaSID` -> `FaaSIDKey` + - `"api/standard".FaaSVersion` -> `FaaSVersionKey` + - `"api/standard".FaaSInstance` -> `FaaSInstanceKey` + +### Removed + +- The `FlagsUnused` trace flag is removed. + The purpose of this flag was to act as the inverse of `FlagsSampled`, the inverse of `FlagsSampled` is used instead. (#882) +- The B3 header constants (`B3SingleHeader`, `B3DebugFlagHeader`, `B3TraceIDHeader`, `B3SpanIDHeader`, `B3SampledHeader`, `B3ParentSpanIDHeader`) are removed. + If B3 header keys are needed [the authoritative OpenZipkin package constants](https://pkg.go.dev/github.com/openzipkin/zipkin-go@v0.2.2/propagation/b3?tab=doc#pkg-constants) should be used instead. (#882) + +### Fixed + +- The B3 Single Header name is now correctly `b3` instead of the previous `X-B3`. (#881) +- The B3 propagator now correctly supports sampling only values (`b3: 0`, `b3: 1`, or `b3: d`) for a Single B3 Header. (#882) +- The B3 propagator now propagates the debug flag. + This removes the behavior of changing the debug flag into a set sampling bit. + Instead, this now follow the B3 specification and omits the `X-B3-Sampling` header. (#882) +- The B3 propagator now tracks "unset" sampling state (meaning "defer the decision") and does not set the `X-B3-Sampling` header when injecting. (#882) +- Bump github.com/itchyny/gojq from 0.10.3 to 0.10.4 in /tools. 
(#883) +- Bump github.com/opentracing/opentracing-go from v1.1.1-0.20190913142402-a7454ce5950e to v1.2.0. (#885) +- The tracing time conversion for OTLP spans is now correctly set to `UnixNano`. (#896) +- Ensure span status is not set to `Unknown` when no HTTP status code is provided as it is assumed to be `200 OK`. (#908) +- Ensure `httptrace.clientTracer` closes `http.headers` span. (#912) +- Prometheus exporter will not apply stale updates or forget inactive metrics. (#903) +- Add test for api.standard `HTTPClientAttributesFromHTTPRequest`. (#905) +- Bump github.com/golangci/golangci-lint from 1.27.0 to 1.28.1 in /tools. (#901, #913) +- Update otel-collector example to use the v0.5.0 collector. (#915) +- The `grpctrace` instrumentation uses a span name conforming to the OpenTelemetry semantic conventions (does not contain a leading slash (`/`)). (#922) +- The `grpctrace` instrumentation includes an `rpc.method` attribute now set to the gRPC method name. (#900, #922) +- The `grpctrace` instrumentation `rpc.service` attribute now contains the package name if one exists. + This is in accordance with OpenTelemetry semantic conventions. (#922) +- Correlation Context extractor will no longer insert an empty map into the returned context when no valid values are extracted. (#923) +- Bump google.golang.org/api from 0.28.0 to 0.29.0 in /exporters/trace/jaeger. (#925) +- Bump github.com/itchyny/gojq from 0.10.4 to 0.11.0 in /tools. (#926) +- Bump github.com/golangci/golangci-lint from 1.28.1 to 1.28.2 in /tools. (#930) + +## [0.7.0] - 2020-06-26 + +This release implements the v0.5.0 version of the OpenTelemetry specification. + +### Added + +- The othttp instrumentation now includes default metrics. (#861) +- This CHANGELOG file to track all changes in the project going forward. +- Support for array type attributes. (#798) +- Apply transitive dependabot go.mod dependency updates as part of a new automatic Github workflow. 
(#844) +- Timestamps are now passed to exporters for each export. (#835) +- Add new `Accumulation` type to metric SDK to transport telemetry from `Accumulator`s to `Processor`s. + This replaces the prior `Record` `struct` use for this purpose. (#835) +- New dependabot integration to automate package upgrades. (#814) +- `Meter` and `Tracer` implementations accept instrumentation version as an optional argument. + This instrumentation version is passed on to exporters. (#811) (#805) (#802) +- The OTLP exporter includes the instrumentation version in telemetry it exports. (#811) +- Environment variables for Jaeger exporter are supported. (#796) +- New `aggregation.Kind` in the export metric API. (#808) +- New example that uses OTLP and the collector. (#790) +- Handle errors in the span `SetName` during span initialization. (#791) +- Default service config to enable retries for retry-able failed requests in the OTLP exporter and an option to override this default. (#777) +- New `go.opentelemetry.io/otel/api/oterror` package to uniformly support error handling and definitions for the project. (#778) +- New `global` default implementation of the `go.opentelemetry.io/otel/api/oterror.Handler` interface to be used to handle errors prior to a user defined `Handler`. + There is also functionality for the user to register their `Handler` as well as a convenience function `Handle` to handle an error with this global `Handler`. (#778) +- Options to specify propagators for httptrace and grpctrace instrumentation. (#784) +- The required `application/json` header for the Zipkin exporter is included in all exports. (#774) +- Integrate HTTP semantics helpers from the contrib repository into the `api/standard` package. #769 + +### Changed + +- Rename `Integrator` to `Processor` in the metric SDK. (#863) +- Rename `AggregationSelector` to `AggregatorSelector`. (#859) +- Rename `SynchronizedCopy` to `SynchronizedMove`. (#858) +- Rename `simple` integrator to `basic` integrator. 
(#857) +- Merge otlp collector examples. (#841) +- Change the metric SDK to support cumulative, delta, and pass-through exporters directly. + With these changes, cumulative and delta specific exporters are able to request the correct kind of aggregation from the SDK. (#840) +- The `Aggregator.Checkpoint` API is renamed to `SynchronizedCopy` and adds an argument, a different `Aggregator` into which the copy is stored. (#812) +- The `export.Aggregator` contract is that `Update()` and `SynchronizedCopy()` are synchronized with each other. + All the aggregation interfaces (`Sum`, `LastValue`, ...) are not meant to be synchronized, as the caller is expected to synchronize aggregators at a higher level after the `Accumulator`. + Some of the `Aggregators` used unnecessary locking and that has been cleaned up. (#812) +- Use of `metric.Number` was replaced by `int64` now that we use `sync.Mutex` in the `MinMaxSumCount` and `Histogram` `Aggregators`. (#812) +- Replace `AlwaysParentSample` with `ParentSample(fallback)` to match the OpenTelemetry v0.5.0 specification. (#810) +- Rename `sdk/export/metric/aggregator` to `sdk/export/metric/aggregation`. #808 +- Send configured headers with every request in the OTLP exporter, instead of just on connection creation. (#806) +- Update error handling for any one off error handlers, replacing, instead, with the `global.Handle` function. (#791) +- Rename `plugin` directory to `instrumentation` to match the OpenTelemetry specification. (#779) +- Makes the argument order to Histogram and DDSketch `New()` consistent. (#781) + +### Removed + +- `Uint64NumberKind` and related functions from the API. (#864) +- Context arguments from `Aggregator.Checkpoint` and `Integrator.Process` as they were unused. (#803) +- `SpanID` is no longer included in parameters for sampling decision to match the OpenTelemetry specification. (#775) + +### Fixed + +- Upgrade OTLP exporter to opentelemetry-proto matching the opentelemetry-collector v0.4.0 release. 
(#866) +- Allow changes to `go.sum` and `go.mod` when running dependabot tidy-up. (#871) +- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1. (#824) +- Bump github.com/prometheus/client_golang from 1.7.0 to 1.7.1 in /exporters/metric/prometheus. (#867) +- Bump google.golang.org/grpc from 1.29.1 to 1.30.0 in /exporters/trace/jaeger. (#853) +- Bump google.golang.org/grpc from 1.29.1 to 1.30.0 in /exporters/trace/zipkin. (#854) +- Bumps github.com/golang/protobuf from 1.3.2 to 1.4.2 (#848) +- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/otlp (#817) +- Bump github.com/golangci/golangci-lint from 1.25.1 to 1.27.0 in /tools (#828) +- Bump github.com/prometheus/client_golang from 1.5.0 to 1.7.0 in /exporters/metric/prometheus (#838) +- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/trace/jaeger (#829) +- Bump github.com/benbjohnson/clock from 1.0.0 to 1.0.3 (#815) +- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/trace/zipkin (#823) +- Bump github.com/itchyny/gojq from 0.10.1 to 0.10.3 in /tools (#830) +- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/metric/prometheus (#822) +- Bump google.golang.org/grpc from 1.27.1 to 1.29.1 in /exporters/trace/zipkin (#820) +- Bump google.golang.org/grpc from 1.27.1 to 1.29.1 in /exporters/trace/jaeger (#831) +- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 (#836) +- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 in /exporters/trace/jaeger (#837) +- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 in /exporters/otlp (#839) +- Bump google.golang.org/api from 0.20.0 to 0.28.0 in /exporters/trace/jaeger (#843) +- Set span status from HTTP status code in the othttp instrumentation. (#832) +- Fixed typo in push controller comment. (#834) +- The `Aggregator` testing has been updated and cleaned. (#812) +- `metric.Number(0)` expressions are replaced by `0` where possible. (#812) +- Fixed `global` `handler_test.go` test failure. 
#804 +- Fixed `BatchSpanProcessor.Shutdown` to wait until all spans are processed. (#766) +- Fixed OTLP example's accidental early close of exporter. (#807) +- Ensure zipkin exporter reads and closes response body. (#788) +- Update instrumentation to use `api/standard` keys instead of custom keys. (#782) +- Clean up tools and RELEASING documentation. (#762) + +## [0.6.0] - 2020-05-21 + +### Added + +- Support for `Resource`s in the prometheus exporter. (#757) +- New pull controller. (#751) +- New `UpDownSumObserver` instrument. (#750) +- OpenTelemetry collector demo. (#711) +- New `SumObserver` instrument. (#747) +- New `UpDownCounter` instrument. (#745) +- New timeout `Option` and configuration function `WithTimeout` to the push controller. (#742) +- New `api/standards` package to implement semantic conventions and standard key-value generation. (#731) + +### Changed + +- Rename `Register*` functions in the metric API to `New*` for all `Observer` instruments. (#761) +- Use `[]float64` for histogram boundaries, not `[]metric.Number`. (#758) +- Change OTLP example to use exporter as a trace `Syncer` instead of as an unneeded `Batcher`. (#756) +- Replace `WithResourceAttributes()` with `WithResource()` in the trace SDK. (#754) +- The prometheus exporter now uses the new pull controller. (#751) +- Rename `ScheduleDelayMillis` to `BatchTimeout` in the trace `BatchSpanProcessor`.(#752) +- Support use of synchronous instruments in asynchronous callbacks (#725) +- Move `Resource` from the `Export` method parameter into the metric export `Record`. (#739) +- Rename `Observer` instrument to `ValueObserver`. (#734) +- The push controller now has a method (`Provider()`) to return a `metric.Provider` instead of the old `Meter` method that acted as a `metric.Provider`. (#738) +- Replace `Measure` instrument by `ValueRecorder` instrument. (#732) +- Rename correlation context header from `"Correlation-Context"` to `"otcorrelations"` to match the OpenTelemetry specification. 
(#727) + +### Fixed + +- Ensure gRPC `ClientStream` override methods do not panic in grpctrace package. (#755) +- Disable parts of `BatchSpanProcessor` test until a fix is found. (#743) +- Fix `string` case in `kv` `Infer` function. (#746) +- Fix panic in grpctrace client interceptors. (#740) +- Refactor the `api/metrics` push controller and add `CheckpointSet` synchronization. (#737) +- Rewrite span batch process queue batching logic. (#719) +- Remove the push controller named Meter map. (#738) +- Fix Histogram aggregator initial state (fix #735). (#736) +- Ensure golang alpine image is running `golang-1.14` for examples. (#733) +- Added test for grpctrace `UnaryInterceptorClient`. (#695) +- Rearrange `api/metric` code layout. (#724) + +## [0.5.0] - 2020-05-13 + +### Added + +- Batch `Observer` callback support. (#717) +- Alias `api` types to root package of project. (#696) +- Create basic `othttp.Transport` for simple client instrumentation. (#678) +- `SetAttribute(string, interface{})` to the trace API. (#674) +- Jaeger exporter option that allows user to specify custom http client. (#671) +- `Stringer` and `Infer` methods to `key`s. (#662) + +### Changed + +- Rename `NewKey` in the `kv` package to just `Key`. (#721) +- Move `core` and `key` to `kv` package. (#720) +- Make the metric API `Meter` a `struct` so the abstract `MeterImpl` can be passed and simplify implementation. (#709) +- Rename SDK `Batcher` to `Integrator` to match draft OpenTelemetry SDK specification. (#710) +- Rename SDK `Ungrouped` integrator to `simple.Integrator` to match draft OpenTelemetry SDK specification. (#710) +- Rename SDK `SDK` `struct` to `Accumulator` to match draft OpenTelemetry SDK specification. (#710) +- Move `Number` from `core` to `api/metric` package. (#706) +- Move `SpanContext` from `core` to `trace` package. (#692) +- Change traceparent header from `Traceparent` to `traceparent` to implement the W3C specification. 
(#681) + +### Fixed + +- Update tooling to run generators in all submodules. (#705) +- gRPC interceptor regexp to match methods without a service name. (#683) +- Use a `const` for padding 64-bit B3 trace IDs. (#701) +- Update `mockZipkin` listen address from `:0` to `127.0.0.1:0`. (#700) +- Left-pad 64-bit B3 trace IDs with zero. (#698) +- Propagate at least the first W3C tracestate header. (#694) +- Remove internal `StateLocker` implementation. (#688) +- Increase instance size CI system uses. (#690) +- Add a `key` benchmark and use reflection in `key.Infer()`. (#679) +- Fix internal `global` test by using `global.Meter` with `RecordBatch()`. (#680) +- Reimplement histogram using mutex instead of `StateLocker`. (#669) +- Switch `MinMaxSumCount` to a mutex lock implementation instead of `StateLocker`. (#667) +- Update documentation to not include any references to `WithKeys`. (#672) +- Correct misspelling. (#668) +- Fix clobbering of the span context if extraction fails. (#656) +- Bump `golangci-lint` and work around the corrupting bug. (#666) (#670) + +## [0.4.3] - 2020-04-24 + +### Added + +- `Dockerfile` and `docker-compose.yml` to run example code. (#635) +- New `grpctrace` package that provides gRPC client and server interceptors for both unary and stream connections. (#621) +- New `api/label` package, providing common label set implementation. (#651) +- Support for JSON marshaling of `Resources`. (#654) +- `TraceID` and `SpanID` implementations for `Stringer` interface. (#642) +- `RemoteAddrKey` in the othttp plugin to include the HTTP client address in top-level spans. (#627) +- `WithSpanFormatter` option to the othttp plugin. (#617) +- Updated README to include section for compatible libraries and include reference to the contrib repository. (#612) +- The prometheus exporter now supports exporting histograms. (#601) +- A `String` method to the `Resource` to return a hashable identifier for a now unique resource. 
(#613) +- An `Iter` method to the `Resource` to return an array `AttributeIterator`. (#613) +- An `Equal` method to the `Resource` to test the equivalence of resources. (#613) +- An iterable structure (`AttributeIterator`) for `Resource` attributes. + +### Changed + +- zipkin export's `NewExporter` now requires a `serviceName` argument to ensure this needed value is provided. (#644) +- Pass `Resources` through the metrics export pipeline. (#659) + +### Removed + +- `WithKeys` option from the metric API. (#639) + +### Fixed + +- Use the `label.Set.Equivalent` value instead of an encoding in the batcher. (#658) +- Correct typo `trace.Exporter` to `trace.SpanSyncer` in comments. (#653) +- Use type names for return values in jaeger exporter. (#648) +- Increase the visibility of the `api/key` package by updating comments and fixing usages locally. (#650) +- `Checkpoint` only after `Update`; Keep records in the `sync.Map` longer. (#647) +- Do not cache `reflect.ValueOf()` in metric Labels. (#649) +- Batch metrics exported from the OTLP exporter based on `Resource` and labels. (#626) +- Add error wrapping to the prometheus exporter. (#631) +- Update the OTLP exporter batching of traces to use a unique `string` representation of an associated `Resource` as the batching key. (#623) +- Update OTLP `SpanData` transform to only include the `ParentSpanID` if one exists. (#614) +- Update `Resource` internal representation to uniquely and reliably identify resources. (#613) +- Check return value from `CheckpointSet.ForEach` in prometheus exporter. (#622) +- Ensure spans created by httptrace client tracer reflect operation structure. (#618) +- Create a new recorder rather than reuse when multiple observations in same epoch for asynchronous instruments. #610 +- The default port the OTLP exporter uses to connect to the OpenTelemetry collector is updated to match the one the collector listens on by default. 
(#611) + +## [0.4.2] - 2020-03-31 + +### Fixed + +- Fix `pre_release.sh` to update version in `sdk/opentelemetry.go`. (#607) +- Fix time conversion from internal to OTLP in OTLP exporter. (#606) + +## [0.4.1] - 2020-03-31 + +### Fixed + +- Update `tag.sh` to create signed tags. (#604) + +## [0.4.0] - 2020-03-30 + +### Added + +- New API package `api/metric/registry` that exposes a `MeterImpl` wrapper for use by SDKs to generate unique instruments. (#580) +- Script to verify examples after a new release. (#579) + +### Removed + +- The dogstatsd exporter due to lack of support. + This additionally removes support for statsd. (#591) +- `LabelSet` from the metric API. + This is replaced by a `[]core.KeyValue` slice. (#595) +- `Labels` from the metric API's `Meter` interface. (#595) + +### Changed + +- The metric `export.Labels` became an interface which the SDK implements and the `export` package provides a simple, immutable implementation of this interface intended for testing purposes. (#574) +- Renamed `internal/metric.Meter` to `MeterImpl`. (#580) +- Renamed `api/global/internal.obsImpl` to `asyncImpl`. (#580) + +### Fixed + +- Corrected missing return in mock span. (#582) +- Update License header for all source files to match CNCF guidelines and include a test to ensure it is present. (#586) (#596) +- Update to v0.3.0 of the OTLP in the OTLP exporter. (#588) +- Update pre-release script to be compatible between GNU and BSD based systems. (#592) +- Add a `RecordBatch` benchmark. (#594) +- Moved span transforms of the OTLP exporter to the internal package. (#593) +- Build both go-1.13 and go-1.14 in circleci to test for all supported versions of Go. (#569) +- Removed unneeded allocation on empty labels in OLTP exporter. (#597) +- Update `BatchedSpanProcessor` to process the queue until no data but respect max batch size. (#599) +- Update project documentation godoc.org links to pkg.go.dev. 
(#602) + +## [0.3.0] - 2020-03-21 + +This is a first official beta release, which provides almost fully complete metrics, tracing, and context propagation functionality. +There is still a possibility of breaking changes. + +### Added + +- Add `Observer` metric instrument. (#474) +- Add global `Propagators` functionality to enable deferred initialization for propagators registered before the first Meter SDK is installed. (#494) +- Simplified export setup pipeline for the jaeger exporter to match other exporters. (#459) +- The zipkin trace exporter. (#495) +- The OTLP exporter to export metric and trace telemetry to the OpenTelemetry collector. (#497) (#544) (#545) +- Add `StatusMessage` field to the trace `Span`. (#524) +- Context propagation in OpenTracing bridge in terms of OpenTelemetry context propagation. (#525) +- The `Resource` type was added to the SDK. (#528) +- The global API now supports a `Tracer` and `Meter` function as shortcuts to getting a global `*Provider` and calling these methods directly. (#538) +- The metric API now defines a generic `MeterImpl` interface to support general purpose `Meter` construction. + Additionally, `SyncImpl` and `AsyncImpl` are added to support general purpose instrument construction. (#560) +- A metric `Kind` is added to represent the `MeasureKind`, `ObserverKind`, and `CounterKind`. (#560) +- Scripts to better automate the release process. (#576) + +### Changed + +- Default to use `AlwaysSampler` instead of `ProbabilitySampler` to match OpenTelemetry specification. (#506) +- Renamed `AlwaysSampleSampler` to `AlwaysOnSampler` in the trace API. (#511) +- Renamed `NeverSampleSampler` to `AlwaysOffSampler` in the trace API. (#511) +- The `Status` field of the `Span` was changed to `StatusCode` to disambiguate with the added `StatusMessage`. (#524) +- Updated the trace `Sampler` interface to conform to the OpenTelemetry specification. (#531) +- Rename metric API `Options` to `Config`. 
(#541) +- Rename metric `Counter` aggregator to be `Sum`. (#541) +- Unify metric options into `Option` from instrument specific options. (#541) +- The trace API's `TraceProvider` now support `Resource`s. (#545) +- Correct error in zipkin module name. (#548) +- The jaeger trace exporter now supports `Resource`s. (#551) +- Metric SDK now supports `Resource`s. + The `WithResource` option was added to configure a `Resource` on creation and the `Resource` method was added to the metric `Descriptor` to return the associated `Resource`. (#552) +- Replace `ErrNoLastValue` and `ErrEmptyDataSet` by `ErrNoData` in the metric SDK. (#557) +- The stdout trace exporter now supports `Resource`s. (#558) +- The metric `Descriptor` is now included at the API instead of the SDK. (#560) +- Replace `Ordered` with an iterator in `export.Labels`. (#567) + +### Removed + +- The vendor specific Stackdriver. It is now hosted on 3rd party vendor infrastructure. (#452) +- The `Unregister` method for metric observers as it is not in the OpenTelemetry specification. (#560) +- `GetDescriptor` from the metric SDK. (#575) +- The `Gauge` instrument from the metric API. (#537) + +### Fixed + +- Make histogram aggregator checkpoint consistent. (#438) +- Update README with import instructions and how to build and test. (#505) +- The default label encoding was updated to be unique. (#508) +- Use `NewRoot` in the othttp plugin for public endpoints. (#513) +- Fix data race in `BatchedSpanProcessor`. (#518) +- Skip test-386 for Mac OS 10.15.x (Catalina and upwards). #521 +- Use a variable-size array to represent ordered labels in maps. (#523) +- Update the OTLP protobuf and update changed import path. (#532) +- Use `StateLocker` implementation in `MinMaxSumCount`. (#546) +- Eliminate goroutine leak in histogram stress test. (#547) +- Update OTLP exporter with latest protobuf. (#550) +- Add filters to the othttp plugin. 
(#556) +- Provide an implementation of the `Header*` filters that do not depend on Go 1.14. (#565) +- Encode labels once during checkpoint. + The checkpoint function is executed in a single thread so we can do the encoding lazily before passing the encoded version of labels to the exporter. + This is a cheap and quick way to avoid encoding the labels on every collection interval. (#572) +- Run coverage over all packages in `COVERAGE_MOD_DIR`. (#573) + +## [0.2.3] - 2020-03-04 + +### Added + +- `RecordError` method on `Span`s in the trace API to simplify adding error events to spans. (#473) +- Configurable push frequency for exporters setup pipeline. (#504) + +### Changed + +- Rename the `exporter` directory to `exporters`. + The `go.opentelemetry.io/otel/exporter/trace/jaeger` package was mistakenly released with a `v1.0.0` tag instead of `v0.1.0`. + This resulted in all subsequent releases not becoming the default latest. + A consequence of this was that all `go get`s pulled in the incompatible `v0.1.0` release of that package when pulling in more recent packages from other otel packages. + Renaming the `exporter` directory to `exporters` fixes this issue by renaming the package and therefore clearing any existing dependency tags. + Consequentially, this action also renames *all* exporter packages. (#502) + +### Removed + +- The `CorrelationContextHeader` constant in the `correlation` package is no longer exported. (#503) + +## [0.2.2] - 2020-02-27 + +### Added + +- `HTTPSupplier` interface in the propagation API to specify methods to retrieve and store a single value for a key to be associated with a carrier. (#467) +- `HTTPExtractor` interface in the propagation API to extract information from an `HTTPSupplier` into a context. (#467) +- `HTTPInjector` interface in the propagation API to inject information into an `HTTPSupplier`. (#467) +- `Config` and configuring `Option` to the propagator API. 
(#467) +- `Propagators` interface in the propagation API to contain the set of injectors and extractors for all supported carrier formats. (#467) +- `HTTPPropagator` interface in the propagation API to inject and extract from an `HTTPSupplier.` (#467) +- `WithInjectors` and `WithExtractors` functions to the propagator API to configure injectors and extractors to use. (#467) +- `ExtractHTTP` and `InjectHTTP` functions to apply configured HTTP extractors and injectors to a passed context. (#467) +- Histogram aggregator. (#433) +- `DefaultPropagator` function and have it return `trace.TraceContext` as the default context propagator. (#456) +- `AlwaysParentSample` sampler to the trace API. (#455) +- `WithNewRoot` option function to the trace API to specify the created span should be considered a root span. (#451) + +### Changed + +- Renamed `WithMap` to `ContextWithMap` in the correlation package. (#481) +- Renamed `FromContext` to `MapFromContext` in the correlation package. (#481) +- Move correlation context propagation to correlation package. (#479) +- Do not default to putting remote span context into links. (#480) +- `Tracer.WithSpan` updated to accept `StartOptions`. (#472) +- Renamed `MetricKind` to `Kind` to not stutter in the type usage. (#432) +- Renamed the `export` package to `metric` to match directory structure. (#432) +- Rename the `api/distributedcontext` package to `api/correlation`. (#444) +- Rename the `api/propagators` package to `api/propagation`. (#444) +- Move the propagators from the `propagators` package into the `trace` API package. (#444) +- Update `Float64Gauge`, `Int64Gauge`, `Float64Counter`, `Int64Counter`, `Float64Measure`, and `Int64Measure` metric methods to use value receivers instead of pointers. (#462) +- Moved all dependencies of tools package to a tools directory. (#466) + +### Removed + +- Binary propagators. (#467) +- NOOP propagator. 
(#467) + +### Fixed + +- Upgraded `github.com/golangci/golangci-lint` from `v1.21.0` to `v1.23.6` in `tools/`. (#492) +- Fix a possible nil-dereference crash. (#478) +- Correct comments for `InstallNewPipeline` in the stdout exporter. (#483) +- Correct comments for `InstallNewPipeline` in the dogstatsd exporter. (#484) +- Correct comments for `InstallNewPipeline` in the prometheus exporter. (#482) +- Initialize `onError` based on `Config` in prometheus exporter. (#486) +- Correct module name in prometheus exporter README. (#475) +- Removed tracer name prefix from span names. (#430) +- Fix `aggregator_test.go` import package comment. (#431) +- Improved detail in stdout exporter. (#436) +- Fix a dependency issue (generate target should depend on stringer, not lint target) in Makefile. (#442) +- Reorders the Makefile targets within `precommit` target so we generate files and build the code before doing linting, so we can get much nicer errors about syntax errors from the compiler. (#442) +- Reword function documentation in gRPC plugin. (#446) +- Send the `span.kind` tag to Jaeger from the jaeger exporter. (#441) +- Fix `metadataSupplier` in the jaeger exporter to overwrite the header if existing instead of appending to it. (#441) +- Upgraded to Go 1.13 in CI. (#465) +- Correct opentelemetry.io URL in trace SDK documentation. (#464) +- Refactored reference counting logic in SDK determination of stale records. (#468) +- Add call to `runtime.Gosched` in instrument `acquireHandle` logic to not block the collector. (#469) + +## [0.2.1.1] - 2020-01-13 + +### Fixed + +- Use stateful batcher on Prometheus exporter fixing regression introduced in #395. (#428) + +## [0.2.1] - 2020-01-08 + +### Added + +- Global meter forwarding implementation. + This enables deferred initialization for metric instruments registered before the first Meter SDK is installed. (#392) +- Global trace forwarding implementation. 
+ This enables deferred initialization for tracers registered before the first Trace SDK is installed. (#406) +- Standardize export pipeline creation in all exporters. (#395) +- A testing, organization, and comments for 64-bit field alignment. (#418) +- Script to tag all modules in the project. (#414) + +### Changed + +- Renamed `propagation` package to `propagators`. (#362) +- Renamed `B3Propagator` propagator to `B3`. (#362) +- Renamed `TextFormatPropagator` propagator to `TextFormat`. (#362) +- Renamed `BinaryPropagator` propagator to `Binary`. (#362) +- Renamed `BinaryFormatPropagator` propagator to `BinaryFormat`. (#362) +- Renamed `NoopTextFormatPropagator` propagator to `NoopTextFormat`. (#362) +- Renamed `TraceContextPropagator` propagator to `TraceContext`. (#362) +- Renamed `SpanOption` to `StartOption` in the trace API. (#369) +- Renamed `StartOptions` to `StartConfig` in the trace API. (#369) +- Renamed `EndOptions` to `EndConfig` in the trace API. (#369) +- `Number` now has a pointer receiver for its methods. (#375) +- Renamed `CurrentSpan` to `SpanFromContext` in the trace API. (#379) +- Renamed `SetCurrentSpan` to `ContextWithSpan` in the trace API. (#379) +- Renamed `Message` in Event to `Name` in the trace API. (#389) +- Prometheus exporter no longer aggregates metrics, instead it only exports them. (#385) +- Renamed `HandleImpl` to `BoundInstrumentImpl` in the metric API. (#400) +- Renamed `Float64CounterHandle` to `Float64CounterBoundInstrument` in the metric API. (#400) +- Renamed `Int64CounterHandle` to `Int64CounterBoundInstrument` in the metric API. (#400) +- Renamed `Float64GaugeHandle` to `Float64GaugeBoundInstrument` in the metric API. (#400) +- Renamed `Int64GaugeHandle` to `Int64GaugeBoundInstrument` in the metric API. (#400) +- Renamed `Float64MeasureHandle` to `Float64MeasureBoundInstrument` in the metric API. (#400) +- Renamed `Int64MeasureHandle` to `Int64MeasureBoundInstrument` in the metric API. 
(#400) +- Renamed `Release` method for bound instruments in the metric API to `Unbind`. (#400) +- Renamed `AcquireHandle` method for bound instruments in the metric API to `Bind`. (#400) +- Renamed the `File` option in the stdout exporter to `Writer`. (#404) +- Renamed all `Options` to `Config` for all metric exports where this wasn't already the case. + +### Fixed + +- Aggregator import path corrected. (#421) +- Correct links in README. (#368) +- The README was updated to match latest code changes in its examples. (#374) +- Don't capitalize error statements. (#375) +- Fix ignored errors. (#375) +- Fix ambiguous variable naming. (#375) +- Removed unnecessary type casting. (#375) +- Use named parameters. (#375) +- Updated release schedule. (#378) +- Correct http-stackdriver example module name. (#394) +- Removed the `http.request` span in `httptrace` package. (#397) +- Add comments in the metrics SDK. (#399) +- Initialize checkpoint when creating ddsketch aggregator to prevent panic when merging into an empty one. (#402) (#403) +- Add documentation of compatible exporters in the README. (#405) +- Typo fix. (#408) +- Simplify span check logic in SDK tracer implementation. (#419) + +## [0.2.0] - 2019-12-03 + +### Added + +- Unary gRPC tracing example. (#351) +- Prometheus exporter. (#334) +- Dogstatsd metrics exporter. (#326) + +### Changed + +- Rename `MaxSumCount` aggregation to `MinMaxSumCount` and add the `Min` interface for this aggregation. (#352) +- Rename `GetMeter` to `Meter`. (#357) +- Rename `HTTPTraceContextPropagator` to `TraceContextPropagator`. (#355) +- Rename `HTTPB3Propagator` to `B3Propagator`. (#355) +- Rename `HTTPTraceContextPropagator` to `TraceContextPropagator`. (#355) +- Move `/global` package to `/api/global`. (#356) +- Rename `GetTracer` to `Tracer`. (#347) + +### Removed + +- `SetAttribute` from the `Span` interface in the trace API. (#361) +- `AddLink` from the `Span` interface in the trace API. 
(#349) +- `Link` from the `Span` interface in the trace API. (#349) + +### Fixed + +- Exclude example directories from coverage report. (#365) +- Lint make target now implements automatic fixes with `golangci-lint` before a second run to report the remaining issues. (#360) +- Drop `GO111MODULE` environment variable in Makefile as Go 1.13 is the project specified minimum version and this environment variable is not needed for that version of Go. (#359) +- Run the race checker for all tests. (#354) +- Redundant commands in the Makefile are removed. (#354) +- Split the `generate` and `lint` targets of the Makefile. (#354) +- Renames `circle-ci` target to more generic `ci` in Makefile. (#354) +- Add example Prometheus binary to gitignore. (#358) +- Support negative numbers with the `MaxSumCount`. (#335) +- Resolve race conditions in `push_test.go` identified in #339. (#340) +- Use `/usr/bin/env bash` as a shebang in scripts rather than `/bin/bash`. (#336) +- Trace benchmark now tests both `AlwaysSample` and `NeverSample`. + Previously it was testing `AlwaysSample` twice. (#325) +- Trace benchmark now uses a `[]byte` for `TraceID` to fix failing test. (#325) +- Added a trace benchmark to test variadic functions in `setAttribute` vs `setAttributes`. (#325) +- The `defaultkeys` batcher was only using the encoded label set as its map key while building a checkpoint. + This allowed distinct label sets through, but any metrics sharing a label set could be overwritten or merged incorrectly. + This was corrected. (#333) + +## [0.1.2] - 2019-11-18 + +### Fixed + +- Optimized the `simplelru` map for attributes to reduce the number of allocations. (#328) +- Removed unnecessary unslicing of parameters that are already a slice. (#324) + +## [0.1.1] - 2019-11-18 + +This release contains a Metrics SDK with stdout exporter and supports basic aggregations such as counter, gauges, array, maxsumcount, and ddsketch. + +### Added + +- Metrics stdout export pipeline. 
(#265) +- Array aggregation for raw measure metrics. (#282) +- The core.Value now has a `MarshalJSON` method. (#281) + +### Removed + +- `WithService`, `WithResources`, and `WithComponent` methods of tracers. (#314) +- Prefix slash in `Tracer.Start()` for the Jaeger example. (#292) + +### Changed + +- Allocation in LabelSet construction to reduce GC overhead. (#318) +- `trace.WithAttributes` to append values instead of replacing. (#315) +- Use a formula for tolerance in sampling tests. (#298) +- Move export types into trace and metric-specific sub-directories. (#289) +- `SpanKind` back to being based on an `int` type. (#288) + +### Fixed + +- URL to OpenTelemetry website in README. (#323) +- Name of othttp default tracer. (#321) +- `ExportSpans` for the stackdriver exporter now handles `nil` context. (#294) +- CI modules cache to correctly restore/save from/to the cache. (#316) +- Fix metric SDK race condition between `LoadOrStore` and the assignment `rec.recorder = i.meter.exporter.AggregatorFor(rec)`. (#293) +- README now reflects the new code structure introduced with these changes. (#291) +- Make the basic example work. (#279) + +## [0.1.0] - 2019-11-04 + +This is the first release of open-telemetry go library. +It contains api and sdk for trace and meter. + +### Added + +- Initial OpenTelemetry trace and metric API prototypes. +- Initial OpenTelemetry trace, metric, and export SDK packages. +- A wireframe bridge to support compatibility with OpenTracing. +- Example code for a basic, http-stackdriver, http, jaeger, and named tracer setup. +- Exporters for Jaeger, Stackdriver, and stdout. +- Propagators for binary, B3, and trace-context protocols. +- Project information and guidelines in the form of a README and CONTRIBUTING. +- Tools to build the project and a Makefile to automate the process. +- Apache-2.0 license. +- CircleCI build CI manifest files. +- CODEOWNERS file to track owners of this project. 
+ +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.14.0...HEAD +[1.14.0/0.37.0/0.0.4]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.14.0 +[1.13.0/0.36.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.13.0 +[1.12.0/0.35.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.12.0 +[1.11.2/0.34.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.2 +[1.11.1/0.33.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.1 +[1.11.0/0.32.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.0 +[0.32.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.2 +[0.32.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.1 +[0.32.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.0 +[1.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.10.0 +[1.9.0/0.0.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.9.0 +[1.8.0/0.31.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.8.0 +[1.7.0/0.30.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.7.0 +[0.29.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.29.0 +[1.6.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.3 +[1.6.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.2 +[1.6.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.1 +[1.6.0/0.28.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.0 +[1.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.5.0 +[1.4.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.1 +[1.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.0 +[1.3.0]: 
https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.3.0 +[1.2.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.2.0 +[1.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.1.0 +[1.0.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.1 +[Metrics 0.24.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.24.0 +[1.0.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0 +[1.0.0-RC3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC3 +[1.0.0-RC2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC2 +[Experimental Metrics v0.22.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.22.0 +[1.0.0-RC1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC1 +[0.20.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.20.0 +[0.19.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.19.0 +[0.18.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.18.0 +[0.17.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.17.0 +[0.16.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.16.0 +[0.15.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.15.0 +[0.14.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.14.0 +[0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.13.0 +[0.12.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.12.0 +[0.11.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.11.0 +[0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.10.0 +[0.9.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.9.0 +[0.8.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.8.0 +[0.7.0]: 
https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.7.0 +[0.6.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.6.0 +[0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.5.0 +[0.4.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.3 +[0.4.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.2 +[0.4.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.1 +[0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.0 +[0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.3.0 +[0.2.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.3 +[0.2.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.2 +[0.2.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.1.1 +[0.2.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.1 +[0.2.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.0 +[0.1.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.2 +[0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1 +[0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0 + +[Go 1.20]: https://go.dev/doc/go1.20 +[Go 1.19]: https://go.dev/doc/go1.19 +[Go 1.18]: https://go.dev/doc/go1.18 diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS new file mode 100644 index 00000000..c4012ed6 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS @@ -0,0 +1,17 @@ +##################################################### +# +# List of approvers for this repository +# +##################################################### +# +# Learn about membership in OpenTelemetry community: +# https://github.com/open-telemetry/community/blob/main/community-membership.md +# +# +# Learn about CODEOWNERS file format: +# 
https://help.github.com/en/articles/about-code-owners +# + +* @jmacd @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu + +CODEOWNERS @MrAlias @Aneurysm9 @MadVikingGod diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md new file mode 100644 index 00000000..a6928bfd --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -0,0 +1,526 @@ +# Contributing to opentelemetry-go + +The Go special interest group (SIG) meets regularly. See the +OpenTelemetry +[community](https://github.com/open-telemetry/community#golang-sdk) +repo for information on this and other language SIGs. + +See the [public meeting +notes](https://docs.google.com/document/d/1A63zSWX0x2CyCK_LoNhmQC4rqhLpYXJzXbEPDUQ2n6w/edit#heading=h.9tngw7jdwd6b) +for a summary description of past meetings. To request edit access, +join the meeting or get in touch on +[Slack](https://cloud-native.slack.com/archives/C01NPAXACKT). + +## Development + +You can view and edit the source code by cloning this repository: + +```sh +git clone https://github.com/open-telemetry/opentelemetry-go.git +``` + +Run `make test` to run the tests instead of `go test`. + +There are some generated files checked into the repo. To make sure +that the generated files are up-to-date, run `make` (or `make +precommit` - the `precommit` target is the default). + +The `precommit` target also fixes the formatting of the code and +checks the status of the go module files. + +If after running `make precommit` the output of `git status` contains +`nothing to commit, working tree clean` then it means that everything +is up-to-date and properly formatted. + +## Pull Requests + +### How to Send Pull Requests + +Everyone is welcome to contribute code to `opentelemetry-go` via +GitHub pull requests (PRs). 
+ +To create a new PR, fork the project in GitHub and clone the upstream +repo: + +```sh +go get -d go.opentelemetry.io/otel +``` + +(This may print some warning about "build constraints exclude all Go +files", just ignore it.) + +This will put the project in `${GOPATH}/src/go.opentelemetry.io/otel`. You +can alternatively use `git` directly with: + +```sh +git clone https://github.com/open-telemetry/opentelemetry-go +``` + +(Note that `git clone` is *not* using the `go.opentelemetry.io/otel` name - +that name is a kind of a redirector to GitHub that `go get` can +understand, but `git` does not.) + +This would put the project in the `opentelemetry-go` directory in +current working directory. + +Enter the newly created directory and add your fork as a new remote: + +```sh +git remote add <YOUR_FORK> git@github.com:<YOUR_GITHUB_USERNAME>/opentelemetry-go +``` + +Check out a new branch, make modifications, run linters and tests, update +`CHANGELOG.md`, and push the branch to your fork: + +```sh +git checkout -b <YOUR_BRANCH_NAME> +# edit files +# update changelog +make precommit +git add -p +git commit +git push <YOUR_FORK> <YOUR_BRANCH_NAME> +``` + +Open a pull request against the main `opentelemetry-go` repo. Be sure to add the pull +request ID to the entry you added to `CHANGELOG.md`. + +### How to Receive Comments + +* If the PR is not ready for review, please put `[WIP]` in the title, + tag it as `work-in-progress`, or mark it as + [`draft`](https://github.blog/2019-02-14-introducing-draft-pull-requests/). +* Make sure CLA is signed and CI is clear. + +### How to Get PRs Merged + +A PR is considered to be **ready to merge** when: + +* It has received two approvals from Collaborators/Maintainers (at + different companies). This is not enforced through technical means + and a PR may be **ready to merge** with a single approval if the change + and its approach have been discussed and consensus reached. +* Feedback has been addressed.
+* Any substantive changes to your PR will require that you clear any prior + Approval reviews, this includes changes resulting from other feedback. Unless + the approver explicitly stated that their approval will persist across + changes it should be assumed that the PR needs their review again. Other + project members (e.g. approvers, maintainers) can help with this if there are + any questions or if you forget to clear reviews. +* It has been open for review for at least one working day. This gives + people reasonable time to review. +* Trivial changes (typo, cosmetic, doc, etc.) do not have to wait for + one day and may be merged with a single Maintainer's approval. +* `CHANGELOG.md` has been updated to reflect what has been + added, changed, removed, or fixed. +* `README.md` has been updated if necessary. +* Urgent fix can take exception as long as it has been actively + communicated. + +Any Maintainer can merge the PR once it is **ready to merge**. + +## Design Choices + +As with other OpenTelemetry clients, opentelemetry-go follows the +[opentelemetry-specification](https://github.com/open-telemetry/opentelemetry-specification). + +It's especially valuable to read through the [library +guidelines](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/library-guidelines.md). + +### Focus on Capabilities, Not Structure Compliance + +OpenTelemetry is an evolving specification, one where the desires and +use cases are clear, but the method to satisfy those uses cases are +not. + +As such, Contributions should provide functionality and behavior that +conforms to the specification, but the interface and structure is +flexible. + +It is preferable to have contributions follow the idioms of the +language rather than conform to specific API names or argument +patterns in the spec. + +For a deeper discussion, see +[this](https://github.com/open-telemetry/opentelemetry-specification/issues/165). 
+ +## Documentation + +Each non-example Go Module should have its own `README.md` containing: + +- A pkg.go.dev badge which can be generated [here](https://pkg.go.dev/badge/). +- Brief description. +- Installation instructions (and requirements if applicable). +- Hyperlink to an example. Depending on the component the example can be: + - An `example_test.go` like [here](exporters/stdout/stdouttrace/example_test.go). + - A sample Go application with its own `README.md`, like [here](example/zipkin). +- Additional documentation sections such as: + - Configuration, + - Contributing, + - References. + +[Here](exporters/jaeger/README.md) is an example of a concise `README.md`. + +Moreover, it should be possible to navigate to any `README.md` from the +root `README.md`. + +## Style Guide + +One of the primary goals of this project is that it is actually used by +developers. With this goal in mind the project strives to build +user-friendly and idiomatic Go code adhering to the Go community's best +practices. + +For a non-comprehensive but foundational overview of these best practices +the [Effective Go](https://golang.org/doc/effective_go.html) documentation +is an excellent starting place. + +As a convenience for developers building this project the `make precommit` +will format, lint, validate, and in some cases fix the changes you plan to +submit. This check will need to pass for your changes to be able to be +merged. + +In addition to idiomatic Go, the project has adopted certain standards for +implementations of common patterns. These standards should be followed as a +default, and if they are not followed documentation needs to be included as +to the reasons why. + +### Configuration + +When creating an instantiation function for a complex `type T struct`, it is +useful to allow variable number of options to be applied. However, the strong +type system of Go restricts the function design options.
There are a few ways +to solve this problem, but we have landed on the following design. + +#### `config` + +Configuration should be held in a `struct` named `config`, or prefixed with +specific type name this Configuration applies to if there are multiple +`config` in the package. This type must contain configuration options. + +```go +// config contains configuration options for a thing. +type config struct { + // options ... +} +``` + +In general the `config` type will not need to be used externally to the +package and should be unexported. If, however, it is expected that the user +will likely want to build custom options for the configuration, the `config` +should be exported. Please, include in the documentation for the `config` +how the user can extend the configuration. + +It is important that internal `config` are not shared across package boundaries. +Meaning a `config` from one package should not be directly used by another. The +one exception is the API packages. The configs from the base API, eg. +`go.opentelemetry.io/otel/trace.TracerConfig` and +`go.opentelemetry.io/otel/metric.InstrumentConfig`, are intended to be consumed +by the SDK therefore it is expected that these are exported. + +When a config is exported we want to maintain forward and backward +compatibility, to achieve this no fields should be exported but should +instead be accessed by methods. + +Optionally, it is common to include a `newConfig` function (with the same +naming scheme). This function wraps any defaults setting and looping over +all options to create a configured `config`. + +```go +// newConfig returns an appropriately configured config. +func newConfig(options ...Option) config { + // Set default values for config. + config := config{/* […] */} + for _, option := range options { + config = option.apply(config) + } + // Perform any validation here.
+ return config +} +``` + +If validation of the `config` options is also performed this can return an +error as well that is expected to be handled by the instantiation function +or propagated to the user. + +Given the design goal of not having the user need to work with the `config`, +the `newConfig` function should also be unexported. + +#### `Option` + +To set the value of the options a `config` contains, a corresponding +`Option` interface type should be used. + +```go +type Option interface { + apply(config) config +} +``` + +Having `apply` unexported makes sure that it will not be used externally. +Moreover, the interface becomes sealed so the user cannot easily implement +the interface on its own. + +The `apply` method should return a modified version of the passed config. +This approach, instead of passing a pointer, is used to prevent the config from being allocated to the heap. + +The name of the interface should be prefixed in the same way the +corresponding `config` is (if at all). + +#### Options + +All user configurable options for a `config` must have a related unexported +implementation of the `Option` interface and an exported configuration +function that wraps this implementation. + +The wrapping function name should be prefixed with `With*` (or in the +special case of a boolean options `Without*`) and should have the following +function signature. + +```go +func With*(…) Option { … } +``` + +##### `bool` Options + +```go +type defaultFalseOption bool + +func (o defaultFalseOption) apply(c config) config { + c.Bool = bool(o) + return c +} + +// WithOption sets a T to have an option included. +func WithOption() Option { + return defaultFalseOption(true) +} +``` + +```go +type defaultTrueOption bool + +func (o defaultTrueOption) apply(c config) config { + c.Bool = bool(o) + return c +} + +// WithoutOption sets a T to have Bool option excluded.
+func WithoutOption() Option { + return defaultTrueOption(false) +} +``` + +##### Declared Type Options + +```go +type myTypeOption struct { + MyType MyType +} + +func (o myTypeOption) apply(c config) config { + c.MyType = o.MyType + return c +} + +// WithMyType sets T to have include MyType. +func WithMyType(t MyType) Option { + return myTypeOption{t} +} +``` + +##### Functional Options + +```go +type optionFunc func(config) config + +func (fn optionFunc) apply(c config) config { + return fn(c) +} + +// WithMyType sets t as MyType. +func WithMyType(t MyType) Option { + return optionFunc(func(c config) config { + c.MyType = t + return c + }) +} +``` + +#### Instantiation + +Using this configuration pattern to configure instantiation with a `NewT` +function. + +```go +func NewT(options ...Option) T {…} +``` + +Any required parameters can be declared before the variadic `options`. + +#### Dealing with Overlap + +Sometimes there are multiple complex `struct` that share common +configuration and also have distinct configuration. To avoid repeated +portions of `config`s, a common `config` can be used with the union of +options being handled with the `Option` interface. + +For example. + +```go +// config holds options for all animals. +type config struct { + Weight float64 + Color string + MaxAltitude float64 +} + +// DogOption apply Dog specific options. +type DogOption interface { + applyDog(config) config +} + +// BirdOption apply Bird specific options. +type BirdOption interface { + applyBird(config) config +} + +// Option apply options for all animals. 
+type Option interface { + BirdOption + DogOption +} + +type weightOption float64 + +func (o weightOption) applyDog(c config) config { + c.Weight = float64(o) + return c +} + +func (o weightOption) applyBird(c config) config { + c.Weight = float64(o) + return c +} + +func WithWeight(w float64) Option { return weightOption(w) } + +type furColorOption string + +func (o furColorOption) applyDog(c config) config { + c.Color = string(o) + return c +} + +func WithFurColor(c string) DogOption { return furColorOption(c) } + +type maxAltitudeOption float64 + +func (o maxAltitudeOption) applyBird(c config) config { + c.MaxAltitude = float64(o) + return c +} + +func WithMaxAltitude(a float64) BirdOption { return maxAltitudeOption(a) } + +func NewDog(name string, o ...DogOption) Dog {…} +func NewBird(name string, o ...BirdOption) Bird {…} +``` + +### Interfaces + +To allow other developers to better comprehend the code, it is important +to ensure it is sufficiently documented. One simple measure that contributes +to this aim is self-documenting by naming method parameters. Therefore, +where appropriate, methods of every exported interface type should have +their parameters appropriately named. + +#### Interface Stability + +All exported stable interfaces that include the following warning in their +documentation are allowed to be extended with additional methods. + +> Warning: methods may be added to this interface in minor releases. + +Otherwise, stable interfaces MUST NOT be modified. + +If new functionality is needed for an interface that cannot be changed it MUST +be added by including an additional interface. That added interface can be a +simple interface for the specific functionality that you want to add or it can +be a super-set of the original interface.
For example, if you wanted to add a +`Close` method to the `Exporter` interface: + +```go +type Exporter interface { + Export() +} +``` + +A new interface, `Closer`, can be added: + +```go +type Closer interface { + Close() +} +``` + +Code that is passed the `Exporter` interface can now check to see if the passed +value also satisfies the new interface. E.g. + +```go +func caller(e Exporter) { + /* ... */ + if c, ok := e.(Closer); ok { + c.Close() + } + /* ... */ +} +``` + +Alternatively, a new type that is the super-set of an `Exporter` can be created. + +```go +type ClosingExporter struct { + Exporter + Close() +} +``` + +This new type can be used similar to the simple interface above in that a +passed `Exporter` type can be asserted to satisfy the `ClosingExporter` type +and the `Close` method called. + +This super-set approach can be useful if there is explicit behavior that needs +to be coupled with the original type and passed as a unified type to a new +function, but, because of this coupling, it also limits the applicability of +the added functionality. If there exist other interfaces where this +functionality should be added, each one will need their own super-set +interfaces and will duplicate the pattern. For this reason, the simple targeted +interface that defines the specific functionality should be preferred.
+ +## Approvers and Maintainers + +Approvers: + +- [Evan Torrie](https://github.com/evantorrie), Verizon Media +- [Josh MacDonald](https://github.com/jmacd), LightStep +- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics +- [David Ashpole](https://github.com/dashpole), Google +- [Robert Pająk](https://github.com/pellared), Splunk +- [Chester Cheung](https://github.com/hanyuancheung), Tencent +- [Damien Mathieu](https://github.com/dmathieu), Elastic + +Maintainers: + +- [Aaron Clawson](https://github.com/MadVikingGod), LightStep +- [Anthony Mirabella](https://github.com/Aneurysm9), AWS +- [Tyler Yahn](https://github.com/MrAlias), Splunk + +Emeritus: + +- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep + +### Become an Approver or a Maintainer + +See the [community membership document in OpenTelemetry community +repo](https://github.com/open-telemetry/community/blob/main/community-membership.md). diff --git a/vendor/go.opentelemetry.io/otel/LICENSE b/vendor/go.opentelemetry.io/otel/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile new file mode 100644 index 00000000..0e6ffa28 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -0,0 +1,227 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +TOOLS_MOD_DIR := ./internal/tools + +ALL_DOCS := $(shell find . -name '*.md' -type f | sort) +ALL_GO_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | sort) +OTEL_GO_MOD_DIRS := $(filter-out $(TOOLS_MOD_DIR), $(ALL_GO_MOD_DIRS)) +ALL_COVERAGE_MOD_DIRS := $(shell find . 
-type f -name 'go.mod' -exec dirname {} \; | grep -E -v '^./example|^$(TOOLS_MOD_DIR)' | sort) + +GO = go +TIMEOUT = 60 + +.DEFAULT_GOAL := precommit + +.PHONY: precommit ci +precommit: dependabot-generate license-check vanity-import-fix misspell go-mod-tidy golangci-lint-fix test-default +ci: dependabot-check license-check lint vanity-import-check build test-default check-clean-work-tree test-coverage + +# Tools + +TOOLS = $(CURDIR)/.tools + +$(TOOLS): + @mkdir -p $@ +$(TOOLS)/%: | $(TOOLS) + cd $(TOOLS_MOD_DIR) && \ + $(GO) build -o $@ $(PACKAGE) + +MULTIMOD = $(TOOLS)/multimod +$(TOOLS)/multimod: PACKAGE=go.opentelemetry.io/build-tools/multimod + +SEMCONVGEN = $(TOOLS)/semconvgen +$(TOOLS)/semconvgen: PACKAGE=go.opentelemetry.io/build-tools/semconvgen + +CROSSLINK = $(TOOLS)/crosslink +$(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink + +SEMCONVKIT = $(TOOLS)/semconvkit +$(TOOLS)/semconvkit: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/semconvkit + +DBOTCONF = $(TOOLS)/dbotconf +$(TOOLS)/dbotconf: PACKAGE=go.opentelemetry.io/build-tools/dbotconf + +GOLANGCI_LINT = $(TOOLS)/golangci-lint +$(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/cmd/golangci-lint + +MISSPELL = $(TOOLS)/misspell +$(TOOLS)/misspell: PACKAGE=github.com/client9/misspell/cmd/misspell + +GOCOVMERGE = $(TOOLS)/gocovmerge +$(TOOLS)/gocovmerge: PACKAGE=github.com/wadey/gocovmerge + +STRINGER = $(TOOLS)/stringer +$(TOOLS)/stringer: PACKAGE=golang.org/x/tools/cmd/stringer + +PORTO = $(TOOLS)/porto +$(TOOLS)/porto: PACKAGE=github.com/jcchavezs/porto/cmd/porto + +GOJQ = $(TOOLS)/gojq +$(TOOLS)/gojq: PACKAGE=github.com/itchyny/gojq/cmd/gojq + +.PHONY: tools +tools: $(CROSSLINK) $(DBOTCONF) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) + +# Build + +.PHONY: generate build + +generate: $(OTEL_GO_MOD_DIRS:%=generate/%) +generate/%: DIR=$* +generate/%: | $(STRINGER) $(PORTO) + @echo "$(GO) generate 
$(DIR)/..." \ + && cd $(DIR) \ + && PATH="$(TOOLS):$${PATH}" $(GO) generate ./... && $(PORTO) -w . + +build: generate $(OTEL_GO_MOD_DIRS:%=build/%) $(OTEL_GO_MOD_DIRS:%=build-tests/%) +build/%: DIR=$* +build/%: + @echo "$(GO) build $(DIR)/..." \ + && cd $(DIR) \ + && $(GO) build ./... + +build-tests/%: DIR=$* +build-tests/%: + @echo "$(GO) build tests $(DIR)/..." \ + && cd $(DIR) \ + && $(GO) list ./... \ + | grep -v third_party \ + | xargs $(GO) test -vet=off -run xxxxxMatchNothingxxxxx >/dev/null + +# Tests + +TEST_TARGETS := test-default test-bench test-short test-verbose test-race +.PHONY: $(TEST_TARGETS) test +test-default test-race: ARGS=-race +test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=. +test-short: ARGS=-short +test-verbose: ARGS=-v -race +$(TEST_TARGETS): test +test: $(OTEL_GO_MOD_DIRS:%=test/%) +test/%: DIR=$* +test/%: + @echo "$(GO) test -timeout $(TIMEOUT)s $(ARGS) $(DIR)/..." \ + && cd $(DIR) \ + && $(GO) list ./... \ + | grep -v third_party \ + | xargs $(GO) test -timeout $(TIMEOUT)s $(ARGS) + +COVERAGE_MODE = atomic +COVERAGE_PROFILE = coverage.out +.PHONY: test-coverage +test-coverage: | $(GOCOVMERGE) + @set -e; \ + printf "" > coverage.txt; \ + for dir in $(ALL_COVERAGE_MOD_DIRS); do \ + echo "$(GO) test -coverpkg=go.opentelemetry.io/otel/... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" $${dir}/..."; \ + (cd "$${dir}" && \ + $(GO) list ./... \ + | grep -v third_party \ + | grep -v 'semconv/v.*' \ + | xargs $(GO) test -coverpkg=./... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" && \ + $(GO) tool cover -html=coverage.out -o coverage.html); \ + done; \ + $(GOCOVMERGE) $$(find . 
-name coverage.out) > coverage.txt + +.PHONY: golangci-lint golangci-lint-fix +golangci-lint-fix: ARGS=--fix +golangci-lint-fix: golangci-lint +golangci-lint: $(OTEL_GO_MOD_DIRS:%=golangci-lint/%) +golangci-lint/%: DIR=$* +golangci-lint/%: | $(GOLANGCI_LINT) + @echo 'golangci-lint $(if $(ARGS),$(ARGS) ,)$(DIR)' \ + && cd $(DIR) \ + && $(GOLANGCI_LINT) run --allow-serial-runners $(ARGS) + +.PHONY: crosslink +crosslink: | $(CROSSLINK) + @echo "Updating intra-repository dependencies in all go modules" \ + && $(CROSSLINK) --root=$(shell pwd) --prune + +.PHONY: go-mod-tidy +go-mod-tidy: $(ALL_GO_MOD_DIRS:%=go-mod-tidy/%) +go-mod-tidy/%: DIR=$* +go-mod-tidy/%: | crosslink + @echo "$(GO) mod tidy in $(DIR)" \ + && cd $(DIR) \ + && $(GO) mod tidy -compat=1.18 + +.PHONY: lint-modules +lint-modules: go-mod-tidy + +.PHONY: lint +lint: misspell lint-modules golangci-lint + +.PHONY: vanity-import-check +vanity-import-check: | $(PORTO) + @$(PORTO) --include-internal -l . || echo "(run: make vanity-import-fix)" + +.PHONY: vanity-import-fix +vanity-import-fix: | $(PORTO) + @$(PORTO) --include-internal -w . + +.PHONY: misspell +misspell: | $(MISSPELL) + @$(MISSPELL) -w $(ALL_DOCS) + +.PHONY: license-check +license-check: + @licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*' ! -path './.git/*' ) ; do \ + awk '/Copyright The OpenTelemetry Authors|generated|GENERATED/ && NR<=3 { found=1; next } END { if (!found) print FILENAME }' $$f; \ + done); \ + if [ -n "$${licRes}" ]; then \ + echo "license header checking failed:"; echo "$${licRes}"; \ + exit 1; \ + fi + +DEPENDABOT_CONFIG = .github/dependabot.yml +.PHONY: dependabot-check +dependabot-check: | $(DBOTCONF) + @$(DBOTCONF) verify $(DEPENDABOT_CONFIG) || echo "(run: make dependabot-generate)" + +.PHONY: dependabot-generate +dependabot-generate: | $(DBOTCONF) + @$(DBOTCONF) generate > $(DEPENDABOT_CONFIG) + +.PHONY: check-clean-work-tree +check-clean-work-tree: + @if ! 
git diff --quiet; then \ + echo; \ + echo 'Working tree is not clean, did you forget to run "make precommit"?'; \ + echo; \ + git status; \ + exit 1; \ + fi + +SEMCONVPKG ?= "semconv/" +.PHONY: semconv-generate +semconv-generate: | $(SEMCONVGEN) $(SEMCONVKIT) + [ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry specification tag"; exit 1 ) + [ "$(OTEL_SPEC_REPO)" ] || ( echo "OTEL_SPEC_REPO unset: missing path to opentelemetry specification repo"; exit 1 ) + $(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/." --only=span -p conventionType=trace -f trace.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" + $(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/." --only=event -p conventionType=event -f event.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" + $(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/." --only=resource -p conventionType=resource -f resource.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" + $(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)" + +.PHONY: prerelease +prerelease: | $(MULTIMOD) + @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) + $(MULTIMOD) verify && $(MULTIMOD) prerelease -m ${MODSET} + +COMMIT ?= "HEAD" +.PHONY: add-tags +add-tags: | $(MULTIMOD) + @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) + $(MULTIMOD) verify && $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT} diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md new file mode 100644 index 00000000..878d87e5 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -0,0 +1,114 @@ +# OpenTelemetry-Go + +[![CI](https://github.com/open-telemetry/opentelemetry-go/workflows/ci/badge.svg)](https://github.com/open-telemetry/opentelemetry-go/actions?query=workflow%3Aci+branch%3Amain) +[![codecov.io](https://codecov.io/gh/open-telemetry/opentelemetry-go/coverage.svg?branch=main)](https://app.codecov.io/gh/open-telemetry/opentelemetry-go?branch=main) 
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel)](https://pkg.go.dev/go.opentelemetry.io/otel) +[![Go Report Card](https://goreportcard.com/badge/go.opentelemetry.io/otel)](https://goreportcard.com/report/go.opentelemetry.io/otel) +[![Slack](https://img.shields.io/badge/slack-@cncf/otel--go-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C01NPAXACKT) + +OpenTelemetry-Go is the [Go](https://golang.org/) implementation of [OpenTelemetry](https://opentelemetry.io/). +It provides a set of APIs to directly measure performance and behavior of your software and send this data to observability platforms. + +## Project Status + +| Signal | Status | Project | +| ------- | ---------- | ------- | +| Traces | Stable | N/A | +| Metrics | Alpha | N/A | +| Logs | Frozen [1] | N/A | + +- [1]: The Logs signal development is halted for this project while we develop both Traces and Metrics. + No Logs Pull Requests are currently being accepted. + +Progress and status specific to this repository is tracked in our local +[project boards](https://github.com/open-telemetry/opentelemetry-go/projects) +and +[milestones](https://github.com/open-telemetry/opentelemetry-go/milestones). + +Project versioning information and stability guarantees can be found in the +[versioning documentation](./VERSIONING.md). + +### Compatibility + +OpenTelemetry-Go ensures compatibility with the current supported versions of +the [Go language](https://golang.org/doc/devel/release#policy): + +> Each major Go release is supported until there are two newer major releases. +> For example, Go 1.5 was supported until the Go 1.7 release, and Go 1.6 was supported until the Go 1.8 release. + +For versions of Go that are no longer supported upstream, opentelemetry-go will +stop ensuring compatibility with these versions in the following manner: + +- A minor release of opentelemetry-go will be made to add support for the new + supported release of Go. 
+- The following minor release of opentelemetry-go will remove compatibility + testing for the oldest (now archived upstream) version of Go. This, and + future, releases of opentelemetry-go may include features only supported by + the currently supported versions of Go. + +Currently, this project supports the following environments. + +| OS | Go Version | Architecture | +| ------- | ---------- | ------------ | +| Ubuntu | 1.20 | amd64 | +| Ubuntu | 1.19 | amd64 | +| Ubuntu | 1.18 | amd64 | +| Ubuntu | 1.20 | 386 | +| Ubuntu | 1.19 | 386 | +| Ubuntu | 1.18 | 386 | +| MacOS | 1.20 | amd64 | +| MacOS | 1.19 | amd64 | +| MacOS | 1.18 | amd64 | +| Windows | 1.20 | amd64 | +| Windows | 1.19 | amd64 | +| Windows | 1.18 | amd64 | +| Windows | 1.20 | 386 | +| Windows | 1.19 | 386 | +| Windows | 1.18 | 386 | + +While this project should work for other systems, no compatibility guarantees +are made for those systems currently. + +## Getting Started + +You can find a getting started guide on [opentelemetry.io](https://opentelemetry.io/docs/go/getting-started/). + +OpenTelemetry's goal is to provide a single set of APIs to capture distributed +traces and metrics from your application and send them to an observability +platform. This project allows you to do just that for applications written in +Go. There are two steps to this process: instrument your application, and +configure an exporter. + +### Instrumentation + +To start capturing distributed traces and metric events from your application +it first needs to be instrumented. The easiest way to do this is by using an +instrumentation library for your code. Be sure to check out [the officially +supported instrumentation +libraries](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/instrumentation). 
+ +If you need to extend the telemetry an instrumentation library provides or want +to build your own instrumentation for your application directly you will need +to use the +[Go otel](https://pkg.go.dev/go.opentelemetry.io/otel) +package. The included [examples](./example/) are a good way to see some +practical uses of this process. + +### Export + +Now that your application is instrumented to collect telemetry, it needs an +export pipeline to send that telemetry to an observability platform. + +All officially supported exporters for the OpenTelemetry project are contained in the [exporters directory](./exporters). + +| Exporter | Metrics | Traces | +| :-----------------------------------: | :-----: | :----: | +| [Jaeger](./exporters/jaeger/) | | ✓ | +| [OTLP](./exporters/otlp/) | ✓ | ✓ | +| [Prometheus](./exporters/prometheus/) | ✓ | | +| [stdout](./exporters/stdout/) | ✓ | ✓ | +| [Zipkin](./exporters/zipkin/) | | ✓ | + +## Contributing + +See the [contributing documentation](CONTRIBUTING.md). diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md new file mode 100644 index 00000000..77d56c93 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/RELEASING.md @@ -0,0 +1,127 @@ +# Release Process + +## Semantic Convention Generation + +New versions of the [OpenTelemetry specification] mean new versions of the `semconv` package need to be generated. +The `semconv-generate` make target is used for this. + +1. Checkout a local copy of the [OpenTelemetry specification] to the desired release tag. +2. Pull the latest `otel/semconvgen` image: `docker pull otel/semconvgen:latest` +3. Run the `make semconv-generate ...` target from this repository. + +For example, + +```sh +export TAG="v1.13.0" # Change to the release version you are generating. 
+export OTEL_SPEC_REPO="/absolute/path/to/opentelemetry-specification" +git -C "$OTEL_SPEC_REPO" checkout "tags/$TAG" -b "$TAG" +docker pull otel/semconvgen:latest +make semconv-generate # Uses the exported TAG and OTEL_SPEC_REPO. +``` + +This should create a new sub-package of [`semconv`](./semconv). +Ensure things look correct before submitting a pull request to include the addition. + +**Note**, the generation code was changed to generate versions >= 1.13. +To generate versions prior to this, checkout the old release of this repository (i.e. [2fe8861](https://github.com/open-telemetry/opentelemetry-go/commit/2fe8861a24e20088c065b116089862caf9e3cd8b)). + +## Pre-Release + +First, decide which module sets will be released and update their versions +in `versions.yaml`. Commit this change to a new branch. + +Update go.mod for submodules to depend on the new release which will happen in the next step. + +1. Run the `prerelease` make target. It creates a branch + `prerelease__` that will contain all release changes. + + ``` + make prerelease MODSET= + ``` + +2. Verify the changes. + + ``` + git diff ...prerelease__ + ``` + + This should have changed the version for all modules to be ``. + If these changes look correct, merge them into your pre-release branch: + + ```go + git merge prerelease__ + ``` + +3. Update the [Changelog](./CHANGELOG.md). + - Make sure all relevant changes for this release are included and are in language that non-contributors to the project can understand. + To verify this, you can look directly at the commits since the ``. + + ``` + git --no-pager log --pretty=oneline "..HEAD" + ``` + + - Move all the `Unreleased` changes into a new section following the title scheme (`[] - `). + - Update all the appropriate links at the bottom. + +4. Push the changes to upstream and create a Pull Request on GitHub. + Be sure to include the curated changes from the [Changelog](./CHANGELOG.md) in the description. 
+ +## Tag + +Once the Pull Request with all the version changes has been approved and merged it is time to tag the merged commit. + +***IMPORTANT***: It is critical you use the same tag that you used in the Pre-Release step! +Failure to do so will leave things in a broken state. As long as you do not +change `versions.yaml` between pre-release and this step, things should be fine. + +***IMPORTANT***: [There is currently no way to remove an incorrectly tagged version of a Go module](https://github.com/golang/go/issues/34189). +It is critical you make sure the version you push upstream is correct. +[Failure to do so will lead to minor emergencies and tough to work around](https://github.com/open-telemetry/opentelemetry-go/issues/331). + +1. For each module set that will be released, run the `add-tags` make target + using the `` of the commit on the main branch for the merged Pull Request. + + ``` + make add-tags MODSET= COMMIT= + ``` + + It should only be necessary to provide an explicit `COMMIT` value if the + current `HEAD` of your working directory is not the correct commit. + +2. Push tags to the upstream remote (not your fork: `github.com/open-telemetry/opentelemetry-go.git`). + Make sure you push all sub-modules as well. + + ``` + git push upstream + git push upstream + ... + ``` + +## Release + +Finally create a Release for the new `` on GitHub. +The release body should include all the release notes from the Changelog for this release. + +## Verify Examples + +After releasing verify that examples build outside of the repository. + +``` +./verify_examples.sh +``` + +The script copies examples into a different directory removes any `replace` declarations in `go.mod` and builds them. +This ensures they build with the published release, not the local copy. 
+ +## Post-Release + +### Contrib Repository + +Once verified be sure to [make a release for the `contrib` repository](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/RELEASING.md) that uses this release. + +### Website Documentation + +Update [the documentation](./website_docs) for [the OpenTelemetry website](https://opentelemetry.io/docs/go/). +Importantly, bump any package versions referenced to be the latest one you just released and ensure all code examples still compile and are accurate. + +[OpenTelemetry specification]: https://github.com/open-telemetry/opentelemetry-specification diff --git a/vendor/go.opentelemetry.io/otel/VERSIONING.md b/vendor/go.opentelemetry.io/otel/VERSIONING.md new file mode 100644 index 00000000..412f1e36 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/VERSIONING.md @@ -0,0 +1,224 @@ +# Versioning + +This document describes the versioning policy for this repository. This policy +is designed so the following goals can be achieved. + +**Users are provided a codebase of value that is stable and secure.** + +## Policy + +* Versioning of this project will be idiomatic of a Go project using [Go + modules](https://github.com/golang/go/wiki/Modules). + * [Semantic import + versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning) + will be used. + * Versions will comply with [semver + 2.0](https://semver.org/spec/v2.0.0.html) with the following exceptions. + * New methods may be added to exported API interfaces. All exported + interfaces that fall within this exception will include the following + paragraph in their public documentation. + + > Warning: methods may be added to this interface in minor releases. 
+ + * If a module is version `v2` or higher, the major version of the module + must be included as a `/vN` at the end of the module paths used in + `go.mod` files (e.g., `module go.opentelemetry.io/otel/v2`, `require + go.opentelemetry.io/otel/v2 v2.0.1`) and in the package import path + (e.g., `import "go.opentelemetry.io/otel/v2/trace"`). This includes the + paths used in `go get` commands (e.g., `go get + go.opentelemetry.io/otel/v2@v2.0.1`. Note there is both a `/v2` and a + `@v2.0.1` in that example. One way to think about it is that the module + name now includes the `/v2`, so include `/v2` whenever you are using the + module name). + * If a module is version `v0` or `v1`, do not include the major version in + either the module path or the import path. + * Modules will be used to encapsulate signals and components. + * Experimental modules still under active development will be versioned at + `v0` to imply the stability guarantee defined by + [semver](https://semver.org/spec/v2.0.0.html#spec-item-4). + + > Major version zero (0.y.z) is for initial development. Anything MAY + > change at any time. The public API SHOULD NOT be considered stable. + + * Mature modules for which we guarantee a stable public API will be versioned + with a major version greater than `v0`. + * The decision to make a module stable will be made on a case-by-case + basis by the maintainers of this project. + * Experimental modules will start their versioning at `v0.0.0` and will + increment their minor version when backwards incompatible changes are + released and increment their patch version when backwards compatible + changes are released. + * All stable modules that use the same major version number will use the + same entire version number. + * Stable modules may be released with an incremented minor or patch + version even though that module has not been changed, but rather so + that it will remain at the same version as other stable modules that + did undergo change. 
+ * When an experimental module becomes stable a new stable module version + will be released and will include this now stable module. The new + stable module version will be an increment of the minor version number + and will be applied to all existing stable modules as well as the newly + stable module being released. +* Versioning of the associated [contrib + repository](https://github.com/open-telemetry/opentelemetry-go-contrib) of + this project will be idiomatic of a Go project using [Go + modules](https://github.com/golang/go/wiki/Modules). + * [Semantic import + versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning) + will be used. + * Versions will comply with [semver 2.0](https://semver.org/spec/v2.0.0.html). + * If a module is version `v2` or higher, the + major version of the module must be included as a `/vN` at the end of the + module paths used in `go.mod` files (e.g., `module + go.opentelemetry.io/contrib/instrumentation/host/v2`, `require + go.opentelemetry.io/contrib/instrumentation/host/v2 v2.0.1`) and in the + package import path (e.g., `import + "go.opentelemetry.io/contrib/instrumentation/host/v2"`). This includes + the paths used in `go get` commands (e.g., `go get + go.opentelemetry.io/contrib/instrumentation/host/v2@v2.0.1`. Note there + is both a `/v2` and a `@v2.0.1` in that example. One way to think about + it is that the module name now includes the `/v2`, so include `/v2` + whenever you are using the module name). + * If a module is version `v0` or `v1`, do not include the major version + in either the module path or the import path. + * In addition to public APIs, telemetry produced by stable instrumentation + will remain stable and backwards compatible. This is to avoid breaking + alerts and dashboard. + * Modules will be used to encapsulate instrumentation, detectors, exporters, + propagators, and any other independent sets of related components. 
+ * Experimental modules still under active development will be versioned at + `v0` to imply the stability guarantee defined by + [semver](https://semver.org/spec/v2.0.0.html#spec-item-4). + + > Major version zero (0.y.z) is for initial development. Anything MAY + > change at any time. The public API SHOULD NOT be considered stable. + + * Mature modules for which we guarantee a stable public API and telemetry will + be versioned with a major version greater than `v0`. + * Experimental modules will start their versioning at `v0.0.0` and will + increment their minor version when backwards incompatible changes are + released and increment their patch version when backwards compatible + changes are released. + * Stable contrib modules cannot depend on experimental modules from this + project. + * All stable contrib modules of the same major version with this project + will use the same entire version as this project. + * Stable modules may be released with an incremented minor or patch + version even though that module's code has not been changed. Instead + the only change that will have been included is to have updated that + modules dependency on this project's stable APIs. + * When an experimental module in contrib becomes stable a new stable + module version will be released and will include this now stable + module. The new stable module version will be an increment of the minor + version number and will be applied to all existing stable contrib + modules, this project's modules, and the newly stable module being + released. + * Contrib modules will be kept up to date with this project's releases. + * Due to the dependency contrib modules will implicitly have on this + project's modules the release of stable contrib modules to match the + released version number will be staggered after this project's release. + There is no explicit time guarantee for how long after this projects + release the contrib release will be. 
Effort should be made to keep them + as close in time as possible. + * No additional stable release in this project can be made until the + contrib repository has a matching stable release. + * No release can be made in the contrib repository after this project's + stable release except for a stable release of the contrib repository. +* GitHub releases will be made for all releases. +* Go modules will be made available at Go package mirrors. + +## Example Versioning Lifecycle + +To better understand the implementation of the above policy the following +example is provided. This project is simplified to include only the following +modules and their versions: + +* `otel`: `v0.14.0` +* `otel/trace`: `v0.14.0` +* `otel/metric`: `v0.14.0` +* `otel/baggage`: `v0.14.0` +* `otel/sdk/trace`: `v0.14.0` +* `otel/sdk/metric`: `v0.14.0` + +These modules have been developed to a point where the `otel/trace`, +`otel/baggage`, and `otel/sdk/trace` modules have reached a point that they +should be considered for a stable release. The `otel/metric` and +`otel/sdk/metric` are still under active development and the `otel` module +depends on both `otel/trace` and `otel/metric`. + +The `otel` package is refactored to remove its dependencies on `otel/metric` so +it can be released as stable as well. With that done the following release +candidates are made: + +* `otel`: `v1.0.0-RC1` +* `otel/trace`: `v1.0.0-RC1` +* `otel/baggage`: `v1.0.0-RC1` +* `otel/sdk/trace`: `v1.0.0-RC1` + +The `otel/metric` and `otel/sdk/metric` modules remain at `v0.14.0`. + +A few minor issues are discovered in the `otel/trace` package. These issues are +resolved with some minor, but backwards incompatible, changes and are released +as a second release candidate: + +* `otel`: `v1.0.0-RC2` +* `otel/trace`: `v1.0.0-RC2` +* `otel/baggage`: `v1.0.0-RC2` +* `otel/sdk/trace`: `v1.0.0-RC2` + +Notice that all module version numbers are incremented to adhere to our +versioning policy. 
+ +After these release candidates have been evaluated to satisfaction, they are +released as version `v1.0.0`. + +* `otel`: `v1.0.0` +* `otel/trace`: `v1.0.0` +* `otel/baggage`: `v1.0.0` +* `otel/sdk/trace`: `v1.0.0` + +Since both the `go` utility and the Go module system support [the semantic +versioning definition of +precedence](https://semver.org/spec/v2.0.0.html#spec-item-11), this release +will correctly be interpreted as the successor to the previous release +candidates. + +Active development of this project continues. The `otel/metric` module now has +backwards incompatible changes to its API that need to be released and the +`otel/baggage` module has a minor bug fix that needs to be released. The +following release is made: + +* `otel`: `v1.0.1` +* `otel/trace`: `v1.0.1` +* `otel/metric`: `v0.15.0` +* `otel/baggage`: `v1.0.1` +* `otel/sdk/trace`: `v1.0.1` +* `otel/sdk/metric`: `v0.15.0` + +Notice that, again, all stable module versions are incremented in unison and +the `otel/sdk/metric` package, which depends on the `otel/metric` package, also +bumped its version. This bump of the `otel/sdk/metric` package makes sense +given their coupling, though it is not explicitly required by our versioning +policy. + +As we progress, the `otel/metric` and `otel/sdk/metric` packages have reached a +point where they should be evaluated for stability. The `otel` module is +reintegrated with the `otel/metric` package and the following release is made: + +* `otel`: `v1.1.0-RC1` +* `otel/trace`: `v1.1.0-RC1` +* `otel/metric`: `v1.1.0-RC1` +* `otel/baggage`: `v1.1.0-RC1` +* `otel/sdk/trace`: `v1.1.0-RC1` +* `otel/sdk/metric`: `v1.1.0-RC1` + +All the modules are evaluated and determined to a viable stable release. They +are then released as version `v1.1.0` (the minor version is incremented to +indicate the addition of new signal). 
+ +* `otel`: `v1.1.0` +* `otel/trace`: `v1.1.0` +* `otel/metric`: `v1.1.0` +* `otel/baggage`: `v1.1.0` +* `otel/sdk/trace`: `v1.1.0` +* `otel/sdk/metric`: `v1.1.0` diff --git a/vendor/go.opentelemetry.io/otel/attribute/doc.go b/vendor/go.opentelemetry.io/otel/attribute/doc.go new file mode 100644 index 00000000..dafe7424 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/doc.go @@ -0,0 +1,16 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package attribute provides key and value attributes. +package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/vendor/go.opentelemetry.io/otel/attribute/encoder.go b/vendor/go.opentelemetry.io/otel/attribute/encoder.go new file mode 100644 index 00000000..fe2bc576 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/encoder.go @@ -0,0 +1,146 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package attribute // import "go.opentelemetry.io/otel/attribute" + +import ( + "bytes" + "sync" + "sync/atomic" +) + +type ( + // Encoder is a mechanism for serializing an attribute set into a specific + // string representation that supports caching, to avoid repeated + // serialization. An example could be an exporter encoding the attribute + // set into a wire representation. + Encoder interface { + // Encode returns the serialized encoding of the attribute set using + // its Iterator. This result may be cached by a attribute.Set. + Encode(iterator Iterator) string + + // ID returns a value that is unique for each class of attribute + // encoder. Attribute encoders allocate these using `NewEncoderID`. + ID() EncoderID + } + + // EncoderID is used to identify distinct Encoder + // implementations, for caching encoded results. + EncoderID struct { + value uint64 + } + + // defaultAttrEncoder uses a sync.Pool of buffers to reduce the number of + // allocations used in encoding attributes. This implementation encodes a + // comma-separated list of key=value, with '/'-escaping of '=', ',', and + // '\'. + defaultAttrEncoder struct { + // pool is a pool of attribute set builders. The buffers in this pool + // grow to a size that most attribute encodings will not allocate new + // memory. + pool sync.Pool // *bytes.Buffer + } +) + +// escapeChar is used to ensure uniqueness of the attribute encoding where +// keys or values contain either '=' or ','. Since there is no parser needed +// for this encoding and its only requirement is to be unique, this choice is +// arbitrary. Users will see these in some exporters (e.g., stdout), so the +// backslash ('\') is used as a conventional choice. +const escapeChar = '\\' + +var ( + _ Encoder = &defaultAttrEncoder{} + + // encoderIDCounter is for generating IDs for other attribute encoders. 
+ encoderIDCounter uint64 + + defaultEncoderOnce sync.Once + defaultEncoderID = NewEncoderID() + defaultEncoderInstance *defaultAttrEncoder +) + +// NewEncoderID returns a unique attribute encoder ID. It should be called +// once per each type of attribute encoder. Preferably in init() or in var +// definition. +func NewEncoderID() EncoderID { + return EncoderID{value: atomic.AddUint64(&encoderIDCounter, 1)} +} + +// DefaultEncoder returns an attribute encoder that encodes attributes in such +// a way that each escaped attribute's key is followed by an equal sign and +// then by an escaped attribute's value. All key-value pairs are separated by +// a comma. +// +// Escaping is done by prepending a backslash before either a backslash, equal +// sign or a comma. +func DefaultEncoder() Encoder { + defaultEncoderOnce.Do(func() { + defaultEncoderInstance = &defaultAttrEncoder{ + pool: sync.Pool{ + New: func() interface{} { + return &bytes.Buffer{} + }, + }, + } + }) + return defaultEncoderInstance +} + +// Encode is a part of an implementation of the AttributeEncoder interface. +func (d *defaultAttrEncoder) Encode(iter Iterator) string { + buf := d.pool.Get().(*bytes.Buffer) + defer d.pool.Put(buf) + buf.Reset() + + for iter.Next() { + i, keyValue := iter.IndexedAttribute() + if i > 0 { + _, _ = buf.WriteRune(',') + } + copyAndEscape(buf, string(keyValue.Key)) + + _, _ = buf.WriteRune('=') + + if keyValue.Value.Type() == STRING { + copyAndEscape(buf, keyValue.Value.AsString()) + } else { + _, _ = buf.WriteString(keyValue.Value.Emit()) + } + } + return buf.String() +} + +// ID is a part of an implementation of the AttributeEncoder interface. +func (*defaultAttrEncoder) ID() EncoderID { + return defaultEncoderID +} + +// copyAndEscape escapes `=`, `,` and its own escape character (`\`), +// making the default encoding unique. 
+func copyAndEscape(buf *bytes.Buffer, val string) { + for _, ch := range val { + switch ch { + case '=', ',', escapeChar: + _, _ = buf.WriteRune(escapeChar) + } + _, _ = buf.WriteRune(ch) + } +} + +// Valid returns true if this encoder ID was allocated by +// `NewEncoderID`. Invalid encoder IDs will not be cached. +func (id EncoderID) Valid() bool { + return id.value != 0 +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/iterator.go b/vendor/go.opentelemetry.io/otel/attribute/iterator.go new file mode 100644 index 00000000..841b271f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/iterator.go @@ -0,0 +1,161 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package attribute // import "go.opentelemetry.io/otel/attribute" + +// Iterator allows iterating over the set of attributes in order, sorted by +// key. +type Iterator struct { + storage *Set + idx int +} + +// MergeIterator supports iterating over two sets of attributes while +// eliminating duplicate values from the combined set. The first iterator +// value takes precedence. +type MergeIterator struct { + one oneIterator + two oneIterator + current KeyValue +} + +type oneIterator struct { + iter Iterator + done bool + attr KeyValue +} + +// Next moves the iterator to the next position. Returns false if there are no +// more attributes. 
+func (i *Iterator) Next() bool { + i.idx++ + return i.idx < i.Len() +} + +// Label returns current KeyValue. Must be called only after Next returns +// true. +// +// Deprecated: Use Attribute instead. +func (i *Iterator) Label() KeyValue { + return i.Attribute() +} + +// Attribute returns the current KeyValue of the Iterator. It must be called +// only after Next returns true. +func (i *Iterator) Attribute() KeyValue { + kv, _ := i.storage.Get(i.idx) + return kv +} + +// IndexedLabel returns current index and attribute. Must be called only +// after Next returns true. +// +// Deprecated: Use IndexedAttribute instead. +func (i *Iterator) IndexedLabel() (int, KeyValue) { + return i.idx, i.Attribute() +} + +// IndexedAttribute returns current index and attribute. Must be called only +// after Next returns true. +func (i *Iterator) IndexedAttribute() (int, KeyValue) { + return i.idx, i.Attribute() +} + +// Len returns a number of attributes in the iterated set. +func (i *Iterator) Len() int { + return i.storage.Len() +} + +// ToSlice is a convenience function that creates a slice of attributes from +// the passed iterator. The iterator is set up to start from the beginning +// before creating the slice. +func (i *Iterator) ToSlice() []KeyValue { + l := i.Len() + if l == 0 { + return nil + } + i.idx = -1 + slice := make([]KeyValue, 0, l) + for i.Next() { + slice = append(slice, i.Attribute()) + } + return slice +} + +// NewMergeIterator returns a MergeIterator for merging two attribute sets. +// Duplicates are resolved by taking the value from the first set. 
+func NewMergeIterator(s1, s2 *Set) MergeIterator { + mi := MergeIterator{ + one: makeOne(s1.Iter()), + two: makeOne(s2.Iter()), + } + return mi +} + +func makeOne(iter Iterator) oneIterator { + oi := oneIterator{ + iter: iter, + } + oi.advance() + return oi +} + +func (oi *oneIterator) advance() { + if oi.done = !oi.iter.Next(); !oi.done { + oi.attr = oi.iter.Attribute() + } +} + +// Next returns true if there is another attribute available. +func (m *MergeIterator) Next() bool { + if m.one.done && m.two.done { + return false + } + if m.one.done { + m.current = m.two.attr + m.two.advance() + return true + } + if m.two.done { + m.current = m.one.attr + m.one.advance() + return true + } + if m.one.attr.Key == m.two.attr.Key { + m.current = m.one.attr // first iterator attribute value wins + m.one.advance() + m.two.advance() + return true + } + if m.one.attr.Key < m.two.attr.Key { + m.current = m.one.attr + m.one.advance() + return true + } + m.current = m.two.attr + m.two.advance() + return true +} + +// Label returns the current value after Next() returns true. +// +// Deprecated: Use Attribute instead. +func (m *MergeIterator) Label() KeyValue { + return m.current +} + +// Attribute returns the current value after Next() returns true. +func (m *MergeIterator) Attribute() KeyValue { + return m.current +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/key.go b/vendor/go.opentelemetry.io/otel/attribute/key.go new file mode 100644 index 00000000..0656a04e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/key.go @@ -0,0 +1,134 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package attribute // import "go.opentelemetry.io/otel/attribute" + +// Key represents the key part in key-value pairs. It's a string. The +// allowed character set in the key depends on the use of the key. +type Key string + +// Bool creates a KeyValue instance with a BOOL Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- Bool(name, value). +func (k Key) Bool(v bool) KeyValue { + return KeyValue{ + Key: k, + Value: BoolValue(v), + } +} + +// BoolSlice creates a KeyValue instance with a BOOLSLICE Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- BoolSlice(name, value). +func (k Key) BoolSlice(v []bool) KeyValue { + return KeyValue{ + Key: k, + Value: BoolSliceValue(v), + } +} + +// Int creates a KeyValue instance with an INT64 Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- Int(name, value). +func (k Key) Int(v int) KeyValue { + return KeyValue{ + Key: k, + Value: IntValue(v), + } +} + +// IntSlice creates a KeyValue instance with an INT64SLICE Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- IntSlice(name, value). +func (k Key) IntSlice(v []int) KeyValue { + return KeyValue{ + Key: k, + Value: IntSliceValue(v), + } +} + +// Int64 creates a KeyValue instance with an INT64 Value. 
+// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- Int64(name, value). +func (k Key) Int64(v int64) KeyValue { + return KeyValue{ + Key: k, + Value: Int64Value(v), + } +} + +// Int64Slice creates a KeyValue instance with an INT64SLICE Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- Int64Slice(name, value). +func (k Key) Int64Slice(v []int64) KeyValue { + return KeyValue{ + Key: k, + Value: Int64SliceValue(v), + } +} + +// Float64 creates a KeyValue instance with a FLOAT64 Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- Float64(name, value). +func (k Key) Float64(v float64) KeyValue { + return KeyValue{ + Key: k, + Value: Float64Value(v), + } +} + +// Float64Slice creates a KeyValue instance with a FLOAT64SLICE Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- Float64(name, value). +func (k Key) Float64Slice(v []float64) KeyValue { + return KeyValue{ + Key: k, + Value: Float64SliceValue(v), + } +} + +// String creates a KeyValue instance with a STRING Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- String(name, value). +func (k Key) String(v string) KeyValue { + return KeyValue{ + Key: k, + Value: StringValue(v), + } +} + +// StringSlice creates a KeyValue instance with a STRINGSLICE Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- StringSlice(name, value). +func (k Key) StringSlice(v []string) KeyValue { + return KeyValue{ + Key: k, + Value: StringSliceValue(v), + } +} + +// Defined returns true for non-empty keys. 
func (k Key) Defined() bool {
	return len(k) != 0
}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/kv.go b/vendor/go.opentelemetry.io/otel/attribute/kv.go
new file mode 100644
index 00000000..1ddf3ce0
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/kv.go
@@ -0,0 +1,86 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package attribute // import "go.opentelemetry.io/otel/attribute"

import (
	"fmt"
)

// KeyValue holds a key and value pair.
type KeyValue struct {
	Key   Key
	Value Value
}

// Valid returns if kv is a valid OpenTelemetry attribute.
func (kv KeyValue) Valid() bool {
	return kv.Key.Defined() && kv.Value.Type() != INVALID
}

// Bool creates a KeyValue with a BOOL Value type.
func Bool(k string, v bool) KeyValue {
	return Key(k).Bool(v)
}

// BoolSlice creates a KeyValue with a BOOLSLICE Value type.
func BoolSlice(k string, v []bool) KeyValue {
	return Key(k).BoolSlice(v)
}

// Int creates a KeyValue with an INT64 Value type.
func Int(k string, v int) KeyValue {
	return Key(k).Int(v)
}

// IntSlice creates a KeyValue with an INT64SLICE Value type.
func IntSlice(k string, v []int) KeyValue {
	return Key(k).IntSlice(v)
}

// Int64 creates a KeyValue with an INT64 Value type.
func Int64(k string, v int64) KeyValue {
	return Key(k).Int64(v)
}

// Int64Slice creates a KeyValue with an INT64SLICE Value type.
func Int64Slice(k string, v []int64) KeyValue {
	return Key(k).Int64Slice(v)
}

// Float64 creates a KeyValue with a FLOAT64 Value type.
func Float64(k string, v float64) KeyValue {
	return Key(k).Float64(v)
}

// Float64Slice creates a KeyValue with a FLOAT64SLICE Value type.
func Float64Slice(k string, v []float64) KeyValue {
	return Key(k).Float64Slice(v)
}

// String creates a KeyValue with a STRING Value type.
func String(k, v string) KeyValue {
	return Key(k).String(v)
}

// StringSlice creates a KeyValue with a STRINGSLICE Value type.
func StringSlice(k string, v []string) KeyValue {
	return Key(k).StringSlice(v)
}

// Stringer creates a new key-value pair with a passed name and a string
// value generated by the passed Stringer interface.
func Stringer(k string, v fmt.Stringer) KeyValue {
	return Key(k).String(v.String())
}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go
new file mode 100644
index 00000000..26be5983
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/set.go
@@ -0,0 +1,424 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package attribute // import "go.opentelemetry.io/otel/attribute"

import (
	"encoding/json"
	"reflect"
	"sort"
)

type (
	// Set is the representation for a distinct attribute set. It manages an
	// immutable set of attributes, with an internal cache for storing
	// attribute encodings.
	//
	// This type supports the Equivalent method of comparison using values of
	// type Distinct.
	Set struct {
		equivalent Distinct
	}

	// Distinct wraps a variable-size array of KeyValue, constructed with keys
	// in sorted order. This can be used as a map key or for equality checking
	// between Sets.
	Distinct struct {
		// iface holds a fixed-size array value (e.g. [3]KeyValue), which is
		// comparable with == — unlike a slice — so Distinct can be a map key.
		iface interface{}
	}

	// Filter supports removing certain attributes from attribute sets. When
	// the filter returns true, the attribute will be kept in the filtered
	// attribute set. When the filter returns false, the attribute is excluded
	// from the filtered attribute set, and the attribute instead appears in
	// the removed list of excluded attributes.
	Filter func(KeyValue) bool

	// Sortable implements sort.Interface, used for sorting KeyValue. This is
	// an exported type to support a memory optimization. A pointer to one of
	// these is needed for the call to sort.Stable(), which the caller may
	// provide in order to avoid an allocation. See NewSetWithSortable().
	Sortable []KeyValue
)

var (
	// keyValueType is used in computeDistinctReflect.
	keyValueType = reflect.TypeOf(KeyValue{})

	// emptySet is returned for empty attribute sets.
	emptySet = &Set{
		equivalent: Distinct{
			iface: [0]KeyValue{},
		},
	}
)

// EmptySet returns a reference to a Set with no elements.
//
// This is a convenience provided for optimized calling utility.
func EmptySet() *Set {
	return emptySet
}

// reflectValue abbreviates reflect.ValueOf(d).
func (d Distinct) reflectValue() reflect.Value {
	return reflect.ValueOf(d.iface)
}

// Valid returns true if this value refers to a valid Set.
func (d Distinct) Valid() bool {
	return d.iface != nil
}

// Len returns the number of attributes in this set.
// Len returns 0 for a nil or invalid Set; otherwise it reflects over the
// stored fixed-size array to obtain its length.
func (l *Set) Len() int {
	if l == nil || !l.equivalent.Valid() {
		return 0
	}
	return l.equivalent.reflectValue().Len()
}

// Get returns the KeyValue at ordered position idx in this set.
func (l *Set) Get(idx int) (KeyValue, bool) {
	if l == nil {
		return KeyValue{}, false
	}
	value := l.equivalent.reflectValue()

	if idx >= 0 && idx < value.Len() {
		// Note: The Go compiler successfully avoids an allocation for
		// the interface{} conversion here:
		return value.Index(idx).Interface().(KeyValue), true
	}

	return KeyValue{}, false
}

// Value returns the value of a specified key in this set.
func (l *Set) Value(k Key) (Value, bool) {
	if l == nil {
		return Value{}, false
	}
	rValue := l.equivalent.reflectValue()
	vlen := rValue.Len()

	// The backing array is sorted by key, so a binary search finds the
	// insertion point for k; a hit is confirmed by the equality check below.
	idx := sort.Search(vlen, func(idx int) bool {
		return rValue.Index(idx).Interface().(KeyValue).Key >= k
	})
	if idx >= vlen {
		return Value{}, false
	}
	keyValue := rValue.Index(idx).Interface().(KeyValue)
	if k == keyValue.Key {
		return keyValue.Value, true
	}
	return Value{}, false
}

// HasValue tests whether a key is defined in this set.
func (l *Set) HasValue(k Key) bool {
	if l == nil {
		return false
	}
	_, ok := l.Value(k)
	return ok
}

// Iter returns an iterator for visiting the attributes in this set.
func (l *Set) Iter() Iterator {
	return Iterator{
		storage: l,
		idx:     -1,
	}
}

// ToSlice returns the set of attributes belonging to this set, sorted, where
// keys appear no more than once.
func (l *Set) ToSlice() []KeyValue {
	iter := l.Iter()
	return iter.ToSlice()
}

// Equivalent returns a value that may be used as a map key. The Distinct type
// guarantees that the result will equal the equivalent. Distinct value of any
// attribute set with the same elements as this, where sets are made unique by
// choosing the last value in the input for any given key.
func (l *Set) Equivalent() Distinct {
	if l == nil || !l.equivalent.Valid() {
		return emptySet.equivalent
	}
	return l.equivalent
}

// Equals returns true if the argument set is equivalent to this set.
func (l *Set) Equals(o *Set) bool {
	return l.Equivalent() == o.Equivalent()
}

// Encoded returns the encoded form of this set, according to encoder.
func (l *Set) Encoded(encoder Encoder) string {
	if l == nil || encoder == nil {
		return ""
	}

	return encoder.Encode(l.Iter())
}

// empty returns a Set value sharing the canonical empty Distinct.
func empty() Set {
	return Set{
		equivalent: emptySet.equivalent,
	}
}

// NewSet returns a new Set. See the documentation for
// NewSetWithSortableFiltered for more details.
//
// Except for empty sets, this method adds an additional allocation compared
// with calls that include a Sortable.
func NewSet(kvs ...KeyValue) Set {
	// Check for empty set.
	if len(kvs) == 0 {
		return empty()
	}
	s, _ := NewSetWithSortableFiltered(kvs, new(Sortable), nil)
	return s
}

// NewSetWithSortable returns a new Set. See the documentation for
// NewSetWithSortableFiltered for more details.
//
// This call includes a Sortable option as a memory optimization.
func NewSetWithSortable(kvs []KeyValue, tmp *Sortable) Set {
	// Check for empty set.
	if len(kvs) == 0 {
		return empty()
	}
	s, _ := NewSetWithSortableFiltered(kvs, tmp, nil)
	return s
}

// NewSetWithFiltered returns a new Set. See the documentation for
// NewSetWithSortableFiltered for more details.
//
// This call includes a Filter to include/exclude attribute keys from the
// return value. Excluded keys are returned as a slice of attribute values.
func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
	// Check for empty set.
	if len(kvs) == 0 {
		return empty(), nil
	}
	return NewSetWithSortableFiltered(kvs, new(Sortable), filter)
}

// NewSetWithSortableFiltered returns a new Set.
//
// Duplicate keys are eliminated by taking the last value. This
// re-orders the input slice so that unique last-values are contiguous
// at the end of the slice.
//
// This ensures the following:
//
//   - Last-value-wins semantics
//   - Caller sees the reordering, but doesn't lose values
//   - Repeated call preserve last-value wins.
//
// Note that methods are defined on Set, although this returns Set. Callers
// can avoid memory allocations by:
//
//   - allocating a Sortable for use as a temporary in this method
//   - allocating a Set for storing the return value of this constructor.
//
// The result maintains a cache of encoded attributes, by attribute.EncoderID.
// This value should not be copied after its first use.
//
// The second []KeyValue return value is a list of attributes that were
// excluded by the Filter (if non-nil).
func NewSetWithSortableFiltered(kvs []KeyValue, tmp *Sortable, filter Filter) (Set, []KeyValue) {
	// Check for empty set.
	if len(kvs) == 0 {
		return empty(), nil
	}

	*tmp = kvs

	// Stable sort so the following de-duplication can implement
	// last-value-wins semantics.
	sort.Stable(tmp)

	// Release the caller's Sortable so it does not pin kvs.
	*tmp = nil

	position := len(kvs) - 1
	offset := position - 1

	// The requirements stated above require that the stable
	// result be placed in the end of the input slice, while
	// overwritten values are swapped to the beginning.
	//
	// De-duplicate with last-value-wins semantics. Preserve
	// duplicate values at the beginning of the input slice.
	for ; offset >= 0; offset-- {
		if kvs[offset].Key == kvs[position].Key {
			continue
		}
		position--
		kvs[offset], kvs[position] = kvs[position], kvs[offset]
	}
	if filter != nil {
		return filterSet(kvs[position:], filter)
	}
	return Set{
		equivalent: computeDistinct(kvs[position:]),
	}, nil
}

// filterSet reorders kvs so that included keys are contiguous at the end of
// the slice, while excluded keys precede the included keys.
+func filterSet(kvs []KeyValue, filter Filter) (Set, []KeyValue) { + var excluded []KeyValue + + // Move attributes that do not match the filter so they're adjacent before + // calling computeDistinct(). + distinctPosition := len(kvs) + + // Swap indistinct keys forward and distinct keys toward the + // end of the slice. + offset := len(kvs) - 1 + for ; offset >= 0; offset-- { + if filter(kvs[offset]) { + distinctPosition-- + kvs[offset], kvs[distinctPosition] = kvs[distinctPosition], kvs[offset] + continue + } + } + excluded = kvs[:distinctPosition] + + return Set{ + equivalent: computeDistinct(kvs[distinctPosition:]), + }, excluded +} + +// Filter returns a filtered copy of this Set. See the documentation for +// NewSetWithSortableFiltered for more details. +func (l *Set) Filter(re Filter) (Set, []KeyValue) { + if re == nil { + return Set{ + equivalent: l.equivalent, + }, nil + } + + // Note: This could be refactored to avoid the temporary slice + // allocation, if it proves to be expensive. + return filterSet(l.ToSlice(), re) +} + +// computeDistinct returns a Distinct using either the fixed- or +// reflect-oriented code path, depending on the size of the input. The input +// slice is assumed to already be sorted and de-duplicated. +func computeDistinct(kvs []KeyValue) Distinct { + iface := computeDistinctFixed(kvs) + if iface == nil { + iface = computeDistinctReflect(kvs) + } + return Distinct{ + iface: iface, + } +} + +// computeDistinctFixed computes a Distinct for small slices. It returns nil +// if the input is too large for this code path. 
// computeDistinctFixed copies kvs into a fixed-size array value ([N]KeyValue)
// for N up to 10; arrays, unlike slices, are comparable and so usable inside
// Distinct. Returns nil when len(kvs) > 10.
func computeDistinctFixed(kvs []KeyValue) interface{} {
	switch len(kvs) {
	case 1:
		ptr := new([1]KeyValue)
		copy((*ptr)[:], kvs)
		return *ptr
	case 2:
		ptr := new([2]KeyValue)
		copy((*ptr)[:], kvs)
		return *ptr
	case 3:
		ptr := new([3]KeyValue)
		copy((*ptr)[:], kvs)
		return *ptr
	case 4:
		ptr := new([4]KeyValue)
		copy((*ptr)[:], kvs)
		return *ptr
	case 5:
		ptr := new([5]KeyValue)
		copy((*ptr)[:], kvs)
		return *ptr
	case 6:
		ptr := new([6]KeyValue)
		copy((*ptr)[:], kvs)
		return *ptr
	case 7:
		ptr := new([7]KeyValue)
		copy((*ptr)[:], kvs)
		return *ptr
	case 8:
		ptr := new([8]KeyValue)
		copy((*ptr)[:], kvs)
		return *ptr
	case 9:
		ptr := new([9]KeyValue)
		copy((*ptr)[:], kvs)
		return *ptr
	case 10:
		ptr := new([10]KeyValue)
		copy((*ptr)[:], kvs)
		return *ptr
	default:
		return nil
	}
}

// computeDistinctReflect computes a Distinct using reflection, works for any
// size input.
func computeDistinctReflect(kvs []KeyValue) interface{} {
	at := reflect.New(reflect.ArrayOf(len(kvs), keyValueType)).Elem()
	for i, keyValue := range kvs {
		*(at.Index(i).Addr().Interface().(*KeyValue)) = keyValue
	}
	return at.Interface()
}

// MarshalJSON returns the JSON encoding of the Set.
func (l *Set) MarshalJSON() ([]byte, error) {
	return json.Marshal(l.equivalent.iface)
}

// MarshalLog is the marshaling function used by the logging system to represent this exporter.
// NOTE(review): value receiver here, unlike the pointer receivers above —
// presumably required by the logging system's interface; confirm upstream.
func (l Set) MarshalLog() interface{} {
	kvs := make(map[string]string)
	for _, kv := range l.ToSlice() {
		kvs[string(kv.Key)] = kv.Value.Emit()
	}
	return kvs
}

// Len implements sort.Interface.
func (l *Sortable) Len() int {
	return len(*l)
}

// Swap implements sort.Interface.
func (l *Sortable) Swap(i, j int) {
	(*l)[i], (*l)[j] = (*l)[j], (*l)[i]
}

// Less implements sort.Interface.
// Sort is ascending by Key only; stability of sort.Stable preserves the
// input order among equal keys.
func (l *Sortable) Less(i, j int) bool {
	return (*l)[i].Key < (*l)[j].Key
}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/type_string.go b/vendor/go.opentelemetry.io/otel/attribute/type_string.go
new file mode 100644
index 00000000..e584b247
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/type_string.go
@@ -0,0 +1,31 @@
// Code generated by "stringer -type=Type"; DO NOT EDIT.

package attribute

import "strconv"

func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[INVALID-0]
	_ = x[BOOL-1]
	_ = x[INT64-2]
	_ = x[FLOAT64-3]
	_ = x[STRING-4]
	_ = x[BOOLSLICE-5]
	_ = x[INT64SLICE-6]
	_ = x[FLOAT64SLICE-7]
	_ = x[STRINGSLICE-8]
}

const _Type_name = "INVALIDBOOLINT64FLOAT64STRINGBOOLSLICEINT64SLICEFLOAT64SLICESTRINGSLICE"

var _Type_index = [...]uint8{0, 7, 11, 16, 23, 29, 38, 48, 60, 71}

func (i Type) String() string {
	if i < 0 || i >= Type(len(_Type_index)-1) {
		return "Type(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _Type_name[_Type_index[i]:_Type_index[i+1]]
}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/value.go b/vendor/go.opentelemetry.io/otel/attribute/value.go
new file mode 100644
index 00000000..cb21dd5c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/value.go
@@ -0,0 +1,270 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package attribute // import "go.opentelemetry.io/otel/attribute"

import (
	"encoding/json"
	"fmt"
	"reflect"
	"strconv"

	"go.opentelemetry.io/otel/internal"
	"go.opentelemetry.io/otel/internal/attribute"
)

//go:generate stringer -type=Type

// Type describes the type of the data Value holds.
type Type int // nolint: revive // redefines builtin Type.

// Value represents the value part in key-value pairs.
type Value struct {
	// vtype selects which of the remaining fields is meaningful: numeric
	// holds BOOL/INT64/FLOAT64 bit patterns, stringly holds STRING, and
	// slice holds the *SLICE representations.
	vtype    Type
	numeric  uint64
	stringly string
	slice    interface{}
}

const (
	// INVALID is used for a Value with no value set.
	INVALID Type = iota
	// BOOL is a boolean Type Value.
	BOOL
	// INT64 is a 64-bit signed integral Type Value.
	INT64
	// FLOAT64 is a 64-bit floating point Type Value.
	FLOAT64
	// STRING is a string Type Value.
	STRING
	// BOOLSLICE is a slice of booleans Type Value.
	BOOLSLICE
	// INT64SLICE is a slice of 64-bit signed integral numbers Type Value.
	INT64SLICE
	// FLOAT64SLICE is a slice of 64-bit floating point numbers Type Value.
	FLOAT64SLICE
	// STRINGSLICE is a slice of strings Type Value.
	STRINGSLICE
)

// BoolValue creates a BOOL Value.
func BoolValue(v bool) Value {
	return Value{
		vtype:   BOOL,
		numeric: internal.BoolToRaw(v),
	}
}

// BoolSliceValue creates a BOOLSLICE Value.
func BoolSliceValue(v []bool) Value {
	return Value{vtype: BOOLSLICE, slice: attribute.BoolSliceValue(v)}
}

// IntValue creates an INT64 Value.
func IntValue(v int) Value {
	return Int64Value(int64(v))
}

// IntSliceValue creates an INTSLICE Value.
func IntSliceValue(v []int) Value {
	// Copy into a fixed-size [len(v)]int64 array value via reflection;
	// presumably for comparability, matching the other slice encodings —
	// confirm against internal/attribute.
	var int64Val int64
	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(int64Val)))
	for i, val := range v {
		cp.Elem().Index(i).SetInt(int64(val))
	}
	return Value{
		vtype: INT64SLICE,
		slice: cp.Elem().Interface(),
	}
}

// Int64Value creates an INT64 Value.
+func Int64Value(v int64) Value { + return Value{ + vtype: INT64, + numeric: internal.Int64ToRaw(v), + } +} + +// Int64SliceValue creates an INT64SLICE Value. +func Int64SliceValue(v []int64) Value { + return Value{vtype: INT64SLICE, slice: attribute.Int64SliceValue(v)} +} + +// Float64Value creates a FLOAT64 Value. +func Float64Value(v float64) Value { + return Value{ + vtype: FLOAT64, + numeric: internal.Float64ToRaw(v), + } +} + +// Float64SliceValue creates a FLOAT64SLICE Value. +func Float64SliceValue(v []float64) Value { + return Value{vtype: FLOAT64SLICE, slice: attribute.Float64SliceValue(v)} +} + +// StringValue creates a STRING Value. +func StringValue(v string) Value { + return Value{ + vtype: STRING, + stringly: v, + } +} + +// StringSliceValue creates a STRINGSLICE Value. +func StringSliceValue(v []string) Value { + return Value{vtype: STRINGSLICE, slice: attribute.StringSliceValue(v)} +} + +// Type returns a type of the Value. +func (v Value) Type() Type { + return v.vtype +} + +// AsBool returns the bool value. Make sure that the Value's type is +// BOOL. +func (v Value) AsBool() bool { + return internal.RawToBool(v.numeric) +} + +// AsBoolSlice returns the []bool value. Make sure that the Value's type is +// BOOLSLICE. +func (v Value) AsBoolSlice() []bool { + if v.vtype != BOOLSLICE { + return nil + } + return v.asBoolSlice() +} + +func (v Value) asBoolSlice() []bool { + return attribute.AsBoolSlice(v.slice) +} + +// AsInt64 returns the int64 value. Make sure that the Value's type is +// INT64. +func (v Value) AsInt64() int64 { + return internal.RawToInt64(v.numeric) +} + +// AsInt64Slice returns the []int64 value. Make sure that the Value's type is +// INT64SLICE. +func (v Value) AsInt64Slice() []int64 { + if v.vtype != INT64SLICE { + return nil + } + return v.asInt64Slice() +} + +func (v Value) asInt64Slice() []int64 { + return attribute.AsInt64Slice(v.slice) +} + +// AsFloat64 returns the float64 value. 
Make sure that the Value's +// type is FLOAT64. +func (v Value) AsFloat64() float64 { + return internal.RawToFloat64(v.numeric) +} + +// AsFloat64Slice returns the []float64 value. Make sure that the Value's type is +// FLOAT64SLICE. +func (v Value) AsFloat64Slice() []float64 { + if v.vtype != FLOAT64SLICE { + return nil + } + return v.asFloat64Slice() +} + +func (v Value) asFloat64Slice() []float64 { + return attribute.AsFloat64Slice(v.slice) +} + +// AsString returns the string value. Make sure that the Value's type +// is STRING. +func (v Value) AsString() string { + return v.stringly +} + +// AsStringSlice returns the []string value. Make sure that the Value's type is +// STRINGSLICE. +func (v Value) AsStringSlice() []string { + if v.vtype != STRINGSLICE { + return nil + } + return v.asStringSlice() +} + +func (v Value) asStringSlice() []string { + return attribute.AsStringSlice(v.slice) +} + +type unknownValueType struct{} + +// AsInterface returns Value's data as interface{}. +func (v Value) AsInterface() interface{} { + switch v.Type() { + case BOOL: + return v.AsBool() + case BOOLSLICE: + return v.asBoolSlice() + case INT64: + return v.AsInt64() + case INT64SLICE: + return v.asInt64Slice() + case FLOAT64: + return v.AsFloat64() + case FLOAT64SLICE: + return v.asFloat64Slice() + case STRING: + return v.stringly + case STRINGSLICE: + return v.asStringSlice() + } + return unknownValueType{} +} + +// Emit returns a string representation of Value's data. 
+func (v Value) Emit() string { + switch v.Type() { + case BOOLSLICE: + return fmt.Sprint(v.asBoolSlice()) + case BOOL: + return strconv.FormatBool(v.AsBool()) + case INT64SLICE: + return fmt.Sprint(v.asInt64Slice()) + case INT64: + return strconv.FormatInt(v.AsInt64(), 10) + case FLOAT64SLICE: + return fmt.Sprint(v.asFloat64Slice()) + case FLOAT64: + return fmt.Sprint(v.AsFloat64()) + case STRINGSLICE: + return fmt.Sprint(v.asStringSlice()) + case STRING: + return v.stringly + default: + return "unknown" + } +} + +// MarshalJSON returns the JSON encoding of the Value. +func (v Value) MarshalJSON() ([]byte, error) { + var jsonVal struct { + Type string + Value interface{} + } + jsonVal.Type = v.Type().String() + jsonVal.Value = v.AsInterface() + return json.Marshal(jsonVal) +} diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go new file mode 100644 index 00000000..a36db8f8 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go @@ -0,0 +1,570 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 

package baggage // import "go.opentelemetry.io/otel/baggage"

import (
	"errors"
	"fmt"
	"net/url"
	"regexp"
	"strings"

	"go.opentelemetry.io/otel/internal/baggage"
)

// Limits and delimiters from the W3C Baggage specification.
const (
	maxMembers               = 180
	maxBytesPerMembers       = 4096
	maxBytesPerBaggageString = 8192

	listDelimiter     = ","
	keyValueDelimiter = "="
	propertyDelimiter = ";"

	// keyDef/valueDef are the character classes the W3C Baggage grammar
	// allows for keys and values (values may be empty).
	keyDef      = `([\x21\x23-\x27\x2A\x2B\x2D\x2E\x30-\x39\x41-\x5a\x5e-\x7a\x7c\x7e]+)`
	valueDef    = `([\x21\x23-\x2b\x2d-\x3a\x3c-\x5B\x5D-\x7e]*)`
	keyValueDef = `\s*` + keyDef + `\s*` + keyValueDelimiter + `\s*` + valueDef + `\s*`
)

var (
	keyRe      = regexp.MustCompile(`^` + keyDef + `$`)
	valueRe    = regexp.MustCompile(`^` + valueDef + `$`)
	// propertyRe accepts either a bare key or a key=value pair.
	propertyRe = regexp.MustCompile(`^(?:\s*` + keyDef + `\s*|` + keyValueDef + `)$`)
)

var (
	errInvalidKey      = errors.New("invalid key")
	errInvalidValue    = errors.New("invalid value")
	errInvalidProperty = errors.New("invalid baggage list-member property")
	errInvalidMember   = errors.New("invalid baggage list-member")
	errMemberNumber    = errors.New("too many list-members in baggage-string")
	errMemberBytes     = errors.New("list-member too large")
	errBaggageBytes    = errors.New("baggage-string too large")
)

// Property is an additional metadata entry for a baggage list-member.
type Property struct {
	key, value string

	// hasValue indicates if a zero-value value means the property does not
	// have a value or if it was the zero-value.
	hasValue bool

	// hasData indicates whether the created property contains data or not.
	// Properties that do not contain data are invalid with no other check
	// required.
	hasData bool
}

// NewKeyProperty returns a new Property for key.
//
// If key is invalid, an error will be returned.
+func NewKeyProperty(key string) (Property, error) { + if !keyRe.MatchString(key) { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) + } + + p := Property{key: key, hasData: true} + return p, nil +} + +// NewKeyValueProperty returns a new Property for key with value. +// +// If key or value are invalid, an error will be returned. +func NewKeyValueProperty(key, value string) (Property, error) { + if !keyRe.MatchString(key) { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) + } + if !valueRe.MatchString(value) { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value) + } + + p := Property{ + key: key, + value: value, + hasValue: true, + hasData: true, + } + return p, nil +} + +func newInvalidProperty() Property { + return Property{} +} + +// parseProperty attempts to decode a Property from the passed string. It +// returns an error if the input is invalid according to the W3C Baggage +// specification. +func parseProperty(property string) (Property, error) { + if property == "" { + return newInvalidProperty(), nil + } + + match := propertyRe.FindStringSubmatch(property) + if len(match) != 4 { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidProperty, property) + } + + p := Property{hasData: true} + if match[1] != "" { + p.key = match[1] + } else { + p.key = match[2] + p.value = match[3] + p.hasValue = true + } + + return p, nil +} + +// validate ensures p conforms to the W3C Baggage specification, returning an +// error otherwise. 
+func (p Property) validate() error { + errFunc := func(err error) error { + return fmt.Errorf("invalid property: %w", err) + } + + if !p.hasData { + return errFunc(fmt.Errorf("%w: %q", errInvalidProperty, p)) + } + + if !keyRe.MatchString(p.key) { + return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key)) + } + if p.hasValue && !valueRe.MatchString(p.value) { + return errFunc(fmt.Errorf("%w: %q", errInvalidValue, p.value)) + } + if !p.hasValue && p.value != "" { + return errFunc(errors.New("inconsistent value")) + } + return nil +} + +// Key returns the Property key. +func (p Property) Key() string { + return p.key +} + +// Value returns the Property value. Additionally, a boolean value is returned +// indicating if the returned value is the empty if the Property has a value +// that is empty or if the value is not set. +func (p Property) Value() (string, bool) { + return p.value, p.hasValue +} + +// String encodes Property into a string compliant with the W3C Baggage +// specification. 
+func (p Property) String() string { + if p.hasValue { + return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, p.value) + } + return p.key +} + +type properties []Property + +func fromInternalProperties(iProps []baggage.Property) properties { + if len(iProps) == 0 { + return nil + } + + props := make(properties, len(iProps)) + for i, p := range iProps { + props[i] = Property{ + key: p.Key, + value: p.Value, + hasValue: p.HasValue, + } + } + return props +} + +func (p properties) asInternal() []baggage.Property { + if len(p) == 0 { + return nil + } + + iProps := make([]baggage.Property, len(p)) + for i, prop := range p { + iProps[i] = baggage.Property{ + Key: prop.key, + Value: prop.value, + HasValue: prop.hasValue, + } + } + return iProps +} + +func (p properties) Copy() properties { + if len(p) == 0 { + return nil + } + + props := make(properties, len(p)) + copy(props, p) + return props +} + +// validate ensures each Property in p conforms to the W3C Baggage +// specification, returning an error otherwise. +func (p properties) validate() error { + for _, prop := range p { + if err := prop.validate(); err != nil { + return err + } + } + return nil +} + +// String encodes properties into a string compliant with the W3C Baggage +// specification. +func (p properties) String() string { + props := make([]string, len(p)) + for i, prop := range p { + props[i] = prop.String() + } + return strings.Join(props, propertyDelimiter) +} + +// Member is a list-member of a baggage-string as defined by the W3C Baggage +// specification. +type Member struct { + key, value string + properties properties + + // hasData indicates whether the created property contains data or not. + // Properties that do not contain data are invalid with no other check + // required. + hasData bool +} + +// NewMember returns a new Member from the passed arguments. The key will be +// used directly while the value will be url decoded after validation. 
An error +// is returned if the created Member would be invalid according to the W3C +// Baggage specification. +func NewMember(key, value string, props ...Property) (Member, error) { + m := Member{ + key: key, + value: value, + properties: properties(props).Copy(), + hasData: true, + } + if err := m.validate(); err != nil { + return newInvalidMember(), err + } + decodedValue, err := url.QueryUnescape(value) + if err != nil { + return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) + } + m.value = decodedValue + return m, nil +} + +func newInvalidMember() Member { + return Member{} +} + +// parseMember attempts to decode a Member from the passed string. It returns +// an error if the input is invalid according to the W3C Baggage +// specification. +func parseMember(member string) (Member, error) { + if n := len(member); n > maxBytesPerMembers { + return newInvalidMember(), fmt.Errorf("%w: %d", errMemberBytes, n) + } + + var ( + key, value string + props properties + ) + + parts := strings.SplitN(member, propertyDelimiter, 2) + switch len(parts) { + case 2: + // Parse the member properties. + for _, pStr := range strings.Split(parts[1], propertyDelimiter) { + p, err := parseProperty(pStr) + if err != nil { + return newInvalidMember(), err + } + props = append(props, p) + } + fallthrough + case 1: + // Parse the member key/value pair. + + // Take into account a value can contain equal signs (=). + kv := strings.SplitN(parts[0], keyValueDelimiter, 2) + if len(kv) != 2 { + return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidMember, member) + } + // "Leading and trailing whitespaces are allowed but MUST be trimmed + // when converting the header into a data structure." 
+ key = strings.TrimSpace(kv[0]) + var err error + value, err = url.QueryUnescape(strings.TrimSpace(kv[1])) + if err != nil { + return newInvalidMember(), fmt.Errorf("%w: %q", err, value) + } + if !keyRe.MatchString(key) { + return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key) + } + if !valueRe.MatchString(value) { + return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) + } + default: + // This should never happen unless a developer has changed the string + // splitting somehow. Panic instead of failing silently and allowing + // the bug to slip past the CI checks. + panic("failed to parse baggage member") + } + + return Member{key: key, value: value, properties: props, hasData: true}, nil +} + +// validate ensures m conforms to the W3C Baggage specification. +// A key is just an ASCII string, but a value must be URL encoded UTF-8, +// returning an error otherwise. +func (m Member) validate() error { + if !m.hasData { + return fmt.Errorf("%w: %q", errInvalidMember, m) + } + + if !keyRe.MatchString(m.key) { + return fmt.Errorf("%w: %q", errInvalidKey, m.key) + } + if !valueRe.MatchString(m.value) { + return fmt.Errorf("%w: %q", errInvalidValue, m.value) + } + return m.properties.validate() +} + +// Key returns the Member key. +func (m Member) Key() string { return m.key } + +// Value returns the Member value. +func (m Member) Value() string { return m.value } + +// Properties returns a copy of the Member properties. +func (m Member) Properties() []Property { return m.properties.Copy() } + +// String encodes Member into a string compliant with the W3C Baggage +// specification. +func (m Member) String() string { + // A key is just an ASCII string, but a value is URL encoded UTF-8. 
+ s := fmt.Sprintf("%s%s%s", m.key, keyValueDelimiter, url.QueryEscape(m.value)) + if len(m.properties) > 0 { + s = fmt.Sprintf("%s%s%s", s, propertyDelimiter, m.properties.String()) + } + return s +} + +// Baggage is a list of baggage members representing the baggage-string as +// defined by the W3C Baggage specification. +type Baggage struct { //nolint:golint + list baggage.List +} + +// New returns a new valid Baggage. It returns an error if it results in a +// Baggage exceeding limits set in that specification. +// +// It expects all the provided members to have already been validated. +func New(members ...Member) (Baggage, error) { + if len(members) == 0 { + return Baggage{}, nil + } + + b := make(baggage.List) + for _, m := range members { + if !m.hasData { + return Baggage{}, errInvalidMember + } + + // OpenTelemetry resolves duplicates by last-one-wins. + b[m.key] = baggage.Item{ + Value: m.value, + Properties: m.properties.asInternal(), + } + } + + // Check member numbers after deduplication. + if len(b) > maxMembers { + return Baggage{}, errMemberNumber + } + + bag := Baggage{b} + if n := len(bag.String()); n > maxBytesPerBaggageString { + return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n) + } + + return bag, nil +} + +// Parse attempts to decode a baggage-string from the passed string. It +// returns an error if the input is invalid according to the W3C Baggage +// specification. +// +// If there are duplicate list-members contained in baggage, the last one +// defined (reading left-to-right) will be the only one kept. This diverges +// from the W3C Baggage specification which allows duplicate list-members, but +// conforms to the OpenTelemetry Baggage specification. 
+func Parse(bStr string) (Baggage, error) { + if bStr == "" { + return Baggage{}, nil + } + + if n := len(bStr); n > maxBytesPerBaggageString { + return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n) + } + + b := make(baggage.List) + for _, memberStr := range strings.Split(bStr, listDelimiter) { + m, err := parseMember(memberStr) + if err != nil { + return Baggage{}, err + } + // OpenTelemetry resolves duplicates by last-one-wins. + b[m.key] = baggage.Item{ + Value: m.value, + Properties: m.properties.asInternal(), + } + } + + // OpenTelemetry does not allow for duplicate list-members, but the W3C + // specification does. Now that we have deduplicated, ensure the baggage + // does not exceed list-member limits. + if len(b) > maxMembers { + return Baggage{}, errMemberNumber + } + + return Baggage{b}, nil +} + +// Member returns the baggage list-member identified by key. +// +// If there is no list-member matching the passed key the returned Member will +// be a zero-value Member. +// The returned member is not validated, as we assume the validation happened +// when it was added to the Baggage. +func (b Baggage) Member(key string) Member { + v, ok := b.list[key] + if !ok { + // We do not need to worry about distinguishing between the situation + // where a zero-valued Member is included in the Baggage because a + // zero-valued Member is invalid according to the W3C Baggage + // specification (it has an empty key). + return newInvalidMember() + } + + return Member{ + key: key, + value: v.Value, + properties: fromInternalProperties(v.Properties), + hasData: true, + } +} + +// Members returns all the baggage list-members. +// The order of the returned list-members does not have significance. +// +// The returned members are not validated, as we assume the validation happened +// when they were added to the Baggage. 
+func (b Baggage) Members() []Member { + if len(b.list) == 0 { + return nil + } + + members := make([]Member, 0, len(b.list)) + for k, v := range b.list { + members = append(members, Member{ + key: k, + value: v.Value, + properties: fromInternalProperties(v.Properties), + hasData: true, + }) + } + return members +} + +// SetMember returns a copy the Baggage with the member included. If the +// baggage contains a Member with the same key the existing Member is +// replaced. +// +// If member is invalid according to the W3C Baggage specification, an error +// is returned with the original Baggage. +func (b Baggage) SetMember(member Member) (Baggage, error) { + if !member.hasData { + return b, errInvalidMember + } + + n := len(b.list) + if _, ok := b.list[member.key]; !ok { + n++ + } + list := make(baggage.List, n) + + for k, v := range b.list { + // Do not copy if we are just going to overwrite. + if k == member.key { + continue + } + list[k] = v + } + + list[member.key] = baggage.Item{ + Value: member.value, + Properties: member.properties.asInternal(), + } + + return Baggage{list: list}, nil +} + +// DeleteMember returns a copy of the Baggage with the list-member identified +// by key removed. +func (b Baggage) DeleteMember(key string) Baggage { + n := len(b.list) + if _, ok := b.list[key]; ok { + n-- + } + list := make(baggage.List, n) + + for k, v := range b.list { + if k == key { + continue + } + list[k] = v + } + + return Baggage{list: list} +} + +// Len returns the number of list-members in the Baggage. +func (b Baggage) Len() int { + return len(b.list) +} + +// String encodes Baggage into a string compliant with the W3C Baggage +// specification. The returned string will be invalid if the Baggage contains +// any invalid list-members. 
+func (b Baggage) String() string { + members := make([]string, 0, len(b.list)) + for k, v := range b.list { + members = append(members, Member{ + key: k, + value: v.Value, + properties: fromInternalProperties(v.Properties), + }.String()) + } + return strings.Join(members, listDelimiter) +} diff --git a/vendor/go.opentelemetry.io/otel/baggage/context.go b/vendor/go.opentelemetry.io/otel/baggage/context.go new file mode 100644 index 00000000..24b34b75 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/baggage/context.go @@ -0,0 +1,39 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package baggage // import "go.opentelemetry.io/otel/baggage" + +import ( + "context" + + "go.opentelemetry.io/otel/internal/baggage" +) + +// ContextWithBaggage returns a copy of parent with baggage. +func ContextWithBaggage(parent context.Context, b Baggage) context.Context { + // Delegate so any hooks for the OpenTracing bridge are handled. + return baggage.ContextWithList(parent, b.list) +} + +// ContextWithoutBaggage returns a copy of parent with no baggage. +func ContextWithoutBaggage(parent context.Context) context.Context { + // Delegate so any hooks for the OpenTracing bridge are handled. + return baggage.ContextWithList(parent, nil) +} + +// FromContext returns the baggage contained in ctx. +func FromContext(ctx context.Context) Baggage { + // Delegate so any hooks for the OpenTracing bridge are handled. 
+ return Baggage{list: baggage.ListFromContext(ctx)} +} diff --git a/vendor/go.opentelemetry.io/otel/baggage/doc.go b/vendor/go.opentelemetry.io/otel/baggage/doc.go new file mode 100644 index 00000000..4545100d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/baggage/doc.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package baggage provides functionality for storing and retrieving +baggage items in Go context. For propagating the baggage, see the +go.opentelemetry.io/otel/propagation package. +*/ +package baggage // import "go.opentelemetry.io/otel/baggage" diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go new file mode 100644 index 00000000..587ebae4 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/codes/codes.go @@ -0,0 +1,116 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package codes // import "go.opentelemetry.io/otel/codes" + +import ( + "encoding/json" + "fmt" + "strconv" +) + +const ( + // Unset is the default status code. + Unset Code = 0 + + // Error indicates the operation contains an error. + // + // NOTE: The error code in OTLP is 2. + // The value of this enum is only relevant to the internals + // of the Go SDK. + Error Code = 1 + + // Ok indicates operation has been validated by an Application developers + // or Operator to have completed successfully, or contain no error. + // + // NOTE: The Ok code in OTLP is 1. + // The value of this enum is only relevant to the internals + // of the Go SDK. + Ok Code = 2 + + maxCode = 3 +) + +// Code is an 32-bit representation of a status state. +type Code uint32 + +var codeToStr = map[Code]string{ + Unset: "Unset", + Error: "Error", + Ok: "Ok", +} + +var strToCode = map[string]Code{ + `"Unset"`: Unset, + `"Error"`: Error, + `"Ok"`: Ok, +} + +// String returns the Code as a string. +func (c Code) String() string { + return codeToStr[c] +} + +// UnmarshalJSON unmarshals b into the Code. +// +// This is based on the functionality in the gRPC codes package: +// https://github.com/grpc/grpc-go/blob/bb64fee312b46ebee26be43364a7a966033521b1/codes/codes.go#L218-L244 +func (c *Code) UnmarshalJSON(b []byte) error { + // From json.Unmarshaler: By convention, to approximate the behavior of + // Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as + // a no-op. 
+ if string(b) == "null" { + return nil + } + if c == nil { + return fmt.Errorf("nil receiver passed to UnmarshalJSON") + } + + var x interface{} + if err := json.Unmarshal(b, &x); err != nil { + return err + } + switch x.(type) { + case string: + if jc, ok := strToCode[string(b)]; ok { + *c = jc + return nil + } + return fmt.Errorf("invalid code: %q", string(b)) + case float64: + if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil { + if ci >= maxCode { + return fmt.Errorf("invalid code: %q", ci) + } + + *c = Code(ci) + return nil + } + return fmt.Errorf("invalid code: %q", string(b)) + default: + return fmt.Errorf("invalid code: %q", string(b)) + } +} + +// MarshalJSON returns c as the JSON encoding of c. +func (c *Code) MarshalJSON() ([]byte, error) { + if c == nil { + return []byte("null"), nil + } + str, ok := codeToStr[*c] + if !ok { + return nil, fmt.Errorf("invalid code: %d", *c) + } + return []byte(fmt.Sprintf("%q", str)), nil +} diff --git a/vendor/go.opentelemetry.io/otel/codes/doc.go b/vendor/go.opentelemetry.io/otel/codes/doc.go new file mode 100644 index 00000000..df3e0f1b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/codes/doc.go @@ -0,0 +1,21 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package codes defines the canonical error codes used by OpenTelemetry. 
+ +It conforms to [the OpenTelemetry +specification](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#statuscanonicalcode). +*/ +package codes // import "go.opentelemetry.io/otel/codes" diff --git a/vendor/go.opentelemetry.io/otel/doc.go b/vendor/go.opentelemetry.io/otel/doc.go new file mode 100644 index 00000000..daa36c89 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/doc.go @@ -0,0 +1,34 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package otel provides global access to the OpenTelemetry API. The subpackages of +the otel package provide an implementation of the OpenTelemetry API. + +The provided API is used to instrument code and measure data about that code's +performance and operation. The measured data, by default, is not processed or +transmitted anywhere. An implementation of the OpenTelemetry SDK, like the +default SDK implementation (go.opentelemetry.io/otel/sdk), and associated +exporters are used to process and transport this data. + +To read the getting started guide, see https://opentelemetry.io/docs/go/getting-started/. + +To read more about tracing, see go.opentelemetry.io/otel/trace. + +To read more about metrics, see go.opentelemetry.io/otel/metric. + +To read more about propagation, see go.opentelemetry.io/otel/propagation and +go.opentelemetry.io/otel/baggage. 
+*/ +package otel // import "go.opentelemetry.io/otel" diff --git a/vendor/go.opentelemetry.io/otel/error_handler.go b/vendor/go.opentelemetry.io/otel/error_handler.go new file mode 100644 index 00000000..72fad854 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/error_handler.go @@ -0,0 +1,38 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otel // import "go.opentelemetry.io/otel" + +// ErrorHandler handles irremediable events. +type ErrorHandler interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Handle handles any error deemed irremediable by an OpenTelemetry + // component. + Handle(error) + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. +} + +// ErrorHandlerFunc is a convenience adapter to allow the use of a function +// as an ErrorHandler. +type ErrorHandlerFunc func(error) + +var _ ErrorHandler = ErrorHandlerFunc(nil) + +// Handle handles the irremediable error by calling the ErrorHandlerFunc itself. 
+func (f ErrorHandlerFunc) Handle(err error) { + f(err) +} diff --git a/vendor/go.opentelemetry.io/otel/handler.go b/vendor/go.opentelemetry.io/otel/handler.go new file mode 100644 index 00000000..ecd363ab --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/handler.go @@ -0,0 +1,96 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otel // import "go.opentelemetry.io/otel" + +import ( + "log" + "os" + "sync/atomic" + "unsafe" +) + +var ( + // globalErrorHandler provides an ErrorHandler that can be used + // throughout an OpenTelemetry instrumented project. When a user + // specified ErrorHandler is registered (`SetErrorHandler`) all calls to + // `Handle` and will be delegated to the registered ErrorHandler. + globalErrorHandler = defaultErrorHandler() + + // Compile-time check that delegator implements ErrorHandler. + _ ErrorHandler = (*delegator)(nil) + // Compile-time check that errLogger implements ErrorHandler. + _ ErrorHandler = (*errLogger)(nil) +) + +type delegator struct { + delegate unsafe.Pointer +} + +func (d *delegator) Handle(err error) { + d.getDelegate().Handle(err) +} + +func (d *delegator) getDelegate() ErrorHandler { + return *(*ErrorHandler)(atomic.LoadPointer(&d.delegate)) +} + +// setDelegate sets the ErrorHandler delegate. 
+func (d *delegator) setDelegate(eh ErrorHandler) { + atomic.StorePointer(&d.delegate, unsafe.Pointer(&eh)) +} + +func defaultErrorHandler() *delegator { + d := &delegator{} + d.setDelegate(&errLogger{l: log.New(os.Stderr, "", log.LstdFlags)}) + return d +} + +// errLogger logs errors if no delegate is set, otherwise they are delegated. +type errLogger struct { + l *log.Logger +} + +// Handle logs err if no delegate is set, otherwise it is delegated. +func (h *errLogger) Handle(err error) { + h.l.Print(err) +} + +// GetErrorHandler returns the global ErrorHandler instance. +// +// The default ErrorHandler instance returned will log all errors to STDERR +// until an override ErrorHandler is set with SetErrorHandler. All +// ErrorHandler returned prior to this will automatically forward errors to +// the set instance instead of logging. +// +// Subsequent calls to SetErrorHandler after the first will not forward errors +// to the new ErrorHandler for prior returned instances. +func GetErrorHandler() ErrorHandler { + return globalErrorHandler +} + +// SetErrorHandler sets the global ErrorHandler to h. +// +// The first time this is called all ErrorHandler previously returned from +// GetErrorHandler will send errors to h instead of the default logging +// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not +// delegate errors to h. +func SetErrorHandler(h ErrorHandler) { + globalErrorHandler.setDelegate(h) +} + +// Handle is a convenience function for ErrorHandler().Handle(err). 
+func Handle(err error) { + GetErrorHandler().Handle(err) +} diff --git a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go new file mode 100644 index 00000000..622c3ee3 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go @@ -0,0 +1,111 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package attribute provide several helper functions for some commonly used +logic of processing attributes. +*/ +package attribute // import "go.opentelemetry.io/otel/internal/attribute" + +import ( + "reflect" +) + +// BoolSliceValue converts a bool slice into an array with same elements as slice. +func BoolSliceValue(v []bool) interface{} { + var zero bool + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) + copy(cp.Elem().Slice(0, len(v)).Interface().([]bool), v) + return cp.Elem().Interface() +} + +// Int64SliceValue converts an int64 slice into an array with same elements as slice. +func Int64SliceValue(v []int64) interface{} { + var zero int64 + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) + copy(cp.Elem().Slice(0, len(v)).Interface().([]int64), v) + return cp.Elem().Interface() +} + +// Float64SliceValue converts a float64 slice into an array with same elements as slice. 
+func Float64SliceValue(v []float64) interface{} { + var zero float64 + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) + copy(cp.Elem().Slice(0, len(v)).Interface().([]float64), v) + return cp.Elem().Interface() +} + +// StringSliceValue converts a string slice into an array with same elements as slice. +func StringSliceValue(v []string) interface{} { + var zero string + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) + copy(cp.Elem().Slice(0, len(v)).Interface().([]string), v) + return cp.Elem().Interface() +} + +// AsBoolSlice converts a bool array into a slice into with same elements as array. +func AsBoolSlice(v interface{}) []bool { + rv := reflect.ValueOf(v) + if rv.Type().Kind() != reflect.Array { + return nil + } + var zero bool + correctLen := rv.Len() + correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) + cpy := reflect.New(correctType) + _ = reflect.Copy(cpy.Elem(), rv) + return cpy.Elem().Slice(0, correctLen).Interface().([]bool) +} + +// AsInt64Slice converts an int64 array into a slice into with same elements as array. +func AsInt64Slice(v interface{}) []int64 { + rv := reflect.ValueOf(v) + if rv.Type().Kind() != reflect.Array { + return nil + } + var zero int64 + correctLen := rv.Len() + correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) + cpy := reflect.New(correctType) + _ = reflect.Copy(cpy.Elem(), rv) + return cpy.Elem().Slice(0, correctLen).Interface().([]int64) +} + +// AsFloat64Slice converts a float64 array into a slice into with same elements as array. 
+func AsFloat64Slice(v interface{}) []float64 { + rv := reflect.ValueOf(v) + if rv.Type().Kind() != reflect.Array { + return nil + } + var zero float64 + correctLen := rv.Len() + correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) + cpy := reflect.New(correctType) + _ = reflect.Copy(cpy.Elem(), rv) + return cpy.Elem().Slice(0, correctLen).Interface().([]float64) +} + +// AsStringSlice converts a string array into a slice into with same elements as array. +func AsStringSlice(v interface{}) []string { + rv := reflect.ValueOf(v) + if rv.Type().Kind() != reflect.Array { + return nil + } + var zero string + correctLen := rv.Len() + correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) + cpy := reflect.New(correctType) + _ = reflect.Copy(cpy.Elem(), rv) + return cpy.Elem().Slice(0, correctLen).Interface().([]string) +} diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go new file mode 100644 index 00000000..b96e5408 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go @@ -0,0 +1,43 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package baggage provides base types and functionality to store and retrieve +baggage in Go context. 
This package exists because the OpenTracing bridge to +OpenTelemetry needs to synchronize state whenever baggage for a context is +modified and that context contains an OpenTracing span. If it were not for +this need this package would not need to exist and the +`go.opentelemetry.io/otel/baggage` package would be the singular place where +W3C baggage is handled. +*/ +package baggage // import "go.opentelemetry.io/otel/internal/baggage" + +// List is the collection of baggage members. The W3C allows for duplicates, +// but OpenTelemetry does not, therefore, this is represented as a map. +type List map[string]Item + +// Item is the value and metadata properties part of a list-member. +type Item struct { + Value string + Properties []Property +} + +// Property is a metadata entry for a list-member. +type Property struct { + Key, Value string + + // HasValue indicates if a zero-value value means the property does not + // have a value or if it was the zero-value. + HasValue bool +} diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/context.go b/vendor/go.opentelemetry.io/otel/internal/baggage/context.go new file mode 100644 index 00000000..4469700d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/baggage/context.go @@ -0,0 +1,92 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package baggage // import "go.opentelemetry.io/otel/internal/baggage" + +import "context" + +type baggageContextKeyType int + +const baggageKey baggageContextKeyType = iota + +// SetHookFunc is a callback called when storing baggage in the context. +type SetHookFunc func(context.Context, List) context.Context + +// GetHookFunc is a callback called when getting baggage from the context. +type GetHookFunc func(context.Context, List) List + +type baggageState struct { + list List + + setHook SetHookFunc + getHook GetHookFunc +} + +// ContextWithSetHook returns a copy of parent with hook configured to be +// invoked every time ContextWithBaggage is called. +// +// Passing nil SetHookFunc creates a context with no set hook to call. +func ContextWithSetHook(parent context.Context, hook SetHookFunc) context.Context { + var s baggageState + if v, ok := parent.Value(baggageKey).(baggageState); ok { + s = v + } + + s.setHook = hook + return context.WithValue(parent, baggageKey, s) +} + +// ContextWithGetHook returns a copy of parent with hook configured to be +// invoked every time FromContext is called. +// +// Passing nil GetHookFunc creates a context with no get hook to call. +func ContextWithGetHook(parent context.Context, hook GetHookFunc) context.Context { + var s baggageState + if v, ok := parent.Value(baggageKey).(baggageState); ok { + s = v + } + + s.getHook = hook + return context.WithValue(parent, baggageKey, s) +} + +// ContextWithList returns a copy of parent with baggage. Passing nil list +// returns a context without any baggage. +func ContextWithList(parent context.Context, list List) context.Context { + var s baggageState + if v, ok := parent.Value(baggageKey).(baggageState); ok { + s = v + } + + s.list = list + ctx := context.WithValue(parent, baggageKey, s) + if s.setHook != nil { + ctx = s.setHook(ctx, list) + } + + return ctx +} + +// ListFromContext returns the baggage contained in ctx. 
+func ListFromContext(ctx context.Context) List { + switch v := ctx.Value(baggageKey).(type) { + case baggageState: + if v.getHook != nil { + return v.getHook(ctx, v.list) + } + return v.list + default: + return nil + } +} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go new file mode 100644 index 00000000..293c0896 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go @@ -0,0 +1,63 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package global // import "go.opentelemetry.io/otel/internal/global" + +import ( + "log" + "os" + "sync/atomic" + "unsafe" + + "github.com/go-logr/logr" + "github.com/go-logr/stdr" +) + +// globalLogger is the logging interface used within the otel api and sdk provide deatails of the internals. +// +// The default logger uses stdr which is backed by the standard `log.Logger` +// interface. This logger will only show messages at the Error Level. +var globalLogger unsafe.Pointer + +func init() { + SetLogger(stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))) +} + +// SetLogger overrides the globalLogger with l. +// +// To see Info messages use a logger with `l.V(1).Enabled() == true` +// To see Debug messages use a logger with `l.V(5).Enabled() == true`. 
+func SetLogger(l logr.Logger) { + atomic.StorePointer(&globalLogger, unsafe.Pointer(&l)) +} + +func getLogger() logr.Logger { + return *(*logr.Logger)(atomic.LoadPointer(&globalLogger)) +} + +// Info prints messages about the general state of the API or SDK. +// This should usually be less then 5 messages a minute. +func Info(msg string, keysAndValues ...interface{}) { + getLogger().V(1).Info(msg, keysAndValues...) +} + +// Error prints messages about exceptional states of the API or SDK. +func Error(err error, msg string, keysAndValues ...interface{}) { + getLogger().Error(err, msg, keysAndValues...) +} + +// Debug prints messages about all internal changes in the API or SDK. +func Debug(msg string, keysAndValues ...interface{}) { + getLogger().V(5).Info(msg, keysAndValues...) +} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/propagator.go b/vendor/go.opentelemetry.io/otel/internal/global/propagator.go new file mode 100644 index 00000000..06bac35c --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/global/propagator.go @@ -0,0 +1,82 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package global // import "go.opentelemetry.io/otel/internal/global" + +import ( + "context" + "sync" + + "go.opentelemetry.io/otel/propagation" +) + +// textMapPropagator is a default TextMapPropagator that delegates calls to a +// registered delegate if one is set, otherwise it defaults to delegating the +// calls to a the default no-op propagation.TextMapPropagator. +type textMapPropagator struct { + mtx sync.Mutex + once sync.Once + delegate propagation.TextMapPropagator + noop propagation.TextMapPropagator +} + +// Compile-time guarantee that textMapPropagator implements the +// propagation.TextMapPropagator interface. +var _ propagation.TextMapPropagator = (*textMapPropagator)(nil) + +func newTextMapPropagator() *textMapPropagator { + return &textMapPropagator{ + noop: propagation.NewCompositeTextMapPropagator(), + } +} + +// SetDelegate sets a delegate propagation.TextMapPropagator that all calls are +// forwarded to. Delegation can only be performed once, all subsequent calls +// perform no delegation. +func (p *textMapPropagator) SetDelegate(delegate propagation.TextMapPropagator) { + if delegate == nil { + return + } + + p.mtx.Lock() + p.once.Do(func() { p.delegate = delegate }) + p.mtx.Unlock() +} + +// effectiveDelegate returns the current delegate of p if one is set, +// otherwise the default noop TextMapPropagator is returned. This method +// can be called concurrently. +func (p *textMapPropagator) effectiveDelegate() propagation.TextMapPropagator { + p.mtx.Lock() + defer p.mtx.Unlock() + if p.delegate != nil { + return p.delegate + } + return p.noop +} + +// Inject set cross-cutting concerns from the Context into the carrier. +func (p *textMapPropagator) Inject(ctx context.Context, carrier propagation.TextMapCarrier) { + p.effectiveDelegate().Inject(ctx, carrier) +} + +// Extract reads cross-cutting concerns from the carrier into a Context. 
+func (p *textMapPropagator) Extract(ctx context.Context, carrier propagation.TextMapCarrier) context.Context { + return p.effectiveDelegate().Extract(ctx, carrier) +} + +// Fields returns the keys whose values are set with Inject. +func (p *textMapPropagator) Fields() []string { + return p.effectiveDelegate().Fields() +} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/state.go b/vendor/go.opentelemetry.io/otel/internal/global/state.go new file mode 100644 index 00000000..1ad38f82 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/global/state.go @@ -0,0 +1,115 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package global // import "go.opentelemetry.io/otel/internal/global" + +import ( + "errors" + "sync" + "sync/atomic" + + "go.opentelemetry.io/otel/propagation" + "go.opentelemetry.io/otel/trace" +) + +type ( + tracerProviderHolder struct { + tp trace.TracerProvider + } + + propagatorsHolder struct { + tm propagation.TextMapPropagator + } +) + +var ( + globalTracer = defaultTracerValue() + globalPropagators = defaultPropagatorsValue() + + delegateTraceOnce sync.Once + delegateTextMapPropagatorOnce sync.Once +) + +// TracerProvider is the internal implementation for global.TracerProvider. +func TracerProvider() trace.TracerProvider { + return globalTracer.Load().(tracerProviderHolder).tp +} + +// SetTracerProvider is the internal implementation for global.SetTracerProvider. 
+func SetTracerProvider(tp trace.TracerProvider) { + current := TracerProvider() + + if _, cOk := current.(*tracerProvider); cOk { + if _, tpOk := tp.(*tracerProvider); tpOk && current == tp { + // Do not assign the default delegating TracerProvider to delegate + // to itself. + Error( + errors.New("no delegate configured in tracer provider"), + "Setting tracer provider to it's current value. No delegate will be configured", + ) + return + } + } + + delegateTraceOnce.Do(func() { + if def, ok := current.(*tracerProvider); ok { + def.setDelegate(tp) + } + }) + globalTracer.Store(tracerProviderHolder{tp: tp}) +} + +// TextMapPropagator is the internal implementation for global.TextMapPropagator. +func TextMapPropagator() propagation.TextMapPropagator { + return globalPropagators.Load().(propagatorsHolder).tm +} + +// SetTextMapPropagator is the internal implementation for global.SetTextMapPropagator. +func SetTextMapPropagator(p propagation.TextMapPropagator) { + current := TextMapPropagator() + + if _, cOk := current.(*textMapPropagator); cOk { + if _, pOk := p.(*textMapPropagator); pOk && current == p { + // Do not assign the default delegating TextMapPropagator to + // delegate to itself. + Error( + errors.New("no delegate configured in text map propagator"), + "Setting text map propagator to it's current value. No delegate will be configured", + ) + return + } + } + + // For the textMapPropagator already returned by TextMapPropagator + // delegate to p. + delegateTextMapPropagatorOnce.Do(func() { + if def, ok := current.(*textMapPropagator); ok { + def.SetDelegate(p) + } + }) + // Return p when subsequent calls to TextMapPropagator are made. 
+ globalPropagators.Store(propagatorsHolder{tm: p}) +} + +func defaultTracerValue() *atomic.Value { + v := &atomic.Value{} + v.Store(tracerProviderHolder{tp: &tracerProvider{}}) + return v +} + +func defaultPropagatorsValue() *atomic.Value { + v := &atomic.Value{} + v.Store(propagatorsHolder{tm: newTextMapPropagator()}) + return v +} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go new file mode 100644 index 00000000..5f008d09 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/global/trace.go @@ -0,0 +1,192 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package global // import "go.opentelemetry.io/otel/internal/global" + +/* +This file contains the forwarding implementation of the TracerProvider used as +the default global instance. Prior to initialization of an SDK, Tracers +returned by the global TracerProvider will provide no-op functionality. This +means that all Span created prior to initialization are no-op Spans. + +Once an SDK has been initialized, all provided no-op Tracers are swapped for +Tracers provided by the SDK defined TracerProvider. However, any Span started +prior to this initialization does not change its behavior. Meaning, the Span +remains a no-op Span. + +The implementation to track and swap Tracers locks all new Tracer creation +until the swap is complete. 
This assumes that this operation is not +performance-critical. If that assumption is incorrect, be sure to configure an +SDK prior to any Tracer creation. +*/ + +import ( + "context" + "sync" + "sync/atomic" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" +) + +// tracerProvider is a placeholder for a configured SDK TracerProvider. +// +// All TracerProvider functionality is forwarded to a delegate once +// configured. +type tracerProvider struct { + mtx sync.Mutex + tracers map[il]*tracer + delegate trace.TracerProvider +} + +// Compile-time guarantee that tracerProvider implements the TracerProvider +// interface. +var _ trace.TracerProvider = &tracerProvider{} + +// setDelegate configures p to delegate all TracerProvider functionality to +// provider. +// +// All Tracers provided prior to this function call are switched out to be +// Tracers provided by provider. +// +// It is guaranteed by the caller that this happens only once. +func (p *tracerProvider) setDelegate(provider trace.TracerProvider) { + p.mtx.Lock() + defer p.mtx.Unlock() + + p.delegate = provider + + if len(p.tracers) == 0 { + return + } + + for _, t := range p.tracers { + t.setDelegate(provider) + } + + p.tracers = nil +} + +// Tracer implements TracerProvider. +func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer { + p.mtx.Lock() + defer p.mtx.Unlock() + + if p.delegate != nil { + return p.delegate.Tracer(name, opts...) + } + + // At this moment it is guaranteed that no sdk is installed, save the tracer in the tracers map. + + c := trace.NewTracerConfig(opts...) 
+ key := il{ + name: name, + version: c.InstrumentationVersion(), + } + + if p.tracers == nil { + p.tracers = make(map[il]*tracer) + } + + if val, ok := p.tracers[key]; ok { + return val + } + + t := &tracer{name: name, opts: opts, provider: p} + p.tracers[key] = t + return t +} + +type il struct { + name string + version string +} + +// tracer is a placeholder for a trace.Tracer. +// +// All Tracer functionality is forwarded to a delegate once configured. +// Otherwise, all functionality is forwarded to a NoopTracer. +type tracer struct { + name string + opts []trace.TracerOption + provider *tracerProvider + + delegate atomic.Value +} + +// Compile-time guarantee that tracer implements the trace.Tracer interface. +var _ trace.Tracer = &tracer{} + +// setDelegate configures t to delegate all Tracer functionality to Tracers +// created by provider. +// +// All subsequent calls to the Tracer methods will be passed to the delegate. +// +// It is guaranteed by the caller that this happens only once. +func (t *tracer) setDelegate(provider trace.TracerProvider) { + t.delegate.Store(provider.Tracer(t.name, t.opts...)) +} + +// Start implements trace.Tracer by forwarding the call to t.delegate if +// set, otherwise it forwards the call to a NoopTracer. +func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { + delegate := t.delegate.Load() + if delegate != nil { + return delegate.(trace.Tracer).Start(ctx, name, opts...) + } + + s := nonRecordingSpan{sc: trace.SpanContextFromContext(ctx), tracer: t} + ctx = trace.ContextWithSpan(ctx, s) + return ctx, s +} + +// nonRecordingSpan is a minimal implementation of a Span that wraps a +// SpanContext. It performs no operations other than to return the wrapped +// SpanContext. +type nonRecordingSpan struct { + sc trace.SpanContext + tracer *tracer +} + +var _ trace.Span = nonRecordingSpan{} + +// SpanContext returns the wrapped SpanContext. 
+func (s nonRecordingSpan) SpanContext() trace.SpanContext { return s.sc } + +// IsRecording always returns false. +func (nonRecordingSpan) IsRecording() bool { return false } + +// SetStatus does nothing. +func (nonRecordingSpan) SetStatus(codes.Code, string) {} + +// SetError does nothing. +func (nonRecordingSpan) SetError(bool) {} + +// SetAttributes does nothing. +func (nonRecordingSpan) SetAttributes(...attribute.KeyValue) {} + +// End does nothing. +func (nonRecordingSpan) End(...trace.SpanEndOption) {} + +// RecordError does nothing. +func (nonRecordingSpan) RecordError(error, ...trace.EventOption) {} + +// AddEvent does nothing. +func (nonRecordingSpan) AddEvent(string, ...trace.EventOption) {} + +// SetName does nothing. +func (nonRecordingSpan) SetName(string) {} + +func (s nonRecordingSpan) TracerProvider() trace.TracerProvider { return s.tracer.provider } diff --git a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go new file mode 100644 index 00000000..e07e7940 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go @@ -0,0 +1,55 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal // import "go.opentelemetry.io/otel/internal" + +import ( + "math" + "unsafe" +) + +func BoolToRaw(b bool) uint64 { // nolint:revive // b is not a control flag. 
+ if b { + return 1 + } + return 0 +} + +func RawToBool(r uint64) bool { + return r != 0 +} + +func Int64ToRaw(i int64) uint64 { + return uint64(i) +} + +func RawToInt64(r uint64) int64 { + return int64(r) +} + +func Float64ToRaw(f float64) uint64 { + return math.Float64bits(f) +} + +func RawToFloat64(r uint64) float64 { + return math.Float64frombits(r) +} + +func RawPtrToFloat64Ptr(r *uint64) *float64 { + return (*float64)(unsafe.Pointer(r)) +} + +func RawPtrToInt64Ptr(r *uint64) *int64 { + return (*int64)(unsafe.Pointer(r)) +} diff --git a/vendor/go.opentelemetry.io/otel/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal_logging.go new file mode 100644 index 00000000..c4f8acd5 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal_logging.go @@ -0,0 +1,26 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otel // import "go.opentelemetry.io/otel" + +import ( + "github.com/go-logr/logr" + + "go.opentelemetry.io/otel/internal/global" +) + +// SetLogger configures the logger used internally to opentelemetry. 
+func SetLogger(logger logr.Logger) { + global.SetLogger(logger) +} diff --git a/vendor/go.opentelemetry.io/otel/propagation.go b/vendor/go.opentelemetry.io/otel/propagation.go new file mode 100644 index 00000000..d29aaa32 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/propagation.go @@ -0,0 +1,31 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otel // import "go.opentelemetry.io/otel" + +import ( + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/propagation" +) + +// GetTextMapPropagator returns the global TextMapPropagator. If none has been +// set, a No-Op TextMapPropagator is returned. +func GetTextMapPropagator() propagation.TextMapPropagator { + return global.TextMapPropagator() +} + +// SetTextMapPropagator sets propagator as the global TextMapPropagator. +func SetTextMapPropagator(propagator propagation.TextMapPropagator) { + global.SetTextMapPropagator(propagator) +} diff --git a/vendor/go.opentelemetry.io/otel/propagation/baggage.go b/vendor/go.opentelemetry.io/otel/propagation/baggage.go new file mode 100644 index 00000000..303cdf1c --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/propagation/baggage.go @@ -0,0 +1,58 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package propagation // import "go.opentelemetry.io/otel/propagation" + +import ( + "context" + + "go.opentelemetry.io/otel/baggage" +) + +const baggageHeader = "baggage" + +// Baggage is a propagator that supports the W3C Baggage format. +// +// This propagates user-defined baggage associated with a trace. The complete +// specification is defined at https://www.w3.org/TR/baggage/. +type Baggage struct{} + +var _ TextMapPropagator = Baggage{} + +// Inject sets baggage key-values from ctx into the carrier. +func (b Baggage) Inject(ctx context.Context, carrier TextMapCarrier) { + bStr := baggage.FromContext(ctx).String() + if bStr != "" { + carrier.Set(baggageHeader, bStr) + } +} + +// Extract returns a copy of parent with the baggage from the carrier added. +func (b Baggage) Extract(parent context.Context, carrier TextMapCarrier) context.Context { + bStr := carrier.Get(baggageHeader) + if bStr == "" { + return parent + } + + bag, err := baggage.Parse(bStr) + if err != nil { + return parent + } + return baggage.ContextWithBaggage(parent, bag) +} + +// Fields returns the keys who's values are set with Inject. 
+func (b Baggage) Fields() []string { + return []string{baggageHeader} +} diff --git a/vendor/go.opentelemetry.io/otel/propagation/doc.go b/vendor/go.opentelemetry.io/otel/propagation/doc.go new file mode 100644 index 00000000..c119eb28 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/propagation/doc.go @@ -0,0 +1,24 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package propagation contains OpenTelemetry context propagators. + +OpenTelemetry propagators are used to extract and inject context data from and +into messages exchanged by applications. The propagator supported by this +package is the W3C Trace Context encoding +(https://www.w3.org/TR/trace-context/), and W3C Baggage +(https://www.w3.org/TR/baggage/). +*/ +package propagation // import "go.opentelemetry.io/otel/propagation" diff --git a/vendor/go.opentelemetry.io/otel/propagation/propagation.go b/vendor/go.opentelemetry.io/otel/propagation/propagation.go new file mode 100644 index 00000000..c94438f7 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/propagation/propagation.go @@ -0,0 +1,153 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package propagation // import "go.opentelemetry.io/otel/propagation" + +import ( + "context" + "net/http" +) + +// TextMapCarrier is the storage medium used by a TextMapPropagator. +type TextMapCarrier interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Get returns the value associated with the passed key. + Get(key string) string + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Set stores the key-value pair. + Set(key string, value string) + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Keys lists the keys stored in this carrier. + Keys() []string + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. +} + +// MapCarrier is a TextMapCarrier that uses a map held in memory as a storage +// medium for propagated key-value pairs. +type MapCarrier map[string]string + +// Compile time check that MapCarrier implements the TextMapCarrier. +var _ TextMapCarrier = MapCarrier{} + +// Get returns the value associated with the passed key. +func (c MapCarrier) Get(key string) string { + return c[key] +} + +// Set stores the key-value pair. +func (c MapCarrier) Set(key, value string) { + c[key] = value +} + +// Keys lists the keys stored in this carrier. 
+func (c MapCarrier) Keys() []string { + keys := make([]string, 0, len(c)) + for k := range c { + keys = append(keys, k) + } + return keys +} + +// HeaderCarrier adapts http.Header to satisfy the TextMapCarrier interface. +type HeaderCarrier http.Header + +// Get returns the value associated with the passed key. +func (hc HeaderCarrier) Get(key string) string { + return http.Header(hc).Get(key) +} + +// Set stores the key-value pair. +func (hc HeaderCarrier) Set(key string, value string) { + http.Header(hc).Set(key, value) +} + +// Keys lists the keys stored in this carrier. +func (hc HeaderCarrier) Keys() []string { + keys := make([]string, 0, len(hc)) + for k := range hc { + keys = append(keys, k) + } + return keys +} + +// TextMapPropagator propagates cross-cutting concerns as key-value text +// pairs within a carrier that travels in-band across process boundaries. +type TextMapPropagator interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Inject set cross-cutting concerns from the Context into the carrier. + Inject(ctx context.Context, carrier TextMapCarrier) + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Extract reads cross-cutting concerns from the carrier into a Context. + Extract(ctx context.Context, carrier TextMapCarrier) context.Context + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Fields returns the keys whose values are set with Inject. + Fields() []string + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. 
+} + +type compositeTextMapPropagator []TextMapPropagator + +func (p compositeTextMapPropagator) Inject(ctx context.Context, carrier TextMapCarrier) { + for _, i := range p { + i.Inject(ctx, carrier) + } +} + +func (p compositeTextMapPropagator) Extract(ctx context.Context, carrier TextMapCarrier) context.Context { + for _, i := range p { + ctx = i.Extract(ctx, carrier) + } + return ctx +} + +func (p compositeTextMapPropagator) Fields() []string { + unique := make(map[string]struct{}) + for _, i := range p { + for _, k := range i.Fields() { + unique[k] = struct{}{} + } + } + + fields := make([]string, 0, len(unique)) + for k := range unique { + fields = append(fields, k) + } + return fields +} + +// NewCompositeTextMapPropagator returns a unified TextMapPropagator from the +// group of passed TextMapPropagator. This allows different cross-cutting +// concerns to be propagates in a unified manner. +// +// The returned TextMapPropagator will inject and extract cross-cutting +// concerns in the order the TextMapPropagators were provided. Additionally, +// the Fields method will return a de-duplicated slice of the keys that are +// set with the Inject method. +func NewCompositeTextMapPropagator(p ...TextMapPropagator) TextMapPropagator { + return compositeTextMapPropagator(p) +} diff --git a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go new file mode 100644 index 00000000..902692da --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go @@ -0,0 +1,159 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package propagation // import "go.opentelemetry.io/otel/propagation" + +import ( + "context" + "encoding/hex" + "fmt" + "regexp" + + "go.opentelemetry.io/otel/trace" +) + +const ( + supportedVersion = 0 + maxVersion = 254 + traceparentHeader = "traceparent" + tracestateHeader = "tracestate" +) + +// TraceContext is a propagator that supports the W3C Trace Context format +// (https://www.w3.org/TR/trace-context/) +// +// This propagator will propagate the traceparent and tracestate headers to +// guarantee traces are not broken. It is up to the users of this propagator +// to choose if they want to participate in a trace by modifying the +// traceparent header and relevant parts of the tracestate header containing +// their proprietary information. +type TraceContext struct{} + +var _ TextMapPropagator = TraceContext{} +var traceCtxRegExp = regexp.MustCompile("^(?P[0-9a-f]{2})-(?P[a-f0-9]{32})-(?P[a-f0-9]{16})-(?P[a-f0-9]{2})(?:-.*)?$") + +// Inject set tracecontext from the Context into the carrier. +func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) { + sc := trace.SpanContextFromContext(ctx) + if !sc.IsValid() { + return + } + + if ts := sc.TraceState().String(); ts != "" { + carrier.Set(tracestateHeader, ts) + } + + // Clear all flags other than the trace-context supported sampling bit. 
+ flags := sc.TraceFlags() & trace.FlagsSampled + + h := fmt.Sprintf("%.2x-%s-%s-%s", + supportedVersion, + sc.TraceID(), + sc.SpanID(), + flags) + carrier.Set(traceparentHeader, h) +} + +// Extract reads tracecontext from the carrier into a returned Context. +// +// The returned Context will be a copy of ctx and contain the extracted +// tracecontext as the remote SpanContext. If the extracted tracecontext is +// invalid, the passed ctx will be returned directly instead. +func (tc TraceContext) Extract(ctx context.Context, carrier TextMapCarrier) context.Context { + sc := tc.extract(carrier) + if !sc.IsValid() { + return ctx + } + return trace.ContextWithRemoteSpanContext(ctx, sc) +} + +func (tc TraceContext) extract(carrier TextMapCarrier) trace.SpanContext { + h := carrier.Get(traceparentHeader) + if h == "" { + return trace.SpanContext{} + } + + matches := traceCtxRegExp.FindStringSubmatch(h) + + if len(matches) == 0 { + return trace.SpanContext{} + } + + if len(matches) < 5 { // four subgroups plus the overall match + return trace.SpanContext{} + } + + if len(matches[1]) != 2 { + return trace.SpanContext{} + } + ver, err := hex.DecodeString(matches[1]) + if err != nil { + return trace.SpanContext{} + } + version := int(ver[0]) + if version > maxVersion { + return trace.SpanContext{} + } + + if version == 0 && len(matches) != 5 { // four subgroups plus the overall match + return trace.SpanContext{} + } + + if len(matches[2]) != 32 { + return trace.SpanContext{} + } + + var scc trace.SpanContextConfig + + scc.TraceID, err = trace.TraceIDFromHex(matches[2][:32]) + if err != nil { + return trace.SpanContext{} + } + + if len(matches[3]) != 16 { + return trace.SpanContext{} + } + scc.SpanID, err = trace.SpanIDFromHex(matches[3]) + if err != nil { + return trace.SpanContext{} + } + + if len(matches[4]) != 2 { + return trace.SpanContext{} + } + opts, err := hex.DecodeString(matches[4]) + if err != nil || len(opts) < 1 || (version == 0 && opts[0] > 2) { + return 
trace.SpanContext{} + } + // Clear all flags other than the trace-context supported sampling bit. + scc.TraceFlags = trace.TraceFlags(opts[0]) & trace.FlagsSampled + + // Ignore the error returned here. Failure to parse tracestate MUST NOT + // affect the parsing of traceparent according to the W3C tracecontext + // specification. + scc.TraceState, _ = trace.ParseTraceState(carrier.Get(tracestateHeader)) + scc.Remote = true + + sc := trace.NewSpanContext(scc) + if !sc.IsValid() { + return trace.SpanContext{} + } + + return sc +} + +// Fields returns the keys who's values are set with Inject. +func (tc TraceContext) Fields() []string { + return []string{traceparentHeader, tracestateHeader} +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/LICENSE b/vendor/go.opentelemetry.io/otel/sdk/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go new file mode 100644 index 00000000..6e923aca --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go @@ -0,0 +1,24 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package instrumentation provides types to represent the code libraries that +// provide OpenTelemetry instrumentation. These types are used in the +// OpenTelemetry signal pipelines to identify the source of telemetry. +// +// See +// https://github.com/open-telemetry/oteps/blob/d226b677d73a785523fe9b9701be13225ebc528d/text/0083-component.md +// and +// https://github.com/open-telemetry/oteps/blob/d226b677d73a785523fe9b9701be13225ebc528d/text/0201-scope-attributes.md +// for more information. 
+package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go new file mode 100644 index 00000000..39f025a1 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go @@ -0,0 +1,19 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" + +// Library represents the instrumentation library. +// Deprecated: please use Scope instead. +type Library = Scope diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go new file mode 100644 index 00000000..09c6d93f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go @@ -0,0 +1,26 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" + +// Scope represents the instrumentation scope. +type Scope struct { + // Name is the name of the instrumentation scope. This should be the + // Go package name of that scope. + Name string + // Version is the version of the instrumentation scope. + Version string + // SchemaURL of the telemetry emitted by the scope. + SchemaURL string +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go b/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go new file mode 100644 index 00000000..5e94b8ae --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go @@ -0,0 +1,177 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package env // import "go.opentelemetry.io/otel/sdk/internal/env" + +import ( + "os" + "strconv" + + "go.opentelemetry.io/otel/internal/global" +) + +// Environment variable names. +const ( + // BatchSpanProcessorScheduleDelayKey is the delay interval between two + // consecutive exports (i.e. 5000). + BatchSpanProcessorScheduleDelayKey = "OTEL_BSP_SCHEDULE_DELAY" + // BatchSpanProcessorExportTimeoutKey is the maximum allowed time to + // export data (i.e. 3000). 
+ BatchSpanProcessorExportTimeoutKey = "OTEL_BSP_EXPORT_TIMEOUT" + // BatchSpanProcessorMaxQueueSizeKey is the maximum queue size (i.e. 2048). + BatchSpanProcessorMaxQueueSizeKey = "OTEL_BSP_MAX_QUEUE_SIZE" + // BatchSpanProcessorMaxExportBatchSizeKey is the maximum batch size (i.e. + // 512). Note: it must be less than or equal to + // EnvBatchSpanProcessorMaxQueueSize. + BatchSpanProcessorMaxExportBatchSizeKey = "OTEL_BSP_MAX_EXPORT_BATCH_SIZE" + + // AttributeValueLengthKey is the maximum allowed attribute value size. + AttributeValueLengthKey = "OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT" + + // AttributeCountKey is the maximum allowed span attribute count. + AttributeCountKey = "OTEL_ATTRIBUTE_COUNT_LIMIT" + + // SpanAttributeValueLengthKey is the maximum allowed attribute value size + // for a span. + SpanAttributeValueLengthKey = "OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT" + + // SpanAttributeCountKey is the maximum allowed span attribute count for a + // span. + SpanAttributeCountKey = "OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT" + + // SpanEventCountKey is the maximum allowed span event count. + SpanEventCountKey = "OTEL_SPAN_EVENT_COUNT_LIMIT" + + // SpanEventAttributeCountKey is the maximum allowed attribute per span + // event count. + SpanEventAttributeCountKey = "OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT" + + // SpanLinkCountKey is the maximum allowed span link count. + SpanLinkCountKey = "OTEL_SPAN_LINK_COUNT_LIMIT" + + // SpanLinkAttributeCountKey is the maximum allowed attribute per span + // link count. + SpanLinkAttributeCountKey = "OTEL_LINK_ATTRIBUTE_COUNT_LIMIT" +) + +// firstInt returns the value of the first matching environment variable from +// keys. If the value is not an integer or no match is found, defaultValue is +// returned. 
+func firstInt(defaultValue int, keys ...string) int { + for _, key := range keys { + value, ok := os.LookupEnv(key) + if !ok { + continue + } + + intValue, err := strconv.Atoi(value) + if err != nil { + global.Info("Got invalid value, number value expected.", key, value) + return defaultValue + } + + return intValue + } + + return defaultValue +} + +// IntEnvOr returns the int value of the environment variable with name key if +// it exists and the value is an int. Otherwise, defaultValue is returned. +func IntEnvOr(key string, defaultValue int) int { + value, ok := os.LookupEnv(key) + if !ok { + return defaultValue + } + + intValue, err := strconv.Atoi(value) + if err != nil { + global.Info("Got invalid value, number value expected.", key, value) + return defaultValue + } + + return intValue +} + +// BatchSpanProcessorScheduleDelay returns the environment variable value for +// the OTEL_BSP_SCHEDULE_DELAY key if it exists, otherwise defaultValue is +// returned. +func BatchSpanProcessorScheduleDelay(defaultValue int) int { + return IntEnvOr(BatchSpanProcessorScheduleDelayKey, defaultValue) +} + +// BatchSpanProcessorExportTimeout returns the environment variable value for +// the OTEL_BSP_EXPORT_TIMEOUT key if it exists, otherwise defaultValue is +// returned. +func BatchSpanProcessorExportTimeout(defaultValue int) int { + return IntEnvOr(BatchSpanProcessorExportTimeoutKey, defaultValue) +} + +// BatchSpanProcessorMaxQueueSize returns the environment variable value for +// the OTEL_BSP_MAX_QUEUE_SIZE key if it exists, otherwise defaultValue is +// returned. +func BatchSpanProcessorMaxQueueSize(defaultValue int) int { + return IntEnvOr(BatchSpanProcessorMaxQueueSizeKey, defaultValue) +} + +// BatchSpanProcessorMaxExportBatchSize returns the environment variable value for +// the OTEL_BSP_MAX_EXPORT_BATCH_SIZE key if it exists, otherwise defaultValue +// is returned. 
+func BatchSpanProcessorMaxExportBatchSize(defaultValue int) int { + return IntEnvOr(BatchSpanProcessorMaxExportBatchSizeKey, defaultValue) +} + +// SpanAttributeValueLength returns the environment variable value for the +// OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT key if it exists. Otherwise, the +// environment variable value for OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT is +// returned or defaultValue if that is not set. +func SpanAttributeValueLength(defaultValue int) int { + return firstInt(defaultValue, SpanAttributeValueLengthKey, AttributeValueLengthKey) +} + +// SpanAttributeCount returns the environment variable value for the +// OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT key if it exists. Otherwise, the +// environment variable value for OTEL_ATTRIBUTE_COUNT_LIMIT is returned or +// defaultValue if that is not set. +func SpanAttributeCount(defaultValue int) int { + return firstInt(defaultValue, SpanAttributeCountKey, AttributeCountKey) +} + +// SpanEventCount returns the environment variable value for the +// OTEL_SPAN_EVENT_COUNT_LIMIT key if it exists, otherwise defaultValue is +// returned. +func SpanEventCount(defaultValue int) int { + return IntEnvOr(SpanEventCountKey, defaultValue) +} + +// SpanEventAttributeCount returns the environment variable value for the +// OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT key if it exists, otherwise defaultValue +// is returned. +func SpanEventAttributeCount(defaultValue int) int { + return IntEnvOr(SpanEventAttributeCountKey, defaultValue) +} + +// SpanLinkCount returns the environment variable value for the +// OTEL_SPAN_LINK_COUNT_LIMIT key if it exists, otherwise defaultValue is +// returned. +func SpanLinkCount(defaultValue int) int { + return IntEnvOr(SpanLinkCountKey, defaultValue) +} + +// SpanLinkAttributeCount returns the environment variable value for the +// OTEL_LINK_ATTRIBUTE_COUNT_LIMIT key if it exists, otherwise defaultValue is +// returned. 
+func SpanLinkAttributeCount(defaultValue int) int { + return IntEnvOr(SpanLinkAttributeCountKey, defaultValue) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/internal.go b/vendor/go.opentelemetry.io/otel/sdk/internal/internal.go new file mode 100644 index 00000000..84a02306 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/internal/internal.go @@ -0,0 +1,37 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal // import "go.opentelemetry.io/otel/sdk/internal" + +import ( + "fmt" + "time" + + "go.opentelemetry.io/otel" +) + +// UserAgent is the user agent to be added to the outgoing +// requests from the exporters. +var UserAgent = fmt.Sprintf("opentelemetry-go/%s", otel.Version()) + +// MonotonicEndTime returns the end time at present +// but offset from start, monotonically. +// +// The monotonic clock is used in subtractions hence +// the duration since start added back to start gives +// end as a monotonic time. 
+// See https://golang.org/pkg/time/#hdr-Monotonic_Clocks +func MonotonicEndTime(start time.Time) time.Time { + return start.Add(time.Since(start)) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go new file mode 100644 index 00000000..c1d22040 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go @@ -0,0 +1,72 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + "errors" + "fmt" +) + +var ( + // ErrPartialResource is returned by a detector when complete source + // information for a Resource is unavailable or the source information + // contains invalid values that are omitted from the returned Resource. + ErrPartialResource = errors.New("partial resource") +) + +// Detector detects OpenTelemetry resource information. +type Detector interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Detect returns an initialized Resource based on gathered information. + // If the source information to construct a Resource contains invalid + // values, a Resource is returned with the valid parts of the source + // information used for initialization along with an appropriately + // wrapped ErrPartialResource error. 
+ Detect(ctx context.Context) (*Resource, error) + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. +} + +// Detect calls all input detectors sequentially and merges each result with the previous one. +// It returns the merged error too. +func Detect(ctx context.Context, detectors ...Detector) (*Resource, error) { + var autoDetectedRes *Resource + var errInfo []string + for _, detector := range detectors { + if detector == nil { + continue + } + res, err := detector.Detect(ctx) + if err != nil { + errInfo = append(errInfo, err.Error()) + if !errors.Is(err, ErrPartialResource) { + continue + } + } + autoDetectedRes, err = Merge(autoDetectedRes, res) + if err != nil { + errInfo = append(errInfo, err.Error()) + } + } + + var aggregatedError error + if len(errInfo) > 0 { + aggregatedError = fmt.Errorf("detecting resources: %s", errInfo) + } + return autoDetectedRes, aggregatedError +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go new file mode 100644 index 00000000..aa0f942f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go @@ -0,0 +1,108 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + "fmt" + "os" + "path/filepath" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" +) + +type ( + // telemetrySDK is a Detector that provides information about + // the OpenTelemetry SDK used. This Detector is included as a + // builtin. If these resource attributes are not wanted, use + // the WithTelemetrySDK(nil) or WithoutBuiltin() options to + // explicitly disable them. + telemetrySDK struct{} + + // host is a Detector that provides information about the host + // being run on. This Detector is included as a builtin. If + // these resource attributes are not wanted, use the + // WithHost(nil) or WithoutBuiltin() options to explicitly + // disable them. + host struct{} + + stringDetector struct { + schemaURL string + K attribute.Key + F func() (string, error) + } + + defaultServiceNameDetector struct{} +) + +var ( + _ Detector = telemetrySDK{} + _ Detector = host{} + _ Detector = stringDetector{} + _ Detector = defaultServiceNameDetector{} +) + +// Detect returns a *Resource that describes the OpenTelemetry SDK used. +func (telemetrySDK) Detect(context.Context) (*Resource, error) { + return NewWithAttributes( + semconv.SchemaURL, + semconv.TelemetrySDKName("opentelemetry"), + semconv.TelemetrySDKLanguageGo, + semconv.TelemetrySDKVersion(otel.Version()), + ), nil +} + +// Detect returns a *Resource that describes the host being run on. +func (host) Detect(ctx context.Context) (*Resource, error) { + return StringDetector(semconv.SchemaURL, semconv.HostNameKey, os.Hostname).Detect(ctx) +} + +// StringDetector returns a Detector that will produce a *Resource +// containing the string as a value corresponding to k. The resulting Resource +// will have the specified schemaURL. 
+func StringDetector(schemaURL string, k attribute.Key, f func() (string, error)) Detector { + return stringDetector{schemaURL: schemaURL, K: k, F: f} +} + +// Detect returns a *Resource that describes the string as a value +// corresponding to attribute.Key as well as the specific schemaURL. +func (sd stringDetector) Detect(ctx context.Context) (*Resource, error) { + value, err := sd.F() + if err != nil { + return nil, fmt.Errorf("%s: %w", string(sd.K), err) + } + a := sd.K.String(value) + if !a.Valid() { + return nil, fmt.Errorf("invalid attribute: %q -> %q", a.Key, a.Value.Emit()) + } + return NewWithAttributes(sd.schemaURL, sd.K.String(value)), nil +} + +// Detect implements Detector. +func (defaultServiceNameDetector) Detect(ctx context.Context) (*Resource, error) { + return StringDetector( + semconv.SchemaURL, + semconv.ServiceNameKey, + func() (string, error) { + executable, err := os.Executable() + if err != nil { + return "unknown_service:go", nil + } + return "unknown_service:" + filepath.Base(executable), nil + }, + ).Detect(ctx) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/config.go b/vendor/go.opentelemetry.io/otel/sdk/resource/config.go new file mode 100644 index 00000000..f9a2a299 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/config.go @@ -0,0 +1,201 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" +) + +// config contains configuration for Resource creation. +type config struct { + // detectors that will be evaluated. + detectors []Detector + // SchemaURL to associate with the Resource. + schemaURL string +} + +// Option is the interface that applies a configuration option. +type Option interface { + // apply sets the Option value of a config. + apply(config) config +} + +// WithAttributes adds attributes to the configured Resource. +func WithAttributes(attributes ...attribute.KeyValue) Option { + return WithDetectors(detectAttributes{attributes}) +} + +type detectAttributes struct { + attributes []attribute.KeyValue +} + +func (d detectAttributes) Detect(context.Context) (*Resource, error) { + return NewSchemaless(d.attributes...), nil +} + +// WithDetectors adds detectors to be evaluated for the configured resource. +func WithDetectors(detectors ...Detector) Option { + return detectorsOption{detectors: detectors} +} + +type detectorsOption struct { + detectors []Detector +} + +func (o detectorsOption) apply(cfg config) config { + cfg.detectors = append(cfg.detectors, o.detectors...) + return cfg +} + +// WithFromEnv adds attributes from environment variables to the configured resource. +func WithFromEnv() Option { + return WithDetectors(fromEnv{}) +} + +// WithHost adds attributes from the host to the configured resource. +func WithHost() Option { + return WithDetectors(host{}) +} + +// WithTelemetrySDK adds TelemetrySDK version info to the configured resource. +func WithTelemetrySDK() Option { + return WithDetectors(telemetrySDK{}) +} + +// WithSchemaURL sets the schema URL for the configured resource. 
+func WithSchemaURL(schemaURL string) Option { + return schemaURLOption(schemaURL) +} + +type schemaURLOption string + +func (o schemaURLOption) apply(cfg config) config { + cfg.schemaURL = string(o) + return cfg +} + +// WithOS adds all the OS attributes to the configured Resource. +// See individual WithOS* functions to configure specific attributes. +func WithOS() Option { + return WithDetectors( + osTypeDetector{}, + osDescriptionDetector{}, + ) +} + +// WithOSType adds an attribute with the operating system type to the configured Resource. +func WithOSType() Option { + return WithDetectors(osTypeDetector{}) +} + +// WithOSDescription adds an attribute with the operating system description to the +// configured Resource. The formatted string is equivalent to the output of the +// `uname -snrvm` command. +func WithOSDescription() Option { + return WithDetectors(osDescriptionDetector{}) +} + +// WithProcess adds all the Process attributes to the configured Resource. +// +// Warning! This option will include process command line arguments. If these +// contain sensitive information it will be included in the exported resource. +// +// This option is equivalent to calling WithProcessPID, +// WithProcessExecutableName, WithProcessExecutablePath, +// WithProcessCommandArgs, WithProcessOwner, WithProcessRuntimeName, +// WithProcessRuntimeVersion, and WithProcessRuntimeDescription. See each +// option function for information about what resource attributes each +// includes. +func WithProcess() Option { + return WithDetectors( + processPIDDetector{}, + processExecutableNameDetector{}, + processExecutablePathDetector{}, + processCommandArgsDetector{}, + processOwnerDetector{}, + processRuntimeNameDetector{}, + processRuntimeVersionDetector{}, + processRuntimeDescriptionDetector{}, + ) +} + +// WithProcessPID adds an attribute with the process identifier (PID) to the +// configured Resource. 
+func WithProcessPID() Option { + return WithDetectors(processPIDDetector{}) +} + +// WithProcessExecutableName adds an attribute with the name of the process +// executable to the configured Resource. +func WithProcessExecutableName() Option { + return WithDetectors(processExecutableNameDetector{}) +} + +// WithProcessExecutablePath adds an attribute with the full path to the process +// executable to the configured Resource. +func WithProcessExecutablePath() Option { + return WithDetectors(processExecutablePathDetector{}) +} + +// WithProcessCommandArgs adds an attribute with all the command arguments (including +// the command/executable itself) as received by the process to the configured +// Resource. +// +// Warning! This option will include process command line arguments. If these +// contain sensitive information it will be included in the exported resource. +func WithProcessCommandArgs() Option { + return WithDetectors(processCommandArgsDetector{}) +} + +// WithProcessOwner adds an attribute with the username of the user that owns the process +// to the configured Resource. +func WithProcessOwner() Option { + return WithDetectors(processOwnerDetector{}) +} + +// WithProcessRuntimeName adds an attribute with the name of the runtime of this +// process to the configured Resource. +func WithProcessRuntimeName() Option { + return WithDetectors(processRuntimeNameDetector{}) +} + +// WithProcessRuntimeVersion adds an attribute with the version of the runtime of +// this process to the configured Resource. +func WithProcessRuntimeVersion() Option { + return WithDetectors(processRuntimeVersionDetector{}) +} + +// WithProcessRuntimeDescription adds an attribute with an additional description +// about the runtime of the process to the configured Resource. +func WithProcessRuntimeDescription() Option { + return WithDetectors(processRuntimeDescriptionDetector{}) +} + +// WithContainer adds all the Container attributes to the configured Resource. 
+// See individual WithContainer* functions to configure specific attributes. +func WithContainer() Option { + return WithDetectors( + cgroupContainerIDDetector{}, + ) +} + +// WithContainerID adds an attribute with the id of the container to the configured Resource. +// Note: WithContainerID will not extract the correct container ID in an ECS environment. +// Please use the ECS resource detector instead (https://pkg.go.dev/go.opentelemetry.io/contrib/detectors/aws/ecs). +func WithContainerID() Option { + return WithDetectors(cgroupContainerIDDetector{}) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go new file mode 100644 index 00000000..318dcf82 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go @@ -0,0 +1,100 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "bufio" + "context" + "errors" + "io" + "os" + "regexp" + + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" +) + +type containerIDProvider func() (string, error) + +var ( + containerID containerIDProvider = getContainerIDFromCGroup + cgroupContainerIDRe = regexp.MustCompile(`^.*/(?:.*-)?([0-9a-f]+)(?:\.|\s*$)`) +) + +type cgroupContainerIDDetector struct{} + +const cgroupPath = "/proc/self/cgroup" + +// Detect returns a *Resource that describes the id of the container. +// If no container id found, an empty resource will be returned. +func (cgroupContainerIDDetector) Detect(ctx context.Context) (*Resource, error) { + containerID, err := containerID() + if err != nil { + return nil, err + } + + if containerID == "" { + return Empty(), nil + } + return NewWithAttributes(semconv.SchemaURL, semconv.ContainerID(containerID)), nil +} + +var ( + defaultOSStat = os.Stat + osStat = defaultOSStat + + defaultOSOpen = func(name string) (io.ReadCloser, error) { + return os.Open(name) + } + osOpen = defaultOSOpen +) + +// getContainerIDFromCGroup returns the id of the container from the cgroup file. +// If no container id found, an empty string will be returned. +func getContainerIDFromCGroup() (string, error) { + if _, err := osStat(cgroupPath); errors.Is(err, os.ErrNotExist) { + // File does not exist, skip + return "", nil + } + + file, err := osOpen(cgroupPath) + if err != nil { + return "", err + } + defer file.Close() + + return getContainerIDFromReader(file), nil +} + +// getContainerIDFromReader returns the id of the container from reader. +func getContainerIDFromReader(reader io.Reader) string { + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + line := scanner.Text() + + if id := getContainerIDFromLine(line); id != "" { + return id + } + } + return "" +} + +// getContainerIDFromLine returns the id of the container from one string line. 
+func getContainerIDFromLine(line string) string { + matches := cgroupContainerIDRe.FindStringSubmatch(line) + if len(matches) <= 1 { + return "" + } + return matches[1] +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go b/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go new file mode 100644 index 00000000..9aab3d83 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go @@ -0,0 +1,28 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package resource provides detecting and representing resources. +// +// The fundamental struct is a Resource which holds identifying information +// about the entities for which telemetry is exported. +// +// To automatically construct Resources from an environment a Detector +// interface is defined. Implementations of this interface can be passed to +// the Detect function to generate a Resource from the merged information. +// +// To load a user defined Resource from the environment variable +// OTEL_RESOURCE_ATTRIBUTES the FromEnv Detector can be used. It will interpret +// the value as a list of comma delimited key/value pairs +// (e.g. `=,=,...`). 
+package resource // import "go.opentelemetry.io/otel/sdk/resource" diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go new file mode 100644 index 00000000..e32843ca --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go @@ -0,0 +1,108 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + "fmt" + "net/url" + "os" + "strings" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" +) + +const ( + // resourceAttrKey is the environment variable name OpenTelemetry Resource information will be read from. + resourceAttrKey = "OTEL_RESOURCE_ATTRIBUTES" + + // svcNameKey is the environment variable name that Service Name information will be read from. + svcNameKey = "OTEL_SERVICE_NAME" +) + +var ( + // errMissingValue is returned when a resource value is missing. + errMissingValue = fmt.Errorf("%w: missing value", ErrPartialResource) +) + +// fromEnv is a Detector that implements the Detector and collects +// resources from environment. This Detector is included as a +// builtin. +type fromEnv struct{} + +// compile time assertion that FromEnv implements Detector interface. +var _ Detector = fromEnv{} + +// Detect collects resources from environment. 
+func (fromEnv) Detect(context.Context) (*Resource, error) { + attrs := strings.TrimSpace(os.Getenv(resourceAttrKey)) + svcName := strings.TrimSpace(os.Getenv(svcNameKey)) + + if attrs == "" && svcName == "" { + return Empty(), nil + } + + var res *Resource + + if svcName != "" { + res = NewSchemaless(semconv.ServiceName(svcName)) + } + + r2, err := constructOTResources(attrs) + + // Ensure that the resource with the service name from OTEL_SERVICE_NAME + // takes precedence, if it was defined. + res, err2 := Merge(r2, res) + + if err == nil { + err = err2 + } else if err2 != nil { + err = fmt.Errorf("detecting resources: %s", []string{err.Error(), err2.Error()}) + } + + return res, err +} + +func constructOTResources(s string) (*Resource, error) { + if s == "" { + return Empty(), nil + } + pairs := strings.Split(s, ",") + attrs := []attribute.KeyValue{} + var invalid []string + for _, p := range pairs { + field := strings.SplitN(p, "=", 2) + if len(field) != 2 { + invalid = append(invalid, p) + continue + } + k := strings.TrimSpace(field[0]) + v, err := url.QueryUnescape(strings.TrimSpace(field[1])) + if err != nil { + // Retain original value if decoding fails, otherwise it will be + // an empty string. + v = field[1] + otel.Handle(err) + } + attrs = append(attrs, attribute.String(k, v)) + } + var err error + if len(invalid) > 0 { + err = fmt.Errorf("%w: %v", errMissingValue, invalid) + } + return NewSchemaless(attrs...), err +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go new file mode 100644 index 00000000..815fe5c2 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go @@ -0,0 +1,97 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + "strings" + + "go.opentelemetry.io/otel/attribute" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" +) + +type osDescriptionProvider func() (string, error) + +var defaultOSDescriptionProvider osDescriptionProvider = platformOSDescription + +var osDescription = defaultOSDescriptionProvider + +func setDefaultOSDescriptionProvider() { + setOSDescriptionProvider(defaultOSDescriptionProvider) +} + +func setOSDescriptionProvider(osDescriptionProvider osDescriptionProvider) { + osDescription = osDescriptionProvider +} + +type osTypeDetector struct{} +type osDescriptionDetector struct{} + +// Detect returns a *Resource that describes the operating system type the +// service is running on. +func (osTypeDetector) Detect(ctx context.Context) (*Resource, error) { + osType := runtimeOS() + + osTypeAttribute := mapRuntimeOSToSemconvOSType(osType) + + return NewWithAttributes( + semconv.SchemaURL, + osTypeAttribute, + ), nil +} + +// Detect returns a *Resource that describes the operating system the +// service is running on. +func (osDescriptionDetector) Detect(ctx context.Context) (*Resource, error) { + description, err := osDescription() + + if err != nil { + return nil, err + } + + return NewWithAttributes( + semconv.SchemaURL, + semconv.OSDescription(description), + ), nil +} + +// mapRuntimeOSToSemconvOSType translates the OS name as provided by the Go runtime +// into an OS type attribute with the corresponding value defined by the semantic +// conventions. 
In case the provided OS name isn't mapped, it's transformed to lowercase +// and used as the value for the returned OS type attribute. +func mapRuntimeOSToSemconvOSType(osType string) attribute.KeyValue { + // the elements in this map are the intersection between + // available GOOS values and defined semconv OS types + osTypeAttributeMap := map[string]attribute.KeyValue{ + "darwin": semconv.OSTypeDarwin, + "dragonfly": semconv.OSTypeDragonflyBSD, + "freebsd": semconv.OSTypeFreeBSD, + "linux": semconv.OSTypeLinux, + "netbsd": semconv.OSTypeNetBSD, + "openbsd": semconv.OSTypeOpenBSD, + "solaris": semconv.OSTypeSolaris, + "windows": semconv.OSTypeWindows, + } + + var osTypeAttribute attribute.KeyValue + + if attr, ok := osTypeAttributeMap[osType]; ok { + osTypeAttribute = attr + } else { + osTypeAttribute = semconv.OSTypeKey.String(strings.ToLower(osType)) + } + + return osTypeAttribute +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go new file mode 100644 index 00000000..24ec8579 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go @@ -0,0 +1,102 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "encoding/xml" + "fmt" + "io" + "os" +) + +type plist struct { + XMLName xml.Name `xml:"plist"` + Dict dict `xml:"dict"` +} + +type dict struct { + Key []string `xml:"key"` + String []string `xml:"string"` +} + +// osRelease builds a string describing the operating system release based on the +// contents of the property list (.plist) system files. If no .plist files are found, +// or if the required properties to build the release description string are missing, +// an empty string is returned instead. The generated string resembles the output of +// the `sw_vers` commandline program, but in a single-line string. For more information +// about the `sw_vers` program, see: https://www.unix.com/man-page/osx/1/SW_VERS. +func osRelease() string { + file, err := getPlistFile() + if err != nil { + return "" + } + + defer file.Close() + + values, err := parsePlistFile(file) + if err != nil { + return "" + } + + return buildOSRelease(values) +} + +// getPlistFile returns a *os.File pointing to one of the well-known .plist files +// available on macOS. If no file can be opened, it returns an error. +func getPlistFile() (*os.File, error) { + return getFirstAvailableFile([]string{ + "/System/Library/CoreServices/SystemVersion.plist", + "/System/Library/CoreServices/ServerVersion.plist", + }) +} + +// parsePlistFile process the file pointed by `file` as a .plist file and returns +// a map with the key-values for each pair of correlated and elements +// contained in it. 
+func parsePlistFile(file io.Reader) (map[string]string, error) { + var v plist + + err := xml.NewDecoder(file).Decode(&v) + if err != nil { + return nil, err + } + + if len(v.Dict.Key) != len(v.Dict.String) { + return nil, fmt.Errorf("the number of and elements doesn't match") + } + + properties := make(map[string]string, len(v.Dict.Key)) + for i, key := range v.Dict.Key { + properties[key] = v.Dict.String[i] + } + + return properties, nil +} + +// buildOSRelease builds a string describing the OS release based on the properties +// available on the provided map. It tries to find the `ProductName`, `ProductVersion` +// and `ProductBuildVersion` properties. If some of these properties are not found, +// it returns an empty string. +func buildOSRelease(properties map[string]string) string { + productName := properties["ProductName"] + productVersion := properties["ProductVersion"] + productBuildVersion := properties["ProductBuildVersion"] + + if productName == "" || productVersion == "" || productBuildVersion == "" { + return "" + } + + return fmt.Sprintf("%s %s (%s)", productName, productVersion, productBuildVersion) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go new file mode 100644 index 00000000..fba6790e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go @@ -0,0 +1,154 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build aix || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix dragonfly freebsd linux netbsd openbsd solaris zos + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "bufio" + "fmt" + "io" + "os" + "strings" +) + +// osRelease builds a string describing the operating system release based on the +// properties of the os-release file. If no os-release file is found, or if the +// required properties to build the release description string are missing, an empty +// string is returned instead. For more information about os-release files, see: +// https://www.freedesktop.org/software/systemd/man/os-release.html +func osRelease() string { + file, err := getOSReleaseFile() + if err != nil { + return "" + } + + defer file.Close() + + values := parseOSReleaseFile(file) + + return buildOSRelease(values) +} + +// getOSReleaseFile returns a *os.File pointing to one of the well-known os-release +// files, according to their order of preference. If no file can be opened, it +// returns an error. +func getOSReleaseFile() (*os.File, error) { + return getFirstAvailableFile([]string{"/etc/os-release", "/usr/lib/os-release"}) +} + +// parseOSReleaseFile process the file pointed by `file` as an os-release file and +// returns a map with the key-values contained in it. Empty lines or lines starting +// with a '#' character are ignored, as well as lines with the missing key=value +// separator. Values are unquoted and unescaped. 
+func parseOSReleaseFile(file io.Reader) map[string]string { + values := make(map[string]string) + scanner := bufio.NewScanner(file) + + for scanner.Scan() { + line := scanner.Text() + + if skip(line) { + continue + } + + key, value, ok := parse(line) + if ok { + values[key] = value + } + } + + return values +} + +// skip returns true if the line is blank or starts with a '#' character, and +// therefore should be skipped from processing. +func skip(line string) bool { + line = strings.TrimSpace(line) + + return len(line) == 0 || strings.HasPrefix(line, "#") +} + +// parse attempts to split the provided line on the first '=' character, and then +// sanitize each side of the split before returning them as a key-value pair. +func parse(line string) (string, string, bool) { + parts := strings.SplitN(line, "=", 2) + + if len(parts) != 2 || len(parts[0]) == 0 { + return "", "", false + } + + key := strings.TrimSpace(parts[0]) + value := unescape(unquote(strings.TrimSpace(parts[1]))) + + return key, value, true +} + +// unquote checks whether the string `s` is quoted with double or single quotes +// and, if so, returns a version of the string without them. Otherwise it returns +// the provided string unchanged. +func unquote(s string) string { + if len(s) < 2 { + return s + } + + if (s[0] == '"' || s[0] == '\'') && s[0] == s[len(s)-1] { + return s[1 : len(s)-1] + } + + return s +} + +// unescape removes the `\` prefix from some characters that are expected +// to have it added in front of them for escaping purposes. +func unescape(s string) string { + return strings.NewReplacer( + `\$`, `$`, + `\"`, `"`, + `\'`, `'`, + `\\`, `\`, + "\\`", "`", + ).Replace(s) +} + +// buildOSRelease builds a string describing the OS release based on the properties +// available on the provided map. 
It favors a combination of the `NAME` and `VERSION` +// properties as first option (falling back to `VERSION_ID` if `VERSION` isn't +// found), and using `PRETTY_NAME` alone if some of the previous are not present. If +// none of these properties are found, it returns an empty string. +// +// The rationale behind not using `PRETTY_NAME` as first choice was that, for some +// Linux distributions, it doesn't include the same detail that can be found on the +// individual `NAME` and `VERSION` properties, and combining `PRETTY_NAME` with +// other properties can produce "pretty" redundant strings in some cases. +func buildOSRelease(values map[string]string) string { + var osRelease string + + name := values["NAME"] + version := values["VERSION"] + + if version == "" { + version = values["VERSION_ID"] + } + + if name != "" && version != "" { + osRelease = fmt.Sprintf("%s %s", name, version) + } else { + osRelease = values["PRETTY_NAME"] + } + + return osRelease +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go new file mode 100644 index 00000000..1c84afc1 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go @@ -0,0 +1,90 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "fmt" + "os" + + "golang.org/x/sys/unix" +) + +type unameProvider func(buf *unix.Utsname) (err error) + +var defaultUnameProvider unameProvider = unix.Uname + +var currentUnameProvider = defaultUnameProvider + +func setDefaultUnameProvider() { + setUnameProvider(defaultUnameProvider) +} + +func setUnameProvider(unameProvider unameProvider) { + currentUnameProvider = unameProvider +} + +// platformOSDescription returns a human readable OS version information string. +// The final string combines OS release information (where available) and the +// result of the `uname` system call. +func platformOSDescription() (string, error) { + uname, err := uname() + if err != nil { + return "", err + } + + osRelease := osRelease() + if osRelease != "" { + return fmt.Sprintf("%s (%s)", osRelease, uname), nil + } + + return uname, nil +} + +// uname issues a uname(2) system call (or equivalent on systems which doesn't +// have one) and formats the output in a single string, similar to the output +// of the `uname` commandline program. The final string resembles the one +// obtained with a call to `uname -snrvm`. +func uname() (string, error) { + var utsName unix.Utsname + + err := currentUnameProvider(&utsName) + if err != nil { + return "", err + } + + return fmt.Sprintf("%s %s %s %s %s", + unix.ByteSliceToString(utsName.Sysname[:]), + unix.ByteSliceToString(utsName.Nodename[:]), + unix.ByteSliceToString(utsName.Release[:]), + unix.ByteSliceToString(utsName.Version[:]), + unix.ByteSliceToString(utsName.Machine[:]), + ), nil +} + +// getFirstAvailableFile returns an *os.File of the first available +// file from a list of candidate file paths. 
+func getFirstAvailableFile(candidates []string) (*os.File, error) { + for _, c := range candidates { + file, err := os.Open(c) + if err == nil { + return file, nil + } + } + + return nil, fmt.Errorf("no candidate file available: %v", candidates) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go new file mode 100644 index 00000000..3ebcb534 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go @@ -0,0 +1,34 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !aix +// +build !darwin +// +build !dragonfly +// +build !freebsd +// +build !linux +// +build !netbsd +// +build !openbsd +// +build !solaris +// +build !windows +// +build !zos + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +// platformOSDescription is a placeholder implementation for OSes +// for which this project currently doesn't support os.description +// attribute detection. See build tags declaration early on this file +// for a list of unsupported OSes. 
// platformOSDescription is the stub used on platforms where os.description
// attribute detection is not implemented (see the build-tag declarations at
// the top of this file); it reports an empty description and no error.
func platformOSDescription() (string, error) {
	return "", nil
}
+func platformOSDescription() (string, error) { + k, err := registry.OpenKey( + registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) + + if err != nil { + return "", err + } + + defer k.Close() + + var ( + productName = readProductName(k) + displayVersion = readDisplayVersion(k) + releaseID = readReleaseID(k) + currentMajorVersionNumber = readCurrentMajorVersionNumber(k) + currentMinorVersionNumber = readCurrentMinorVersionNumber(k) + currentBuildNumber = readCurrentBuildNumber(k) + ubr = readUBR(k) + ) + + if displayVersion != "" { + displayVersion += " " + } + + return fmt.Sprintf("%s %s(%s) [Version %s.%s.%s.%s]", + productName, + displayVersion, + releaseID, + currentMajorVersionNumber, + currentMinorVersionNumber, + currentBuildNumber, + ubr, + ), nil +} + +func getStringValue(name string, k registry.Key) string { + value, _, _ := k.GetStringValue(name) + + return value +} + +func getIntegerValue(name string, k registry.Key) uint64 { + value, _, _ := k.GetIntegerValue(name) + + return value +} + +func readProductName(k registry.Key) string { + return getStringValue("ProductName", k) +} + +func readDisplayVersion(k registry.Key) string { + return getStringValue("DisplayVersion", k) +} + +func readReleaseID(k registry.Key) string { + return getStringValue("ReleaseID", k) +} + +func readCurrentMajorVersionNumber(k registry.Key) string { + return strconv.FormatUint(getIntegerValue("CurrentMajorVersionNumber", k), 10) +} + +func readCurrentMinorVersionNumber(k registry.Key) string { + return strconv.FormatUint(getIntegerValue("CurrentMinorVersionNumber", k), 10) +} + +func readCurrentBuildNumber(k registry.Key) string { + return getStringValue("CurrentBuildNumber", k) +} + +func readUBR(k registry.Key) string { + return strconv.FormatUint(getIntegerValue("UBR", k), 10) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go new file mode 100644 index 
// Provider function types abstract process and runtime introspection so that
// tests can substitute deterministic fakes for the OS-backed defaults.
type (
	pidProvider            func() int
	executablePathProvider func() (string, error)
	commandArgsProvider    func() []string
	ownerProvider          func() (*user.User, error)
	runtimeNameProvider    func() string
	runtimeVersionProvider func() string
	runtimeOSProvider      func() string
	runtimeArchProvider    func() string
)

// The real, OS-backed implementations of each provider.
var (
	defaultPidProvider            pidProvider            = os.Getpid
	defaultExecutablePathProvider executablePathProvider = os.Executable
	defaultCommandArgsProvider    commandArgsProvider    = func() []string { return os.Args }
	defaultOwnerProvider          ownerProvider          = user.Current
	// The standard Go compiler is reported as "go"; any other compiler
	// (e.g. gccgo) is reported by its runtime.Compiler name.
	defaultRuntimeNameProvider runtimeNameProvider = func() string {
		if runtime.Compiler == "gc" {
			return "go"
		}
		return runtime.Compiler
	}
	defaultRuntimeVersionProvider runtimeVersionProvider = runtime.Version
	defaultRuntimeOSProvider      runtimeOSProvider      = func() string { return runtime.GOOS }
	defaultRuntimeArchProvider    runtimeArchProvider    = func() string { return runtime.GOARCH }
)
defaultExecutablePathProvider + commandArgs = defaultCommandArgsProvider + owner = defaultOwnerProvider + runtimeName = defaultRuntimeNameProvider + runtimeVersion = defaultRuntimeVersionProvider + runtimeOS = defaultRuntimeOSProvider + runtimeArch = defaultRuntimeArchProvider +) + +func setDefaultOSProviders() { + setOSProviders( + defaultPidProvider, + defaultExecutablePathProvider, + defaultCommandArgsProvider, + ) +} + +func setOSProviders( + pidProvider pidProvider, + executablePathProvider executablePathProvider, + commandArgsProvider commandArgsProvider, +) { + pid = pidProvider + executablePath = executablePathProvider + commandArgs = commandArgsProvider +} + +func setDefaultRuntimeProviders() { + setRuntimeProviders( + defaultRuntimeNameProvider, + defaultRuntimeVersionProvider, + defaultRuntimeOSProvider, + defaultRuntimeArchProvider, + ) +} + +func setRuntimeProviders( + runtimeNameProvider runtimeNameProvider, + runtimeVersionProvider runtimeVersionProvider, + runtimeOSProvider runtimeOSProvider, + runtimeArchProvider runtimeArchProvider, +) { + runtimeName = runtimeNameProvider + runtimeVersion = runtimeVersionProvider + runtimeOS = runtimeOSProvider + runtimeArch = runtimeArchProvider +} + +func setDefaultUserProviders() { + setUserProviders(defaultOwnerProvider) +} + +func setUserProviders(ownerProvider ownerProvider) { + owner = ownerProvider +} + +type processPIDDetector struct{} +type processExecutableNameDetector struct{} +type processExecutablePathDetector struct{} +type processCommandArgsDetector struct{} +type processOwnerDetector struct{} +type processRuntimeNameDetector struct{} +type processRuntimeVersionDetector struct{} +type processRuntimeDescriptionDetector struct{} + +// Detect returns a *Resource that describes the process identifier (PID) of the +// executing process. 
+func (processPIDDetector) Detect(ctx context.Context) (*Resource, error) { + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessPID(pid())), nil +} + +// Detect returns a *Resource that describes the name of the process executable. +func (processExecutableNameDetector) Detect(ctx context.Context) (*Resource, error) { + executableName := filepath.Base(commandArgs()[0]) + + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutableName(executableName)), nil +} + +// Detect returns a *Resource that describes the full path of the process executable. +func (processExecutablePathDetector) Detect(ctx context.Context) (*Resource, error) { + executablePath, err := executablePath() + if err != nil { + return nil, err + } + + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutablePath(executablePath)), nil +} + +// Detect returns a *Resource that describes all the command arguments as received +// by the process. +func (processCommandArgsDetector) Detect(ctx context.Context) (*Resource, error) { + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessCommandArgs(commandArgs()...)), nil +} + +// Detect returns a *Resource that describes the username of the user that owns the +// process. +func (processOwnerDetector) Detect(ctx context.Context) (*Resource, error) { + owner, err := owner() + if err != nil { + return nil, err + } + + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessOwner(owner.Username)), nil +} + +// Detect returns a *Resource that describes the name of the compiler used to compile +// this process image. +func (processRuntimeNameDetector) Detect(ctx context.Context) (*Resource, error) { + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeName(runtimeName())), nil +} + +// Detect returns a *Resource that describes the version of the runtime of this process. 
+func (processRuntimeVersionDetector) Detect(ctx context.Context) (*Resource, error) { + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeVersion(runtimeVersion())), nil +} + +// Detect returns a *Resource that describes the runtime of this process. +func (processRuntimeDescriptionDetector) Detect(ctx context.Context) (*Resource, error) { + runtimeDescription := fmt.Sprintf( + "go version %s %s/%s", runtimeVersion(), runtimeOS(), runtimeArch()) + + return NewWithAttributes( + semconv.SchemaURL, + semconv.ProcessRuntimeDescription(runtimeDescription), + ), nil +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go new file mode 100644 index 00000000..c425ff05 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go @@ -0,0 +1,282 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + "errors" + "fmt" + "sync" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" +) + +// Resource describes an entity about which identifying information +// and metadata is exposed. Resource is an immutable object, +// equivalent to a map from key to unique value. +// +// Resources should be passed and stored as pointers +// (`*resource.Resource`). The `nil` value is equivalent to an empty +// Resource. 
+type Resource struct { + attrs attribute.Set + schemaURL string +} + +var ( + emptyResource Resource + defaultResource *Resource + defaultResourceOnce sync.Once +) + +var errMergeConflictSchemaURL = errors.New("cannot merge resource due to conflicting Schema URL") + +// New returns a Resource combined from the user-provided detectors. +func New(ctx context.Context, opts ...Option) (*Resource, error) { + cfg := config{} + for _, opt := range opts { + cfg = opt.apply(cfg) + } + + resource, err := Detect(ctx, cfg.detectors...) + + var err2 error + resource, err2 = Merge(resource, &Resource{schemaURL: cfg.schemaURL}) + if err == nil { + err = err2 + } else if err2 != nil { + err = fmt.Errorf("detecting resources: %s", []string{err.Error(), err2.Error()}) + } + + return resource, err +} + +// NewWithAttributes creates a resource from attrs and associates the resource with a +// schema URL. If attrs contains duplicate keys, the last value will be used. If attrs +// contains any invalid items those items will be dropped. The attrs are assumed to be +// in a schema identified by schemaURL. +func NewWithAttributes(schemaURL string, attrs ...attribute.KeyValue) *Resource { + resource := NewSchemaless(attrs...) + resource.schemaURL = schemaURL + return resource +} + +// NewSchemaless creates a resource from attrs. If attrs contains duplicate keys, +// the last value will be used. If attrs contains any invalid items those items will +// be dropped. The resource will not be associated with a schema URL. If the schema +// of the attrs is known use NewWithAttributes instead. 
+func NewSchemaless(attrs ...attribute.KeyValue) *Resource { + if len(attrs) == 0 { + return &emptyResource + } + + // Ensure attributes comply with the specification: + // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.0.1/specification/common/common.md#attributes + s, _ := attribute.NewSetWithFiltered(attrs, func(kv attribute.KeyValue) bool { + return kv.Valid() + }) + + // If attrs only contains invalid entries do not allocate a new resource. + if s.Len() == 0 { + return &emptyResource + } + + return &Resource{attrs: s} //nolint +} + +// String implements the Stringer interface and provides a +// human-readable form of the resource. +// +// Avoid using this representation as the key in a map of resources, +// use Equivalent() as the key instead. +func (r *Resource) String() string { + if r == nil { + return "" + } + return r.attrs.Encoded(attribute.DefaultEncoder()) +} + +// MarshalLog is the marshaling function used by the logging system to represent this exporter. +func (r *Resource) MarshalLog() interface{} { + return struct { + Attributes attribute.Set + SchemaURL string + }{ + Attributes: r.attrs, + SchemaURL: r.schemaURL, + } +} + +// Attributes returns a copy of attributes from the resource in a sorted order. +// To avoid allocating a new slice, use an iterator. +func (r *Resource) Attributes() []attribute.KeyValue { + if r == nil { + r = Empty() + } + return r.attrs.ToSlice() +} + +// SchemaURL returns the schema URL associated with Resource r. +func (r *Resource) SchemaURL() string { + if r == nil { + return "" + } + return r.schemaURL +} + +// Iter returns an iterator of the Resource attributes. +// This is ideal to use if you do not want a copy of the attributes. +func (r *Resource) Iter() attribute.Iterator { + if r == nil { + r = Empty() + } + return r.attrs.Iter() +} + +// Equal returns true when a Resource is equivalent to this Resource. 
+func (r *Resource) Equal(eq *Resource) bool { + if r == nil { + r = Empty() + } + if eq == nil { + eq = Empty() + } + return r.Equivalent() == eq.Equivalent() +} + +// Merge creates a new resource by combining resource a and b. +// +// If there are common keys between resource a and b, then the value +// from resource b will overwrite the value from resource a, even +// if resource b's value is empty. +// +// The SchemaURL of the resources will be merged according to the spec rules: +// https://github.com/open-telemetry/opentelemetry-specification/blob/bad49c714a62da5493f2d1d9bafd7ebe8c8ce7eb/specification/resource/sdk.md#merge +// If the resources have different non-empty schemaURL an empty resource and an error +// will be returned. +func Merge(a, b *Resource) (*Resource, error) { + if a == nil && b == nil { + return Empty(), nil + } + if a == nil { + return b, nil + } + if b == nil { + return a, nil + } + + // Merge the schema URL. + var schemaURL string + switch true { + case a.schemaURL == "": + schemaURL = b.schemaURL + case b.schemaURL == "": + schemaURL = a.schemaURL + case a.schemaURL == b.schemaURL: + schemaURL = a.schemaURL + default: + return Empty(), errMergeConflictSchemaURL + } + + // Note: 'b' attributes will overwrite 'a' with last-value-wins in attribute.Key() + // Meaning this is equivalent to: append(a.Attributes(), b.Attributes()...) + mi := attribute.NewMergeIterator(b.Set(), a.Set()) + combine := make([]attribute.KeyValue, 0, a.Len()+b.Len()) + for mi.Next() { + combine = append(combine, mi.Attribute()) + } + merged := NewWithAttributes(schemaURL, combine...) + return merged, nil +} + +// Empty returns an instance of Resource with no attributes. It is +// equivalent to a `nil` Resource. +func Empty() *Resource { + return &emptyResource +} + +// Default returns an instance of Resource with a default +// "service.name" and OpenTelemetrySDK attributes. 
+func Default() *Resource { + defaultResourceOnce.Do(func() { + var err error + defaultResource, err = Detect( + context.Background(), + defaultServiceNameDetector{}, + fromEnv{}, + telemetrySDK{}, + ) + if err != nil { + otel.Handle(err) + } + // If Detect did not return a valid resource, fall back to emptyResource. + if defaultResource == nil { + defaultResource = &emptyResource + } + }) + return defaultResource +} + +// Environment returns an instance of Resource with attributes +// extracted from the OTEL_RESOURCE_ATTRIBUTES environment variable. +func Environment() *Resource { + detector := &fromEnv{} + resource, err := detector.Detect(context.Background()) + if err != nil { + otel.Handle(err) + } + return resource +} + +// Equivalent returns an object that can be compared for equality +// between two resources. This value is suitable for use as a key in +// a map. +func (r *Resource) Equivalent() attribute.Distinct { + return r.Set().Equivalent() +} + +// Set returns the equivalent *attribute.Set of this resource's attributes. +func (r *Resource) Set() *attribute.Set { + if r == nil { + r = Empty() + } + return &r.attrs +} + +// MarshalJSON encodes the resource attributes as a JSON list of { "Key": +// "...", "Value": ... } pairs in order sorted by key. +func (r *Resource) MarshalJSON() ([]byte, error) { + if r == nil { + r = Empty() + } + return r.attrs.MarshalJSON() +} + +// Len returns the number of unique key-values in this Resource. +func (r *Resource) Len() int { + if r == nil { + return 0 + } + return r.attrs.Len() +} + +// Encoded returns an encoded representation of the resource. 
+func (r *Resource) Encoded(enc attribute.Encoder) string { + if r == nil { + return "" + } + return r.attrs.Encoded(enc) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go new file mode 100644 index 00000000..a2d7db49 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go @@ -0,0 +1,432 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import ( + "context" + "runtime" + "sync" + "sync/atomic" + "time" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/sdk/internal/env" + "go.opentelemetry.io/otel/trace" +) + +// Defaults for BatchSpanProcessorOptions. +const ( + DefaultMaxQueueSize = 2048 + DefaultScheduleDelay = 5000 + DefaultExportTimeout = 30000 + DefaultMaxExportBatchSize = 512 +) + +// BatchSpanProcessorOption configures a BatchSpanProcessor. +type BatchSpanProcessorOption func(o *BatchSpanProcessorOptions) + +// BatchSpanProcessorOptions is configuration settings for a +// BatchSpanProcessor. +type BatchSpanProcessorOptions struct { + // MaxQueueSize is the maximum queue size to buffer spans for delayed processing. If the + // queue gets full it drops the spans. Use BlockOnQueueFull to change this behavior. + // The default value of MaxQueueSize is 2048. 
+ MaxQueueSize int + + // BatchTimeout is the maximum duration for constructing a batch. Processor + // forcefully sends available spans when timeout is reached. + // The default value of BatchTimeout is 5000 msec. + BatchTimeout time.Duration + + // ExportTimeout specifies the maximum duration for exporting spans. If the timeout + // is reached, the export will be cancelled. + // The default value of ExportTimeout is 30000 msec. + ExportTimeout time.Duration + + // MaxExportBatchSize is the maximum number of spans to process in a single batch. + // If there are more than one batch worth of spans then it processes multiple batches + // of spans one batch after the other without any delay. + // The default value of MaxExportBatchSize is 512. + MaxExportBatchSize int + + // BlockOnQueueFull blocks onEnd() and onStart() method if the queue is full + // AND if BlockOnQueueFull is set to true. + // Blocking option should be used carefully as it can severely affect the performance of an + // application. + BlockOnQueueFull bool +} + +// batchSpanProcessor is a SpanProcessor that batches asynchronously-received +// spans and sends them to a trace.Exporter when complete. +type batchSpanProcessor struct { + e SpanExporter + o BatchSpanProcessorOptions + + queue chan ReadOnlySpan + dropped uint32 + + batch []ReadOnlySpan + batchMutex sync.Mutex + timer *time.Timer + stopWait sync.WaitGroup + stopOnce sync.Once + stopCh chan struct{} +} + +var _ SpanProcessor = (*batchSpanProcessor)(nil) + +// NewBatchSpanProcessor creates a new SpanProcessor that will send completed +// span batches to the exporter with the supplied options. +// +// If the exporter is nil, the span processor will preform no action. 
+func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorOption) SpanProcessor { + maxQueueSize := env.BatchSpanProcessorMaxQueueSize(DefaultMaxQueueSize) + maxExportBatchSize := env.BatchSpanProcessorMaxExportBatchSize(DefaultMaxExportBatchSize) + + if maxExportBatchSize > maxQueueSize { + if DefaultMaxExportBatchSize > maxQueueSize { + maxExportBatchSize = maxQueueSize + } else { + maxExportBatchSize = DefaultMaxExportBatchSize + } + } + + o := BatchSpanProcessorOptions{ + BatchTimeout: time.Duration(env.BatchSpanProcessorScheduleDelay(DefaultScheduleDelay)) * time.Millisecond, + ExportTimeout: time.Duration(env.BatchSpanProcessorExportTimeout(DefaultExportTimeout)) * time.Millisecond, + MaxQueueSize: maxQueueSize, + MaxExportBatchSize: maxExportBatchSize, + } + for _, opt := range options { + opt(&o) + } + bsp := &batchSpanProcessor{ + e: exporter, + o: o, + batch: make([]ReadOnlySpan, 0, o.MaxExportBatchSize), + timer: time.NewTimer(o.BatchTimeout), + queue: make(chan ReadOnlySpan, o.MaxQueueSize), + stopCh: make(chan struct{}), + } + + bsp.stopWait.Add(1) + go func() { + defer bsp.stopWait.Done() + bsp.processQueue() + bsp.drainQueue() + }() + + return bsp +} + +// OnStart method does nothing. +func (bsp *batchSpanProcessor) OnStart(parent context.Context, s ReadWriteSpan) {} + +// OnEnd method enqueues a ReadOnlySpan for later processing. +func (bsp *batchSpanProcessor) OnEnd(s ReadOnlySpan) { + // Do not enqueue spans if we are just going to drop them. + if bsp.e == nil { + return + } + bsp.enqueue(s) +} + +// Shutdown flushes the queue and waits until all spans are processed. +// It only executes once. Subsequent call does nothing. 
+func (bsp *batchSpanProcessor) Shutdown(ctx context.Context) error { + var err error + bsp.stopOnce.Do(func() { + wait := make(chan struct{}) + go func() { + close(bsp.stopCh) + bsp.stopWait.Wait() + if bsp.e != nil { + if err := bsp.e.Shutdown(ctx); err != nil { + otel.Handle(err) + } + } + close(wait) + }() + // Wait until the wait group is done or the context is cancelled + select { + case <-wait: + case <-ctx.Done(): + err = ctx.Err() + } + }) + return err +} + +type forceFlushSpan struct { + ReadOnlySpan + flushed chan struct{} +} + +func (f forceFlushSpan) SpanContext() trace.SpanContext { + return trace.NewSpanContext(trace.SpanContextConfig{TraceFlags: trace.FlagsSampled}) +} + +// ForceFlush exports all ended spans that have not yet been exported. +func (bsp *batchSpanProcessor) ForceFlush(ctx context.Context) error { + var err error + if bsp.e != nil { + flushCh := make(chan struct{}) + if bsp.enqueueBlockOnQueueFull(ctx, forceFlushSpan{flushed: flushCh}) { + select { + case <-flushCh: + // Processed any items in queue prior to ForceFlush being called + case <-ctx.Done(): + return ctx.Err() + } + } + + wait := make(chan error) + go func() { + wait <- bsp.exportSpans(ctx) + close(wait) + }() + // Wait until the export is finished or the context is cancelled/timed out + select { + case err = <-wait: + case <-ctx.Done(): + err = ctx.Err() + } + } + return err +} + +// WithMaxQueueSize returns a BatchSpanProcessorOption that configures the +// maximum queue size allowed for a BatchSpanProcessor. +func WithMaxQueueSize(size int) BatchSpanProcessorOption { + return func(o *BatchSpanProcessorOptions) { + o.MaxQueueSize = size + } +} + +// WithMaxExportBatchSize returns a BatchSpanProcessorOption that configures +// the maximum export batch size allowed for a BatchSpanProcessor. 
+func WithMaxExportBatchSize(size int) BatchSpanProcessorOption { + return func(o *BatchSpanProcessorOptions) { + o.MaxExportBatchSize = size + } +} + +// WithBatchTimeout returns a BatchSpanProcessorOption that configures the +// maximum delay allowed for a BatchSpanProcessor before it will export any +// held span (whether the queue is full or not). +func WithBatchTimeout(delay time.Duration) BatchSpanProcessorOption { + return func(o *BatchSpanProcessorOptions) { + o.BatchTimeout = delay + } +} + +// WithExportTimeout returns a BatchSpanProcessorOption that configures the +// amount of time a BatchSpanProcessor waits for an exporter to export before +// abandoning the export. +func WithExportTimeout(timeout time.Duration) BatchSpanProcessorOption { + return func(o *BatchSpanProcessorOptions) { + o.ExportTimeout = timeout + } +} + +// WithBlocking returns a BatchSpanProcessorOption that configures a +// BatchSpanProcessor to wait for enqueue operations to succeed instead of +// dropping data when the queue is full. +func WithBlocking() BatchSpanProcessorOption { + return func(o *BatchSpanProcessorOptions) { + o.BlockOnQueueFull = true + } +} + +// exportSpans is a subroutine of processing and draining the queue. +func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error { + bsp.timer.Reset(bsp.o.BatchTimeout) + + bsp.batchMutex.Lock() + defer bsp.batchMutex.Unlock() + + if bsp.o.ExportTimeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, bsp.o.ExportTimeout) + defer cancel() + } + + if l := len(bsp.batch); l > 0 { + global.Debug("exporting spans", "count", len(bsp.batch), "total_dropped", atomic.LoadUint32(&bsp.dropped)) + err := bsp.e.ExportSpans(ctx, bsp.batch) + + // A new batch is always created after exporting, even if the batch failed to be exported. 
+ // + // It is up to the exporter to implement any type of retry logic if a batch is failing + // to be exported, since it is specific to the protocol and backend being sent to. + bsp.batch = bsp.batch[:0] + + if err != nil { + return err + } + } + return nil +} + +// processQueue removes spans from the `queue` channel until processor +// is shut down. It calls the exporter in batches of up to MaxExportBatchSize +// waiting up to BatchTimeout to form a batch. +func (bsp *batchSpanProcessor) processQueue() { + defer bsp.timer.Stop() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + for { + select { + case <-bsp.stopCh: + return + case <-bsp.timer.C: + if err := bsp.exportSpans(ctx); err != nil { + otel.Handle(err) + } + case sd := <-bsp.queue: + if ffs, ok := sd.(forceFlushSpan); ok { + close(ffs.flushed) + continue + } + bsp.batchMutex.Lock() + bsp.batch = append(bsp.batch, sd) + shouldExport := len(bsp.batch) >= bsp.o.MaxExportBatchSize + bsp.batchMutex.Unlock() + if shouldExport { + if !bsp.timer.Stop() { + <-bsp.timer.C + } + if err := bsp.exportSpans(ctx); err != nil { + otel.Handle(err) + } + } + } + } +} + +// drainQueue awaits the any caller that had added to bsp.stopWait +// to finish the enqueue, then exports the final batch. 
+func (bsp *batchSpanProcessor) drainQueue() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + for { + select { + case sd := <-bsp.queue: + if sd == nil { + if err := bsp.exportSpans(ctx); err != nil { + otel.Handle(err) + } + return + } + + bsp.batchMutex.Lock() + bsp.batch = append(bsp.batch, sd) + shouldExport := len(bsp.batch) == bsp.o.MaxExportBatchSize + bsp.batchMutex.Unlock() + + if shouldExport { + if err := bsp.exportSpans(ctx); err != nil { + otel.Handle(err) + } + } + default: + close(bsp.queue) + } + } +} + +func (bsp *batchSpanProcessor) enqueue(sd ReadOnlySpan) { + ctx := context.TODO() + if bsp.o.BlockOnQueueFull { + bsp.enqueueBlockOnQueueFull(ctx, sd) + } else { + bsp.enqueueDrop(ctx, sd) + } +} + +func recoverSendOnClosedChan() { + x := recover() + switch err := x.(type) { + case nil: + return + case runtime.Error: + if err.Error() == "send on closed channel" { + return + } + } + panic(x) +} + +func (bsp *batchSpanProcessor) enqueueBlockOnQueueFull(ctx context.Context, sd ReadOnlySpan) bool { + if !sd.SpanContext().IsSampled() { + return false + } + + // This ensures the bsp.queue<- below does not panic as the + // processor shuts down. + defer recoverSendOnClosedChan() + + select { + case <-bsp.stopCh: + return false + default: + } + + select { + case bsp.queue <- sd: + return true + case <-ctx.Done(): + return false + } +} + +func (bsp *batchSpanProcessor) enqueueDrop(ctx context.Context, sd ReadOnlySpan) bool { + if !sd.SpanContext().IsSampled() { + return false + } + + // This ensures the bsp.queue<- below does not panic as the + // processor shuts down. + defer recoverSendOnClosedChan() + + select { + case <-bsp.stopCh: + return false + default: + } + + select { + case bsp.queue <- sd: + return true + default: + atomic.AddUint32(&bsp.dropped, 1) + } + return false +} + +// MarshalLog is the marshaling function used by the logging system to represent this exporter. 
+func (bsp *batchSpanProcessor) MarshalLog() interface{} { + return struct { + Type string + SpanExporter SpanExporter + Config BatchSpanProcessorOptions + }{ + Type: "BatchSpanProcessor", + SpanExporter: bsp.e, + Config: bsp.o, + } +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go b/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go new file mode 100644 index 00000000..0285e99b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go @@ -0,0 +1,21 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package trace contains support for OpenTelemetry distributed tracing. + +The following assumes a basic familiarity with OpenTelemetry concepts. +See https://opentelemetry.io. +*/ +package trace // import "go.opentelemetry.io/otel/sdk/trace" diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/event.go b/vendor/go.opentelemetry.io/otel/sdk/trace/event.go new file mode 100644 index 00000000..1e3b4267 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/event.go @@ -0,0 +1,37 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import ( + "time" + + "go.opentelemetry.io/otel/attribute" +) + +// Event is a thing that happened during a Span's lifetime. +type Event struct { + // Name is the name of this event + Name string + + // Attributes describe the aspects of the event. + Attributes []attribute.KeyValue + + // DroppedAttributeCount is the number of attributes that were not + // recorded due to configured limits being reached. + DroppedAttributeCount int + + // Time at which this event was recorded. + Time time.Time +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go b/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go new file mode 100644 index 00000000..d1c86e59 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go @@ -0,0 +1,44 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +// evictedQueue is a FIFO queue with a configurable capacity. +type evictedQueue struct { + queue []interface{} + capacity int + droppedCount int +} + +func newEvictedQueue(capacity int) evictedQueue { + // Do not pre-allocate queue, do this lazily. + return evictedQueue{capacity: capacity} +} + +// add adds value to the evictedQueue eq. If eq is at capacity, the oldest +// queued value will be discarded and the drop count incremented. +func (eq *evictedQueue) add(value interface{}) { + if eq.capacity == 0 { + eq.droppedCount++ + return + } + + if eq.capacity > 0 && len(eq.queue) == eq.capacity { + // Drop first-in while avoiding allocating more capacity to eq.queue. + copy(eq.queue[:eq.capacity-1], eq.queue[1:]) + eq.queue = eq.queue[:eq.capacity-1] + eq.droppedCount++ + } + eq.queue = append(eq.queue, value) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go b/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go new file mode 100644 index 00000000..bba24604 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go @@ -0,0 +1,77 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import ( + "context" + crand "crypto/rand" + "encoding/binary" + "math/rand" + "sync" + + "go.opentelemetry.io/otel/trace" +) + +// IDGenerator allows custom generators for TraceID and SpanID. +type IDGenerator interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // NewIDs returns a new trace and span ID. + NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID) + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // NewSpanID returns a ID for a new span in the trace with traceID. + NewSpanID(ctx context.Context, traceID trace.TraceID) trace.SpanID + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. +} + +type randomIDGenerator struct { + sync.Mutex + randSource *rand.Rand +} + +var _ IDGenerator = &randomIDGenerator{} + +// NewSpanID returns a non-zero span ID from a randomly-chosen sequence. +func (gen *randomIDGenerator) NewSpanID(ctx context.Context, traceID trace.TraceID) trace.SpanID { + gen.Lock() + defer gen.Unlock() + sid := trace.SpanID{} + _, _ = gen.randSource.Read(sid[:]) + return sid +} + +// NewIDs returns a non-zero trace ID and a non-zero span ID from a +// randomly-chosen sequence. 
+func (gen *randomIDGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID) { + gen.Lock() + defer gen.Unlock() + tid := trace.TraceID{} + _, _ = gen.randSource.Read(tid[:]) + sid := trace.SpanID{} + _, _ = gen.randSource.Read(sid[:]) + return tid, sid +} + +func defaultIDGenerator() IDGenerator { + gen := &randomIDGenerator{} + var rngSeed int64 + _ = binary.Read(crand.Reader, binary.LittleEndian, &rngSeed) + gen.randSource = rand.New(rand.NewSource(rngSeed)) + return gen +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/link.go b/vendor/go.opentelemetry.io/otel/sdk/trace/link.go new file mode 100644 index 00000000..19cfea4b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/link.go @@ -0,0 +1,34 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import ( + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +// Link is the relationship between two Spans. The relationship can be within +// the same Trace or across different Traces. +type Link struct { + // SpanContext of the linked Span. + SpanContext trace.SpanContext + + // Attributes describe the aspects of the link. + Attributes []attribute.KeyValue + + // DroppedAttributeCount is the number of attributes that were not + // recorded due to configured limits being reached. 
+ DroppedAttributeCount int +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go new file mode 100644 index 00000000..201c1781 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go @@ -0,0 +1,461 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import ( + "context" + "fmt" + "sync" + "sync/atomic" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/resource" + "go.opentelemetry.io/otel/trace" +) + +const ( + defaultTracerName = "go.opentelemetry.io/otel/sdk/tracer" +) + +// tracerProviderConfig. +type tracerProviderConfig struct { + // processors contains collection of SpanProcessors that are processing pipeline + // for spans in the trace signal. + // SpanProcessors registered with a TracerProvider and are called at the start + // and end of a Span's lifecycle, and are called in the order they are + // registered. + processors []SpanProcessor + + // sampler is the default sampler used when creating new spans. + sampler Sampler + + // idGenerator is used to generate all Span and Trace IDs when needed. + idGenerator IDGenerator + + // spanLimits defines the attribute, event, and link limits for spans. 
+ spanLimits SpanLimits + + // resource contains attributes representing an entity that produces telemetry. + resource *resource.Resource +} + +// MarshalLog is the marshaling function used by the logging system to represent this exporter. +func (cfg tracerProviderConfig) MarshalLog() interface{} { + return struct { + SpanProcessors []SpanProcessor + SamplerType string + IDGeneratorType string + SpanLimits SpanLimits + Resource *resource.Resource + }{ + SpanProcessors: cfg.processors, + SamplerType: fmt.Sprintf("%T", cfg.sampler), + IDGeneratorType: fmt.Sprintf("%T", cfg.idGenerator), + SpanLimits: cfg.spanLimits, + Resource: cfg.resource, + } +} + +// TracerProvider is an OpenTelemetry TracerProvider. It provides Tracers to +// instrumentation so it can trace operational flow through a system. +type TracerProvider struct { + mu sync.Mutex + namedTracer map[instrumentation.Scope]*tracer + spanProcessors atomic.Value + isShutdown bool + + // These fields are not protected by the lock mu. They are assumed to be + // immutable after creation of the TracerProvider. + sampler Sampler + idGenerator IDGenerator + spanLimits SpanLimits + resource *resource.Resource +} + +var _ trace.TracerProvider = &TracerProvider{} + +// NewTracerProvider returns a new and configured TracerProvider. +// +// By default the returned TracerProvider is configured with: +// - a ParentBased(AlwaysSample) Sampler +// - a random number IDGenerator +// - the resource.Default() Resource +// - the default SpanLimits. +// +// The passed opts are used to override these default values and configure the +// returned TracerProvider appropriately. 
+func NewTracerProvider(opts ...TracerProviderOption) *TracerProvider { + o := tracerProviderConfig{ + spanLimits: NewSpanLimits(), + } + o = applyTracerProviderEnvConfigs(o) + + for _, opt := range opts { + o = opt.apply(o) + } + + o = ensureValidTracerProviderConfig(o) + + tp := &TracerProvider{ + namedTracer: make(map[instrumentation.Scope]*tracer), + sampler: o.sampler, + idGenerator: o.idGenerator, + spanLimits: o.spanLimits, + resource: o.resource, + } + global.Info("TracerProvider created", "config", o) + + spss := spanProcessorStates{} + for _, sp := range o.processors { + spss = append(spss, newSpanProcessorState(sp)) + } + tp.spanProcessors.Store(spss) + + return tp +} + +// Tracer returns a Tracer with the given name and options. If a Tracer for +// the given name and options does not exist it is created, otherwise the +// existing Tracer is returned. +// +// If name is empty, DefaultTracerName is used instead. +// +// This method is safe to be called concurrently. +func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer { + c := trace.NewTracerConfig(opts...) + + p.mu.Lock() + defer p.mu.Unlock() + if name == "" { + name = defaultTracerName + } + is := instrumentation.Scope{ + Name: name, + Version: c.InstrumentationVersion(), + SchemaURL: c.SchemaURL(), + } + t, ok := p.namedTracer[is] + if !ok { + t = &tracer{ + provider: p, + instrumentationScope: is, + } + p.namedTracer[is] = t + global.Info("Tracer created", "name", name, "version", c.InstrumentationVersion(), "schemaURL", c.SchemaURL()) + } + return t +} + +// RegisterSpanProcessor adds the given SpanProcessor to the list of SpanProcessors. +func (p *TracerProvider) RegisterSpanProcessor(sp SpanProcessor) { + p.mu.Lock() + defer p.mu.Unlock() + if p.isShutdown { + return + } + newSPS := spanProcessorStates{} + newSPS = append(newSPS, p.spanProcessors.Load().(spanProcessorStates)...) 
+ newSPS = append(newSPS, newSpanProcessorState(sp)) + p.spanProcessors.Store(newSPS) +} + +// UnregisterSpanProcessor removes the given SpanProcessor from the list of SpanProcessors. +func (p *TracerProvider) UnregisterSpanProcessor(sp SpanProcessor) { + p.mu.Lock() + defer p.mu.Unlock() + if p.isShutdown { + return + } + old := p.spanProcessors.Load().(spanProcessorStates) + if len(old) == 0 { + return + } + spss := spanProcessorStates{} + spss = append(spss, old...) + + // stop the span processor if it is started and remove it from the list + var stopOnce *spanProcessorState + var idx int + for i, sps := range spss { + if sps.sp == sp { + stopOnce = sps + idx = i + } + } + if stopOnce != nil { + stopOnce.state.Do(func() { + if err := sp.Shutdown(context.Background()); err != nil { + otel.Handle(err) + } + }) + } + if len(spss) > 1 { + copy(spss[idx:], spss[idx+1:]) + } + spss[len(spss)-1] = nil + spss = spss[:len(spss)-1] + + p.spanProcessors.Store(spss) +} + +// ForceFlush immediately exports all spans that have not yet been exported for +// all the registered span processors. +func (p *TracerProvider) ForceFlush(ctx context.Context) error { + spss := p.spanProcessors.Load().(spanProcessorStates) + if len(spss) == 0 { + return nil + } + + for _, sps := range spss { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + if err := sps.sp.ForceFlush(ctx); err != nil { + return err + } + } + return nil +} + +// Shutdown shuts down TracerProvider. All registered span processors are shut down +// in the order they were registered and any held computational resources are released. 
+func (p *TracerProvider) Shutdown(ctx context.Context) error { + spss := p.spanProcessors.Load().(spanProcessorStates) + if len(spss) == 0 { + return nil + } + + p.mu.Lock() + defer p.mu.Unlock() + p.isShutdown = true + + var retErr error + for _, sps := range spss { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + var err error + sps.state.Do(func() { + err = sps.sp.Shutdown(ctx) + }) + if err != nil { + if retErr == nil { + retErr = err + } else { + // Poor man's list of errors + retErr = fmt.Errorf("%v; %v", retErr, err) + } + } + } + p.spanProcessors.Store(spanProcessorStates{}) + return retErr +} + +// TracerProviderOption configures a TracerProvider. +type TracerProviderOption interface { + apply(tracerProviderConfig) tracerProviderConfig +} + +type traceProviderOptionFunc func(tracerProviderConfig) tracerProviderConfig + +func (fn traceProviderOptionFunc) apply(cfg tracerProviderConfig) tracerProviderConfig { + return fn(cfg) +} + +// WithSyncer registers the exporter with the TracerProvider using a +// SimpleSpanProcessor. +// +// This is not recommended for production use. The synchronous nature of the +// SimpleSpanProcessor that will wrap the exporter make it good for testing, +// debugging, or showing examples of other feature, but it will be slow and +// have a high computation resource usage overhead. The WithBatcher option is +// recommended for production use instead. +func WithSyncer(e SpanExporter) TracerProviderOption { + return WithSpanProcessor(NewSimpleSpanProcessor(e)) +} + +// WithBatcher registers the exporter with the TracerProvider using a +// BatchSpanProcessor configured with the passed opts. +func WithBatcher(e SpanExporter, opts ...BatchSpanProcessorOption) TracerProviderOption { + return WithSpanProcessor(NewBatchSpanProcessor(e, opts...)) +} + +// WithSpanProcessor registers the SpanProcessor with a TracerProvider. 
+func WithSpanProcessor(sp SpanProcessor) TracerProviderOption { + return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig { + cfg.processors = append(cfg.processors, sp) + return cfg + }) +} + +// WithResource returns a TracerProviderOption that will configure the +// Resource r as a TracerProvider's Resource. The configured Resource is +// referenced by all the Tracers the TracerProvider creates. It represents the +// entity producing telemetry. +// +// If this option is not used, the TracerProvider will use the +// resource.Default() Resource by default. +func WithResource(r *resource.Resource) TracerProviderOption { + return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig { + var err error + cfg.resource, err = resource.Merge(resource.Environment(), r) + if err != nil { + otel.Handle(err) + } + return cfg + }) +} + +// WithIDGenerator returns a TracerProviderOption that will configure the +// IDGenerator g as a TracerProvider's IDGenerator. The configured IDGenerator +// is used by the Tracers the TracerProvider creates to generate new Span and +// Trace IDs. +// +// If this option is not used, the TracerProvider will use a random number +// IDGenerator by default. +func WithIDGenerator(g IDGenerator) TracerProviderOption { + return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig { + if g != nil { + cfg.idGenerator = g + } + return cfg + }) +} + +// WithSampler returns a TracerProviderOption that will configure the Sampler +// s as a TracerProvider's Sampler. The configured Sampler is used by the +// Tracers the TracerProvider creates to make their sampling decisions for the +// Spans they create. +// +// This option overrides the Sampler configured through the OTEL_TRACES_SAMPLER +// and OTEL_TRACES_SAMPLER_ARG environment variables. 
If this option is not used +// and the sampler is not configured through environment variables or the environment +// contains invalid/unsupported configuration, the TracerProvider will use a +// ParentBased(AlwaysSample) Sampler by default. +func WithSampler(s Sampler) TracerProviderOption { + return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig { + if s != nil { + cfg.sampler = s + } + return cfg + }) +} + +// WithSpanLimits returns a TracerProviderOption that configures a +// TracerProvider to use the SpanLimits sl. These SpanLimits bound any Span +// created by a Tracer from the TracerProvider. +// +// If any field of sl is zero or negative it will be replaced with the default +// value for that field. +// +// If this or WithRawSpanLimits are not provided, the TracerProvider will use +// the limits defined by environment variables, or the defaults if unset. +// Refer to the NewSpanLimits documentation for information about this +// relationship. +// +// Deprecated: Use WithRawSpanLimits instead which allows setting unlimited +// and zero limits. This option will be kept until the next major version +// incremented release. 
+func WithSpanLimits(sl SpanLimits) TracerProviderOption { + if sl.AttributeValueLengthLimit <= 0 { + sl.AttributeValueLengthLimit = DefaultAttributeValueLengthLimit + } + if sl.AttributeCountLimit <= 0 { + sl.AttributeCountLimit = DefaultAttributeCountLimit + } + if sl.EventCountLimit <= 0 { + sl.EventCountLimit = DefaultEventCountLimit + } + if sl.AttributePerEventCountLimit <= 0 { + sl.AttributePerEventCountLimit = DefaultAttributePerEventCountLimit + } + if sl.LinkCountLimit <= 0 { + sl.LinkCountLimit = DefaultLinkCountLimit + } + if sl.AttributePerLinkCountLimit <= 0 { + sl.AttributePerLinkCountLimit = DefaultAttributePerLinkCountLimit + } + return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig { + cfg.spanLimits = sl + return cfg + }) +} + +// WithRawSpanLimits returns a TracerProviderOption that configures a +// TracerProvider to use these limits. These limits bound any Span created by +// a Tracer from the TracerProvider. +// +// The limits will be used as-is. Zero or negative values will not be changed +// to the default value like WithSpanLimits does. Setting a limit to zero will +// effectively disable the related resource it limits and setting to a +// negative value will mean that resource is unlimited. Consequentially, this +// means that the zero-value SpanLimits will disable all span resources. +// Because of this, limits should be constructed using NewSpanLimits and +// updated accordingly. +// +// If this or WithSpanLimits are not provided, the TracerProvider will use the +// limits defined by environment variables, or the defaults if unset. Refer to +// the NewSpanLimits documentation for information about this relationship. 
+func WithRawSpanLimits(limits SpanLimits) TracerProviderOption { + return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig { + cfg.spanLimits = limits + return cfg + }) +} + +func applyTracerProviderEnvConfigs(cfg tracerProviderConfig) tracerProviderConfig { + for _, opt := range tracerProviderOptionsFromEnv() { + cfg = opt.apply(cfg) + } + + return cfg +} + +func tracerProviderOptionsFromEnv() []TracerProviderOption { + var opts []TracerProviderOption + + sampler, err := samplerFromEnv() + if err != nil { + otel.Handle(err) + } + + if sampler != nil { + opts = append(opts, WithSampler(sampler)) + } + + return opts +} + +// ensureValidTracerProviderConfig ensures that given TracerProviderConfig is valid. +func ensureValidTracerProviderConfig(cfg tracerProviderConfig) tracerProviderConfig { + if cfg.sampler == nil { + cfg.sampler = ParentBased(AlwaysSample()) + } + if cfg.idGenerator == nil { + cfg.idGenerator = defaultIDGenerator() + } + if cfg.resource == nil { + cfg.resource = resource.Default() + } + return cfg +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go new file mode 100644 index 00000000..02053b31 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go @@ -0,0 +1,108 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import ( + "errors" + "fmt" + "os" + "strconv" + "strings" +) + +const ( + tracesSamplerKey = "OTEL_TRACES_SAMPLER" + tracesSamplerArgKey = "OTEL_TRACES_SAMPLER_ARG" + + samplerAlwaysOn = "always_on" + samplerAlwaysOff = "always_off" + samplerTraceIDRatio = "traceidratio" + samplerParentBasedAlwaysOn = "parentbased_always_on" + samplerParsedBasedAlwaysOff = "parentbased_always_off" + samplerParentBasedTraceIDRatio = "parentbased_traceidratio" +) + +type errUnsupportedSampler string + +func (e errUnsupportedSampler) Error() string { + return fmt.Sprintf("unsupported sampler: %s", string(e)) +} + +var ( + errNegativeTraceIDRatio = errors.New("invalid trace ID ratio: less than 0.0") + errGreaterThanOneTraceIDRatio = errors.New("invalid trace ID ratio: greater than 1.0") +) + +type samplerArgParseError struct { + parseErr error +} + +func (e samplerArgParseError) Error() string { + return fmt.Sprintf("parsing sampler argument: %s", e.parseErr.Error()) +} + +func (e samplerArgParseError) Unwrap() error { + return e.parseErr +} + +func samplerFromEnv() (Sampler, error) { + sampler, ok := os.LookupEnv(tracesSamplerKey) + if !ok { + return nil, nil + } + + sampler = strings.ToLower(strings.TrimSpace(sampler)) + samplerArg, hasSamplerArg := os.LookupEnv(tracesSamplerArgKey) + samplerArg = strings.TrimSpace(samplerArg) + + switch sampler { + case samplerAlwaysOn: + return AlwaysSample(), nil + case samplerAlwaysOff: + return NeverSample(), nil + case samplerTraceIDRatio: + if !hasSamplerArg { + return TraceIDRatioBased(1.0), nil + } + return parseTraceIDRatio(samplerArg) + case samplerParentBasedAlwaysOn: + return ParentBased(AlwaysSample()), nil + case samplerParsedBasedAlwaysOff: + return ParentBased(NeverSample()), nil + case samplerParentBasedTraceIDRatio: + if !hasSamplerArg { + return ParentBased(TraceIDRatioBased(1.0)), nil + } + ratio, err := parseTraceIDRatio(samplerArg) + return ParentBased(ratio), 
err + default: + return nil, errUnsupportedSampler(sampler) + } +} + +func parseTraceIDRatio(arg string) (Sampler, error) { + v, err := strconv.ParseFloat(arg, 64) + if err != nil { + return TraceIDRatioBased(1.0), samplerArgParseError{err} + } + if v < 0.0 { + return TraceIDRatioBased(1.0), errNegativeTraceIDRatio + } + if v > 1.0 { + return TraceIDRatioBased(1.0), errGreaterThanOneTraceIDRatio + } + + return TraceIDRatioBased(v), nil +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go new file mode 100644 index 00000000..5ee9715d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go @@ -0,0 +1,293 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import ( + "context" + "encoding/binary" + "fmt" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +// Sampler decides whether a trace should be sampled and exported. +type Sampler interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // ShouldSample returns a SamplingResult based on a decision made from the + // passed parameters. 
+ ShouldSample(parameters SamplingParameters) SamplingResult + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Description returns information describing the Sampler. + Description() string + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. +} + +// SamplingParameters contains the values passed to a Sampler. +type SamplingParameters struct { + ParentContext context.Context + TraceID trace.TraceID + Name string + Kind trace.SpanKind + Attributes []attribute.KeyValue + Links []trace.Link +} + +// SamplingDecision indicates whether a span is dropped, recorded and/or sampled. +type SamplingDecision uint8 + +// Valid sampling decisions. +const ( + // Drop will not record the span and all attributes/events will be dropped. + Drop SamplingDecision = iota + + // Record indicates the span's `IsRecording() == true`, but `Sampled` flag + // *must not* be set. + RecordOnly + + // RecordAndSample has span's `IsRecording() == true` and `Sampled` flag + // *must* be set. + RecordAndSample +) + +// SamplingResult conveys a SamplingDecision, set of Attributes and a Tracestate. +type SamplingResult struct { + Decision SamplingDecision + Attributes []attribute.KeyValue + Tracestate trace.TraceState +} + +type traceIDRatioSampler struct { + traceIDUpperBound uint64 + description string +} + +func (ts traceIDRatioSampler) ShouldSample(p SamplingParameters) SamplingResult { + psc := trace.SpanContextFromContext(p.ParentContext) + x := binary.BigEndian.Uint64(p.TraceID[8:16]) >> 1 + if x < ts.traceIDUpperBound { + return SamplingResult{ + Decision: RecordAndSample, + Tracestate: psc.TraceState(), + } + } + return SamplingResult{ + Decision: Drop, + Tracestate: psc.TraceState(), + } +} + +func (ts traceIDRatioSampler) Description() string { + return ts.description +} + +// TraceIDRatioBased samples a given fraction of traces. 
Fractions >= 1 will +// always sample. Fractions < 0 are treated as zero. To respect the +// parent trace's `SampledFlag`, the `TraceIDRatioBased` sampler should be used +// as a delegate of a `Parent` sampler. +// +//nolint:revive // revive complains about stutter of `trace.TraceIDRatioBased` +func TraceIDRatioBased(fraction float64) Sampler { + if fraction >= 1 { + return AlwaysSample() + } + + if fraction <= 0 { + fraction = 0 + } + + return &traceIDRatioSampler{ + traceIDUpperBound: uint64(fraction * (1 << 63)), + description: fmt.Sprintf("TraceIDRatioBased{%g}", fraction), + } +} + +type alwaysOnSampler struct{} + +func (as alwaysOnSampler) ShouldSample(p SamplingParameters) SamplingResult { + return SamplingResult{ + Decision: RecordAndSample, + Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(), + } +} + +func (as alwaysOnSampler) Description() string { + return "AlwaysOnSampler" +} + +// AlwaysSample returns a Sampler that samples every trace. +// Be careful about using this sampler in a production application with +// significant traffic: a new trace will be started and exported for every +// request. +func AlwaysSample() Sampler { + return alwaysOnSampler{} +} + +type alwaysOffSampler struct{} + +func (as alwaysOffSampler) ShouldSample(p SamplingParameters) SamplingResult { + return SamplingResult{ + Decision: Drop, + Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(), + } +} + +func (as alwaysOffSampler) Description() string { + return "AlwaysOffSampler" +} + +// NeverSample returns a Sampler that samples no traces. +func NeverSample() Sampler { + return alwaysOffSampler{} +} + +// ParentBased returns a composite sampler which behaves differently, +// based on the parent of the span. If the span has no parent, +// the root(Sampler) is used to make sampling decision. 
If the span has +// a parent, depending on whether the parent is remote and whether it +// is sampled, one of the following samplers will apply: +// - remoteParentSampled(Sampler) (default: AlwaysOn) +// - remoteParentNotSampled(Sampler) (default: AlwaysOff) +// - localParentSampled(Sampler) (default: AlwaysOn) +// - localParentNotSampled(Sampler) (default: AlwaysOff) +func ParentBased(root Sampler, samplers ...ParentBasedSamplerOption) Sampler { + return parentBased{ + root: root, + config: configureSamplersForParentBased(samplers), + } +} + +type parentBased struct { + root Sampler + config samplerConfig +} + +func configureSamplersForParentBased(samplers []ParentBasedSamplerOption) samplerConfig { + c := samplerConfig{ + remoteParentSampled: AlwaysSample(), + remoteParentNotSampled: NeverSample(), + localParentSampled: AlwaysSample(), + localParentNotSampled: NeverSample(), + } + + for _, so := range samplers { + c = so.apply(c) + } + + return c +} + +// samplerConfig is a group of options for parentBased sampler. +type samplerConfig struct { + remoteParentSampled, remoteParentNotSampled Sampler + localParentSampled, localParentNotSampled Sampler +} + +// ParentBasedSamplerOption configures the sampler for a particular sampling case. +type ParentBasedSamplerOption interface { + apply(samplerConfig) samplerConfig +} + +// WithRemoteParentSampled sets the sampler for the case of sampled remote parent. +func WithRemoteParentSampled(s Sampler) ParentBasedSamplerOption { + return remoteParentSampledOption{s} +} + +type remoteParentSampledOption struct { + s Sampler +} + +func (o remoteParentSampledOption) apply(config samplerConfig) samplerConfig { + config.remoteParentSampled = o.s + return config +} + +// WithRemoteParentNotSampled sets the sampler for the case of remote parent +// which is not sampled. 
+func WithRemoteParentNotSampled(s Sampler) ParentBasedSamplerOption { + return remoteParentNotSampledOption{s} +} + +type remoteParentNotSampledOption struct { + s Sampler +} + +func (o remoteParentNotSampledOption) apply(config samplerConfig) samplerConfig { + config.remoteParentNotSampled = o.s + return config +} + +// WithLocalParentSampled sets the sampler for the case of sampled local parent. +func WithLocalParentSampled(s Sampler) ParentBasedSamplerOption { + return localParentSampledOption{s} +} + +type localParentSampledOption struct { + s Sampler +} + +func (o localParentSampledOption) apply(config samplerConfig) samplerConfig { + config.localParentSampled = o.s + return config +} + +// WithLocalParentNotSampled sets the sampler for the case of local parent +// which is not sampled. +func WithLocalParentNotSampled(s Sampler) ParentBasedSamplerOption { + return localParentNotSampledOption{s} +} + +type localParentNotSampledOption struct { + s Sampler +} + +func (o localParentNotSampledOption) apply(config samplerConfig) samplerConfig { + config.localParentNotSampled = o.s + return config +} + +func (pb parentBased) ShouldSample(p SamplingParameters) SamplingResult { + psc := trace.SpanContextFromContext(p.ParentContext) + if psc.IsValid() { + if psc.IsRemote() { + if psc.IsSampled() { + return pb.config.remoteParentSampled.ShouldSample(p) + } + return pb.config.remoteParentNotSampled.ShouldSample(p) + } + + if psc.IsSampled() { + return pb.config.localParentSampled.ShouldSample(p) + } + return pb.config.localParentNotSampled.ShouldSample(p) + } + return pb.root.ShouldSample(p) +} + +func (pb parentBased) Description() string { + return fmt.Sprintf("ParentBased{root:%s,remoteParentSampled:%s,"+ + "remoteParentNotSampled:%s,localParentSampled:%s,localParentNotSampled:%s}", + pb.root.Description(), + pb.config.remoteParentSampled.Description(), + pb.config.remoteParentNotSampled.Description(), + pb.config.localParentSampled.Description(), + 
pb.config.localParentNotSampled.Description(), + ) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go new file mode 100644 index 00000000..e8530a95 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go @@ -0,0 +1,128 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import ( + "context" + "sync" + + "go.opentelemetry.io/otel" +) + +// simpleSpanProcessor is a SpanProcessor that synchronously sends all +// completed Spans to a trace.Exporter immediately. +type simpleSpanProcessor struct { + exporterMu sync.RWMutex + exporter SpanExporter + stopOnce sync.Once +} + +var _ SpanProcessor = (*simpleSpanProcessor)(nil) + +// NewSimpleSpanProcessor returns a new SpanProcessor that will synchronously +// send completed spans to the exporter immediately. +// +// This SpanProcessor is not recommended for production use. The synchronous +// nature of this SpanProcessor make it good for testing, debugging, or +// showing examples of other feature, but it will be slow and have a high +// computation resource usage overhead. The BatchSpanProcessor is recommended +// for production use instead. 
+func NewSimpleSpanProcessor(exporter SpanExporter) SpanProcessor { + ssp := &simpleSpanProcessor{ + exporter: exporter, + } + return ssp +} + +// OnStart does nothing. +func (ssp *simpleSpanProcessor) OnStart(context.Context, ReadWriteSpan) {} + +// OnEnd immediately exports a ReadOnlySpan. +func (ssp *simpleSpanProcessor) OnEnd(s ReadOnlySpan) { + ssp.exporterMu.RLock() + defer ssp.exporterMu.RUnlock() + + if ssp.exporter != nil && s.SpanContext().TraceFlags().IsSampled() { + if err := ssp.exporter.ExportSpans(context.Background(), []ReadOnlySpan{s}); err != nil { + otel.Handle(err) + } + } +} + +// Shutdown shuts down the exporter this SimpleSpanProcessor exports to. +func (ssp *simpleSpanProcessor) Shutdown(ctx context.Context) error { + var err error + ssp.stopOnce.Do(func() { + stopFunc := func(exp SpanExporter) (<-chan error, func()) { + done := make(chan error) + return done, func() { done <- exp.Shutdown(ctx) } + } + + // The exporter field of the simpleSpanProcessor needs to be zeroed to + // signal it is shut down, meaning all subsequent calls to OnEnd will + // be gracefully ignored. This needs to be done synchronously to avoid + // any race condition. + // + // A closure is used to keep reference to the exporter and then the + // field is zeroed. This ensures the simpleSpanProcessor is shut down + // before the exporter. This order is important as it avoids a + // potential deadlock. If the exporter shut down operation generates a + // span, that span would need to be exported. Meaning, OnEnd would be + // called and try acquiring the lock that is held here. + ssp.exporterMu.Lock() + done, shutdown := stopFunc(ssp.exporter) + ssp.exporter = nil + ssp.exporterMu.Unlock() + + go shutdown() + + // Wait for the exporter to shut down or the deadline to expire. + select { + case err = <-done: + case <-ctx.Done(): + // It is possible for the exporter to have immediately shut down + // and the context to be done simultaneously. 
In that case this + // outer select statement will randomly choose a case. This will + // result in a different returned error for similar scenarios. + // Instead, double check if the exporter shut down at the same + // time and return that error if so. This will ensure consistency + // as well as ensure the caller knows the exporter shut down + // successfully (they can already determine if the deadline is + // expired given they passed the context). + select { + case err = <-done: + default: + err = ctx.Err() + } + } + }) + return err +} + +// ForceFlush does nothing as there is no data to flush. +func (ssp *simpleSpanProcessor) ForceFlush(context.Context) error { + return nil +} + +// MarshalLog is the marshaling function used by the logging system to represent this Span Processor. +func (ssp *simpleSpanProcessor) MarshalLog() interface{} { + return struct { + Type string + Exporter SpanExporter + }{ + Type: "SimpleSpanProcessor", + Exporter: ssp.exporter, + } +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go new file mode 100644 index 00000000..0349b2f1 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go @@ -0,0 +1,144 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import ( + "time" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/resource" + "go.opentelemetry.io/otel/trace" +) + +// snapshot is an record of a spans state at a particular checkpointed time. +// It is used as a read-only representation of that state. +type snapshot struct { + name string + spanContext trace.SpanContext + parent trace.SpanContext + spanKind trace.SpanKind + startTime time.Time + endTime time.Time + attributes []attribute.KeyValue + events []Event + links []Link + status Status + childSpanCount int + droppedAttributeCount int + droppedEventCount int + droppedLinkCount int + resource *resource.Resource + instrumentationScope instrumentation.Scope +} + +var _ ReadOnlySpan = snapshot{} + +func (s snapshot) private() {} + +// Name returns the name of the span. +func (s snapshot) Name() string { + return s.name +} + +// SpanContext returns the unique SpanContext that identifies the span. +func (s snapshot) SpanContext() trace.SpanContext { + return s.spanContext +} + +// Parent returns the unique SpanContext that identifies the parent of the +// span if one exists. If the span has no parent the returned SpanContext +// will be invalid. +func (s snapshot) Parent() trace.SpanContext { + return s.parent +} + +// SpanKind returns the role the span plays in a Trace. +func (s snapshot) SpanKind() trace.SpanKind { + return s.spanKind +} + +// StartTime returns the time the span started recording. +func (s snapshot) StartTime() time.Time { + return s.startTime +} + +// EndTime returns the time the span stopped recording. It will be zero if +// the span has not ended. +func (s snapshot) EndTime() time.Time { + return s.endTime +} + +// Attributes returns the defining attributes of the span. 
+func (s snapshot) Attributes() []attribute.KeyValue { + return s.attributes +} + +// Links returns all the links the span has to other spans. +func (s snapshot) Links() []Link { + return s.links +} + +// Events returns all the events that occurred within in the spans +// lifetime. +func (s snapshot) Events() []Event { + return s.events +} + +// Status returns the spans status. +func (s snapshot) Status() Status { + return s.status +} + +// InstrumentationScope returns information about the instrumentation +// scope that created the span. +func (s snapshot) InstrumentationScope() instrumentation.Scope { + return s.instrumentationScope +} + +// InstrumentationLibrary returns information about the instrumentation +// library that created the span. +func (s snapshot) InstrumentationLibrary() instrumentation.Library { + return s.instrumentationScope +} + +// Resource returns information about the entity that produced the span. +func (s snapshot) Resource() *resource.Resource { + return s.resource +} + +// DroppedAttributes returns the number of attributes dropped by the span +// due to limits being reached. +func (s snapshot) DroppedAttributes() int { + return s.droppedAttributeCount +} + +// DroppedLinks returns the number of links dropped by the span due to limits +// being reached. +func (s snapshot) DroppedLinks() int { + return s.droppedLinkCount +} + +// DroppedEvents returns the number of events dropped by the span due to +// limits being reached. +func (s snapshot) DroppedEvents() int { + return s.droppedEventCount +} + +// ChildSpanCount returns the count of spans that consider the span a +// direct parent. 
+func (s snapshot) ChildSpanCount() int { + return s.childSpanCount +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go new file mode 100644 index 00000000..9fb483a9 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go @@ -0,0 +1,828 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import ( + "context" + "fmt" + "reflect" + "runtime" + rt "runtime/trace" + "strings" + "sync" + "time" + "unicode/utf8" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/internal" + "go.opentelemetry.io/otel/sdk/resource" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" + "go.opentelemetry.io/otel/trace" +) + +// ReadOnlySpan allows reading information from the data structure underlying a +// trace.Span. It is used in places where reading information from a span is +// necessary but changing the span isn't necessary or allowed. +// +// Warning: methods may be added to this interface in minor releases. +type ReadOnlySpan interface { + // Name returns the name of the span. + Name() string + // SpanContext returns the unique SpanContext that identifies the span. 
+ SpanContext() trace.SpanContext + // Parent returns the unique SpanContext that identifies the parent of the + // span if one exists. If the span has no parent the returned SpanContext + // will be invalid. + Parent() trace.SpanContext + // SpanKind returns the role the span plays in a Trace. + SpanKind() trace.SpanKind + // StartTime returns the time the span started recording. + StartTime() time.Time + // EndTime returns the time the span stopped recording. It will be zero if + // the span has not ended. + EndTime() time.Time + // Attributes returns the defining attributes of the span. + // The order of the returned attributes is not guaranteed to be stable across invocations. + Attributes() []attribute.KeyValue + // Links returns all the links the span has to other spans. + Links() []Link + // Events returns all the events that occurred within in the spans + // lifetime. + Events() []Event + // Status returns the spans status. + Status() Status + // InstrumentationScope returns information about the instrumentation + // scope that created the span. + InstrumentationScope() instrumentation.Scope + // InstrumentationLibrary returns information about the instrumentation + // library that created the span. + // Deprecated: please use InstrumentationScope instead. + InstrumentationLibrary() instrumentation.Library + // Resource returns information about the entity that produced the span. + Resource() *resource.Resource + // DroppedAttributes returns the number of attributes dropped by the span + // due to limits being reached. + DroppedAttributes() int + // DroppedLinks returns the number of links dropped by the span due to + // limits being reached. + DroppedLinks() int + // DroppedEvents returns the number of events dropped by the span due to + // limits being reached. + DroppedEvents() int + // ChildSpanCount returns the count of spans that consider the span a + // direct parent. 
+ ChildSpanCount() int + + // A private method to prevent users implementing the + // interface and so future additions to it will not + // violate compatibility. + private() +} + +// ReadWriteSpan exposes the same methods as trace.Span and in addition allows +// reading information from the underlying data structure. +// This interface exposes the union of the methods of trace.Span (which is a +// "write-only" span) and ReadOnlySpan. New methods for writing or reading span +// information should be added under trace.Span or ReadOnlySpan, respectively. +// +// Warning: methods may be added to this interface in minor releases. +type ReadWriteSpan interface { + trace.Span + ReadOnlySpan +} + +// recordingSpan is an implementation of the OpenTelemetry Span API +// representing the individual component of a trace that is sampled. +type recordingSpan struct { + // mu protects the contents of this span. + mu sync.Mutex + + // parent holds the parent span of this span as a trace.SpanContext. + parent trace.SpanContext + + // spanKind represents the kind of this span as a trace.SpanKind. + spanKind trace.SpanKind + + // name is the name of this span. + name string + + // startTime is the time at which this span was started. + startTime time.Time + + // endTime is the time at which this span was ended. It contains the zero + // value of time.Time until the span is ended. + endTime time.Time + + // status is the status of this span. + status Status + + // childSpanCount holds the number of child spans created for this span. + childSpanCount int + + // spanContext holds the SpanContext of this span. + spanContext trace.SpanContext + + // attributes is a collection of user provided key/values. The collection + // is constrained by a configurable maximum held by the parent + // TracerProvider. When additional attributes are added after this maximum + // is reached these attributes the user is attempting to add are dropped. 
+ // This dropped number of attributes is tracked and reported in the + // ReadOnlySpan exported when the span ends. + attributes []attribute.KeyValue + droppedAttributes int + + // events are stored in FIFO queue capped by configured limit. + events evictedQueue + + // links are stored in FIFO queue capped by configured limit. + links evictedQueue + + // executionTracerTaskEnd ends the execution tracer span. + executionTracerTaskEnd func() + + // tracer is the SDK tracer that created this span. + tracer *tracer +} + +var _ ReadWriteSpan = (*recordingSpan)(nil) +var _ runtimeTracer = (*recordingSpan)(nil) + +// SpanContext returns the SpanContext of this span. +func (s *recordingSpan) SpanContext() trace.SpanContext { + if s == nil { + return trace.SpanContext{} + } + return s.spanContext +} + +// IsRecording returns if this span is being recorded. If this span has ended +// this will return false. +func (s *recordingSpan) IsRecording() bool { + if s == nil { + return false + } + s.mu.Lock() + defer s.mu.Unlock() + + return s.endTime.IsZero() +} + +// SetStatus sets the status of the Span in the form of a code and a +// description, overriding previous values set. The description is only +// included in the set status when the code is for an error. If this span is +// not being recorded than this method does nothing. +func (s *recordingSpan) SetStatus(code codes.Code, description string) { + if !s.IsRecording() { + return + } + s.mu.Lock() + defer s.mu.Unlock() + if s.status.Code > code { + return + } + + status := Status{Code: code} + if code == codes.Error { + status.Description = description + } + + s.status = status +} + +// SetAttributes sets attributes of this span. +// +// If a key from attributes already exists the value associated with that key +// will be overwritten with the value contained in attributes. +// +// If this span is not being recorded than this method does nothing. 
+// +// If adding attributes to the span would exceed the maximum amount of +// attributes the span is configured to have, the last added attributes will +// be dropped. +func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) { + if !s.IsRecording() { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + limit := s.tracer.provider.spanLimits.AttributeCountLimit + if limit == 0 { + // No attributes allowed. + s.droppedAttributes += len(attributes) + return + } + + // If adding these attributes could exceed the capacity of s perform a + // de-duplication and truncation while adding to avoid over allocation. + if limit > 0 && len(s.attributes)+len(attributes) > limit { + s.addOverCapAttrs(limit, attributes) + return + } + + // Otherwise, add without deduplication. When attributes are read they + // will be deduplicated, optimizing the operation. + for _, a := range attributes { + if !a.Valid() { + // Drop all invalid attributes. + s.droppedAttributes++ + continue + } + a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a) + s.attributes = append(s.attributes, a) + } +} + +// addOverCapAttrs adds the attributes attrs to the span s while +// de-duplicating the attributes of s and attrs and dropping attributes that +// exceed the limit. +// +// This method assumes s.mu.Lock is held by the caller. +// +// This method should only be called when there is a possibility that adding +// attrs to s will exceed the limit. Otherwise, attrs should be added to s +// without checking for duplicates and all retrieval methods of the attributes +// for s will de-duplicate as needed. +// +// This method assumes limit is a value > 0. The argument should be validated +// by the caller. +func (s *recordingSpan) addOverCapAttrs(limit int, attrs []attribute.KeyValue) { + // In order to not allocate more capacity to s.attributes than needed, + // prune and truncate this addition of attributes while adding. 
+ + // Do not set a capacity when creating this map. Benchmark testing has + // showed this to only add unused memory allocations in general use. + exists := make(map[attribute.Key]int) + s.dedupeAttrsFromRecord(&exists) + + // Now that s.attributes is deduplicated, adding unique attributes up to + // the capacity of s will not over allocate s.attributes. + for _, a := range attrs { + if !a.Valid() { + // Drop all invalid attributes. + s.droppedAttributes++ + continue + } + + if idx, ok := exists[a.Key]; ok { + // Perform all updates before dropping, even when at capacity. + s.attributes[idx] = a + continue + } + + if len(s.attributes) >= limit { + // Do not just drop all of the remaining attributes, make sure + // updates are checked and performed. + s.droppedAttributes++ + } else { + a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a) + s.attributes = append(s.attributes, a) + exists[a.Key] = len(s.attributes) - 1 + } + } +} + +// truncateAttr returns a truncated version of attr. Only string and string +// slice attribute values are truncated. String values are truncated to at +// most a length of limit. Each string slice value is truncated in this fashion +// (the slice length itself is unaffected). +// +// No truncation is perfromed for a negative limit. +func truncateAttr(limit int, attr attribute.KeyValue) attribute.KeyValue { + if limit < 0 { + return attr + } + switch attr.Value.Type() { + case attribute.STRING: + if v := attr.Value.AsString(); len(v) > limit { + return attr.Key.String(safeTruncate(v, limit)) + } + case attribute.STRINGSLICE: + v := attr.Value.AsStringSlice() + for i := range v { + if len(v[i]) > limit { + v[i] = safeTruncate(v[i], limit) + } + } + return attr.Key.StringSlice(v) + } + return attr +} + +// safeTruncate truncates the string and guarantees valid UTF-8 is returned. 
+func safeTruncate(input string, limit int) string { + if trunc, ok := safeTruncateValidUTF8(input, limit); ok { + return trunc + } + trunc, _ := safeTruncateValidUTF8(strings.ToValidUTF8(input, ""), limit) + return trunc +} + +// safeTruncateValidUTF8 returns a copy of the input string safely truncated to +// limit. The truncation is ensured to occur at the bounds of complete UTF-8 +// characters. If invalid encoding of UTF-8 is encountered, input is returned +// with false, otherwise, the truncated input will be returned with true. +func safeTruncateValidUTF8(input string, limit int) (string, bool) { + for cnt := 0; cnt <= limit; { + r, size := utf8.DecodeRuneInString(input[cnt:]) + if r == utf8.RuneError { + return input, false + } + + if cnt+size > limit { + return input[:cnt], true + } + cnt += size + } + return input, true +} + +// End ends the span. This method does nothing if the span is already ended or +// is not being recorded. +// +// The only SpanOption currently supported is WithTimestamp which will set the +// end time for a Span's life-cycle. +// +// If this method is called while panicking an error event is added to the +// Span before ending it and the panic is continued. +func (s *recordingSpan) End(options ...trace.SpanEndOption) { + // Do not start by checking if the span is being recorded which requires + // acquiring a lock. Make a minimal check that the span is not nil. + if s == nil { + return + } + + // Store the end time as soon as possible to avoid artificially increasing + // the span's duration in case some operation below takes a while. + et := internal.MonotonicEndTime(s.startTime) + + // Do relative expensive check now that we have an end time and see if we + // need to do any more processing. + if !s.IsRecording() { + return + } + + config := trace.NewSpanEndConfig(options...) + if recovered := recover(); recovered != nil { + // Record but don't stop the panic. 
+ defer panic(recovered) + opts := []trace.EventOption{ + trace.WithAttributes( + semconv.ExceptionType(typeStr(recovered)), + semconv.ExceptionMessage(fmt.Sprint(recovered)), + ), + } + + if config.StackTrace() { + opts = append(opts, trace.WithAttributes( + semconv.ExceptionStacktrace(recordStackTrace()), + )) + } + + s.addEvent(semconv.ExceptionEventName, opts...) + } + + if s.executionTracerTaskEnd != nil { + s.executionTracerTaskEnd() + } + + s.mu.Lock() + // Setting endTime to non-zero marks the span as ended and not recording. + if config.Timestamp().IsZero() { + s.endTime = et + } else { + s.endTime = config.Timestamp() + } + s.mu.Unlock() + + sps := s.tracer.provider.spanProcessors.Load().(spanProcessorStates) + if len(sps) == 0 { + return + } + snap := s.snapshot() + for _, sp := range sps { + sp.sp.OnEnd(snap) + } +} + +// RecordError will record err as a span event for this span. An additional call to +// SetStatus is required if the Status of the Span should be set to Error, this method +// does not change the Span status. If this span is not being recorded or err is nil +// than this method does nothing. +func (s *recordingSpan) RecordError(err error, opts ...trace.EventOption) { + if s == nil || err == nil || !s.IsRecording() { + return + } + + opts = append(opts, trace.WithAttributes( + semconv.ExceptionType(typeStr(err)), + semconv.ExceptionMessage(err.Error()), + )) + + c := trace.NewEventConfig(opts...) + if c.StackTrace() { + opts = append(opts, trace.WithAttributes( + semconv.ExceptionStacktrace(recordStackTrace()), + )) + } + + s.addEvent(semconv.ExceptionEventName, opts...) +} + +func typeStr(i interface{}) string { + t := reflect.TypeOf(i) + if t.PkgPath() == "" && t.Name() == "" { + // Likely a builtin type. 
+ return t.String() + } + return fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()) +} + +func recordStackTrace() string { + stackTrace := make([]byte, 2048) + n := runtime.Stack(stackTrace, false) + + return string(stackTrace[0:n]) +} + +// AddEvent adds an event with the provided name and options. If this span is +// not being recorded than this method does nothing. +func (s *recordingSpan) AddEvent(name string, o ...trace.EventOption) { + if !s.IsRecording() { + return + } + s.addEvent(name, o...) +} + +func (s *recordingSpan) addEvent(name string, o ...trace.EventOption) { + c := trace.NewEventConfig(o...) + e := Event{Name: name, Attributes: c.Attributes(), Time: c.Timestamp()} + + // Discard attributes over limit. + limit := s.tracer.provider.spanLimits.AttributePerEventCountLimit + if limit == 0 { + // Drop all attributes. + e.DroppedAttributeCount = len(e.Attributes) + e.Attributes = nil + } else if limit > 0 && len(e.Attributes) > limit { + // Drop over capacity. + e.DroppedAttributeCount = len(e.Attributes) - limit + e.Attributes = e.Attributes[:limit] + } + + s.mu.Lock() + s.events.add(e) + s.mu.Unlock() +} + +// SetName sets the name of this span. If this span is not being recorded than +// this method does nothing. +func (s *recordingSpan) SetName(name string) { + if !s.IsRecording() { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + s.name = name +} + +// Name returns the name of this span. +func (s *recordingSpan) Name() string { + s.mu.Lock() + defer s.mu.Unlock() + return s.name +} + +// Name returns the SpanContext of this span's parent span. +func (s *recordingSpan) Parent() trace.SpanContext { + s.mu.Lock() + defer s.mu.Unlock() + return s.parent +} + +// SpanKind returns the SpanKind of this span. +func (s *recordingSpan) SpanKind() trace.SpanKind { + s.mu.Lock() + defer s.mu.Unlock() + return s.spanKind +} + +// StartTime returns the time this span started. 
+func (s *recordingSpan) StartTime() time.Time { + s.mu.Lock() + defer s.mu.Unlock() + return s.startTime +} + +// EndTime returns the time this span ended. For spans that have not yet +// ended, the returned value will be the zero value of time.Time. +func (s *recordingSpan) EndTime() time.Time { + s.mu.Lock() + defer s.mu.Unlock() + return s.endTime +} + +// Attributes returns the attributes of this span. +// +// The order of the returned attributes is not guaranteed to be stable. +func (s *recordingSpan) Attributes() []attribute.KeyValue { + s.mu.Lock() + defer s.mu.Unlock() + s.dedupeAttrs() + return s.attributes +} + +// dedupeAttrs deduplicates the attributes of s to fit capacity. +// +// This method assumes s.mu.Lock is held by the caller. +func (s *recordingSpan) dedupeAttrs() { + // Do not set a capacity when creating this map. Benchmark testing has + // showed this to only add unused memory allocations in general use. + exists := make(map[attribute.Key]int) + s.dedupeAttrsFromRecord(&exists) +} + +// dedupeAttrsFromRecord deduplicates the attributes of s to fit capacity +// using record as the record of unique attribute keys to their index. +// +// This method assumes s.mu.Lock is held by the caller. +func (s *recordingSpan) dedupeAttrsFromRecord(record *map[attribute.Key]int) { + // Use the fact that slices share the same backing array. + unique := s.attributes[:0] + for _, a := range s.attributes { + if idx, ok := (*record)[a.Key]; ok { + unique[idx] = a + } else { + unique = append(unique, a) + (*record)[a.Key] = len(unique) - 1 + } + } + // s.attributes have element types of attribute.KeyValue. These types are + // not pointers and they themselves do not contain pointer fields, + // therefore the duplicate values do not need to be zeroed for them to be + // garbage collected. + s.attributes = unique +} + +// Links returns the links of this span. 
+func (s *recordingSpan) Links() []Link { + s.mu.Lock() + defer s.mu.Unlock() + if len(s.links.queue) == 0 { + return []Link{} + } + return s.interfaceArrayToLinksArray() +} + +// Events returns the events of this span. +func (s *recordingSpan) Events() []Event { + s.mu.Lock() + defer s.mu.Unlock() + if len(s.events.queue) == 0 { + return []Event{} + } + return s.interfaceArrayToEventArray() +} + +// Status returns the status of this span. +func (s *recordingSpan) Status() Status { + s.mu.Lock() + defer s.mu.Unlock() + return s.status +} + +// InstrumentationScope returns the instrumentation.Scope associated with +// the Tracer that created this span. +func (s *recordingSpan) InstrumentationScope() instrumentation.Scope { + s.mu.Lock() + defer s.mu.Unlock() + return s.tracer.instrumentationScope +} + +// InstrumentationLibrary returns the instrumentation.Library associated with +// the Tracer that created this span. +func (s *recordingSpan) InstrumentationLibrary() instrumentation.Library { + s.mu.Lock() + defer s.mu.Unlock() + return s.tracer.instrumentationScope +} + +// Resource returns the Resource associated with the Tracer that created this +// span. +func (s *recordingSpan) Resource() *resource.Resource { + s.mu.Lock() + defer s.mu.Unlock() + return s.tracer.provider.resource +} + +func (s *recordingSpan) addLink(link trace.Link) { + if !s.IsRecording() || !link.SpanContext.IsValid() { + return + } + + l := Link{SpanContext: link.SpanContext, Attributes: link.Attributes} + + // Discard attributes over limit. + limit := s.tracer.provider.spanLimits.AttributePerLinkCountLimit + if limit == 0 { + // Drop all attributes. 
+ l.DroppedAttributeCount = len(l.Attributes) + l.Attributes = nil + } else if limit > 0 && len(l.Attributes) > limit { + l.DroppedAttributeCount = len(l.Attributes) - limit + l.Attributes = l.Attributes[:limit] + } + + s.mu.Lock() + s.links.add(l) + s.mu.Unlock() +} + +// DroppedAttributes returns the number of attributes dropped by the span +// due to limits being reached. +func (s *recordingSpan) DroppedAttributes() int { + s.mu.Lock() + defer s.mu.Unlock() + return s.droppedAttributes +} + +// DroppedLinks returns the number of links dropped by the span due to limits +// being reached. +func (s *recordingSpan) DroppedLinks() int { + s.mu.Lock() + defer s.mu.Unlock() + return s.links.droppedCount +} + +// DroppedEvents returns the number of events dropped by the span due to +// limits being reached. +func (s *recordingSpan) DroppedEvents() int { + s.mu.Lock() + defer s.mu.Unlock() + return s.events.droppedCount +} + +// ChildSpanCount returns the count of spans that consider the span a +// direct parent. +func (s *recordingSpan) ChildSpanCount() int { + s.mu.Lock() + defer s.mu.Unlock() + return s.childSpanCount +} + +// TracerProvider returns a trace.TracerProvider that can be used to generate +// additional Spans on the same telemetry pipeline as the current Span. +func (s *recordingSpan) TracerProvider() trace.TracerProvider { + return s.tracer.provider +} + +// snapshot creates a read-only copy of the current state of the span. 
+func (s *recordingSpan) snapshot() ReadOnlySpan { + var sd snapshot + s.mu.Lock() + defer s.mu.Unlock() + + sd.endTime = s.endTime + sd.instrumentationScope = s.tracer.instrumentationScope + sd.name = s.name + sd.parent = s.parent + sd.resource = s.tracer.provider.resource + sd.spanContext = s.spanContext + sd.spanKind = s.spanKind + sd.startTime = s.startTime + sd.status = s.status + sd.childSpanCount = s.childSpanCount + + if len(s.attributes) > 0 { + s.dedupeAttrs() + sd.attributes = s.attributes + } + sd.droppedAttributeCount = s.droppedAttributes + if len(s.events.queue) > 0 { + sd.events = s.interfaceArrayToEventArray() + sd.droppedEventCount = s.events.droppedCount + } + if len(s.links.queue) > 0 { + sd.links = s.interfaceArrayToLinksArray() + sd.droppedLinkCount = s.links.droppedCount + } + return &sd +} + +func (s *recordingSpan) interfaceArrayToLinksArray() []Link { + linkArr := make([]Link, 0) + for _, value := range s.links.queue { + linkArr = append(linkArr, value.(Link)) + } + return linkArr +} + +func (s *recordingSpan) interfaceArrayToEventArray() []Event { + eventArr := make([]Event, 0) + for _, value := range s.events.queue { + eventArr = append(eventArr, value.(Event)) + } + return eventArr +} + +func (s *recordingSpan) addChild() { + if !s.IsRecording() { + return + } + s.mu.Lock() + s.childSpanCount++ + s.mu.Unlock() +} + +func (*recordingSpan) private() {} + +// runtimeTrace starts a "runtime/trace".Task for the span and returns a +// context containing the task. +func (s *recordingSpan) runtimeTrace(ctx context.Context) context.Context { + if !rt.IsEnabled() { + // Avoid additional overhead if runtime/trace is not enabled. + return ctx + } + nctx, task := rt.NewTask(ctx, s.name) + + s.mu.Lock() + s.executionTracerTaskEnd = task.End + s.mu.Unlock() + + return nctx +} + +// nonRecordingSpan is a minimal implementation of the OpenTelemetry Span API +// that wraps a SpanContext. 
It performs no operations other than to return +// the wrapped SpanContext or TracerProvider that created it. +type nonRecordingSpan struct { + // tracer is the SDK tracer that created this span. + tracer *tracer + sc trace.SpanContext +} + +var _ trace.Span = nonRecordingSpan{} + +// SpanContext returns the wrapped SpanContext. +func (s nonRecordingSpan) SpanContext() trace.SpanContext { return s.sc } + +// IsRecording always returns false. +func (nonRecordingSpan) IsRecording() bool { return false } + +// SetStatus does nothing. +func (nonRecordingSpan) SetStatus(codes.Code, string) {} + +// SetError does nothing. +func (nonRecordingSpan) SetError(bool) {} + +// SetAttributes does nothing. +func (nonRecordingSpan) SetAttributes(...attribute.KeyValue) {} + +// End does nothing. +func (nonRecordingSpan) End(...trace.SpanEndOption) {} + +// RecordError does nothing. +func (nonRecordingSpan) RecordError(error, ...trace.EventOption) {} + +// AddEvent does nothing. +func (nonRecordingSpan) AddEvent(string, ...trace.EventOption) {} + +// SetName does nothing. +func (nonRecordingSpan) SetName(string) {} + +// TracerProvider returns the trace.TracerProvider that provided the Tracer +// that created this span. +func (s nonRecordingSpan) TracerProvider() trace.TracerProvider { return s.tracer.provider } + +func isRecording(s SamplingResult) bool { + return s.Decision == RecordOnly || s.Decision == RecordAndSample +} + +func isSampled(s SamplingResult) bool { + return s.Decision == RecordAndSample +} + +// Status is the classified state of a Span. +type Status struct { + // Code is an identifier of a Spans state classification. + Code codes.Code + // Description is a user hint about why that status was set. It is only + // applicable when Code is Error. 
+ Description string +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go new file mode 100644 index 00000000..9fb3d6ea --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go @@ -0,0 +1,47 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import "context" + +// SpanExporter handles the delivery of spans to external receivers. This is +// the final component in the trace export pipeline. +type SpanExporter interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // ExportSpans exports a batch of spans. + // + // This function is called synchronously, so there is no concurrency + // safety requirement. However, due to the synchronous calling pattern, + // it is critical that all timeouts and cancellations contained in the + // passed context must be honored. + // + // Any retry logic must be contained in this function. The SDK that + // calls this function will not implement any retry logic. All errors + // returned by this function are considered unrecoverable and will be + // reported to a configured error Handler. 
+ ExportSpans(ctx context.Context, spans []ReadOnlySpan) error + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Shutdown notifies the exporter of a pending halt to operations. The + // exporter is expected to preform any cleanup or synchronization it + // requires while honoring all timeouts and cancellations contained in + // the passed context. + Shutdown(ctx context.Context) error + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go new file mode 100644 index 00000000..aa4d4221 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go @@ -0,0 +1,125 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import "go.opentelemetry.io/otel/sdk/internal/env" + +const ( + // DefaultAttributeValueLengthLimit is the default maximum allowed + // attribute value length, unlimited. + DefaultAttributeValueLengthLimit = -1 + + // DefaultAttributeCountLimit is the default maximum number of attributes + // a span can have. + DefaultAttributeCountLimit = 128 + + // DefaultEventCountLimit is the default maximum number of events a span + // can have. 
+ DefaultEventCountLimit = 128 + + // DefaultLinkCountLimit is the default maximum number of links a span can + // have. + DefaultLinkCountLimit = 128 + + // DefaultAttributePerEventCountLimit is the default maximum number of + // attributes a span event can have. + DefaultAttributePerEventCountLimit = 128 + + // DefaultAttributePerLinkCountLimit is the default maximum number of + // attributes a span link can have. + DefaultAttributePerLinkCountLimit = 128 +) + +// SpanLimits represents the limits of a span. +type SpanLimits struct { + // AttributeValueLengthLimit is the maximum allowed attribute value length. + // + // This limit only applies to string and string slice attribute values. + // Any string longer than this value will be truncated to this length. + // + // Setting this to a negative value means no limit is applied. + AttributeValueLengthLimit int + + // AttributeCountLimit is the maximum allowed span attribute count. Any + // attribute added to a span once this limit is reached will be dropped. + // + // Setting this to zero means no attributes will be recorded. + // + // Setting this to a negative value means no limit is applied. + AttributeCountLimit int + + // EventCountLimit is the maximum allowed span event count. Any event + // added to a span once this limit is reached means it will be added but + // the oldest event will be dropped. + // + // Setting this to zero means no events we be recorded. + // + // Setting this to a negative value means no limit is applied. + EventCountLimit int + + // LinkCountLimit is the maximum allowed span link count. Any link added + // to a span once this limit is reached means it will be added but the + // oldest link will be dropped. + // + // Setting this to zero means no links we be recorded. + // + // Setting this to a negative value means no limit is applied. + LinkCountLimit int + + // AttributePerEventCountLimit is the maximum number of attributes allowed + // per span event. 
Any attribute added after this limit reached will be + // dropped. + // + // Setting this to zero means no attributes will be recorded for events. + // + // Setting this to a negative value means no limit is applied. + AttributePerEventCountLimit int + + // AttributePerLinkCountLimit is the maximum number of attributes allowed + // per span link. Any attribute added after this limit reached will be + // dropped. + // + // Setting this to zero means no attributes will be recorded for links. + // + // Setting this to a negative value means no limit is applied. + AttributePerLinkCountLimit int +} + +// NewSpanLimits returns a SpanLimits with all limits set to the value their +// corresponding environment variable holds, or the default if unset. +// +// • AttributeValueLengthLimit: OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT +// (default: unlimited) +// +// • AttributeCountLimit: OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT (default: 128) +// +// • EventCountLimit: OTEL_SPAN_EVENT_COUNT_LIMIT (default: 128) +// +// • AttributePerEventCountLimit: OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT (default: +// 128) +// +// • LinkCountLimit: OTEL_SPAN_LINK_COUNT_LIMIT (default: 128) +// +// • AttributePerLinkCountLimit: OTEL_LINK_ATTRIBUTE_COUNT_LIMIT (default: 128) +func NewSpanLimits() SpanLimits { + return SpanLimits{ + AttributeValueLengthLimit: env.SpanAttributeValueLength(DefaultAttributeValueLengthLimit), + AttributeCountLimit: env.SpanAttributeCount(DefaultAttributeCountLimit), + EventCountLimit: env.SpanEventCount(DefaultEventCountLimit), + LinkCountLimit: env.SpanLinkCount(DefaultLinkCountLimit), + AttributePerEventCountLimit: env.SpanEventAttributeCount(DefaultAttributePerEventCountLimit), + AttributePerLinkCountLimit: env.SpanLinkAttributeCount(DefaultAttributePerLinkCountLimit), + } +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go new file mode 100644 index 00000000..e6ae1935 --- /dev/null +++ 
b/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go @@ -0,0 +1,72 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import ( + "context" + "sync" +) + +// SpanProcessor is a processing pipeline for spans in the trace signal. +// SpanProcessors registered with a TracerProvider and are called at the start +// and end of a Span's lifecycle, and are called in the order they are +// registered. +type SpanProcessor interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // OnStart is called when a span is started. It is called synchronously + // and should not block. + OnStart(parent context.Context, s ReadWriteSpan) + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // OnEnd is called when span is finished. It is called synchronously and + // hence not block. + OnEnd(s ReadOnlySpan) + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Shutdown is called when the SDK shuts down. Any cleanup or release of + // resources held by the processor should be done in this call. + // + // Calls to OnStart, OnEnd, or ForceFlush after this has been called + // should be ignored. 
+ // + // All timeouts and cancellations contained in ctx must be honored, this + // should not block indefinitely. + Shutdown(ctx context.Context) error + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // ForceFlush exports all ended spans to the configured Exporter that have not yet + // been exported. It should only be called when absolutely necessary, such as when + // using a FaaS provider that may suspend the process after an invocation, but before + // the Processor can export the completed spans. + ForceFlush(ctx context.Context) error + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. +} + +type spanProcessorState struct { + sp SpanProcessor + state *sync.Once +} + +func newSpanProcessorState(sp SpanProcessor) *spanProcessorState { + return &spanProcessorState{sp: sp, state: &sync.Once{}} +} + +type spanProcessorStates []*spanProcessorState diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go new file mode 100644 index 00000000..f17d924b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go @@ -0,0 +1,161 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import ( + "context" + "time" + + "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/trace" +) + +type tracer struct { + provider *TracerProvider + instrumentationScope instrumentation.Scope +} + +var _ trace.Tracer = &tracer{} + +// Start starts a Span and returns it along with a context containing it. +// +// The Span is created with the provided name and as a child of any existing +// span context found in the passed context. The created Span will be +// configured appropriately by any SpanOption passed. +func (tr *tracer) Start(ctx context.Context, name string, options ...trace.SpanStartOption) (context.Context, trace.Span) { + config := trace.NewSpanStartConfig(options...) + + if ctx == nil { + // Prevent trace.ContextWithSpan from panicking. + ctx = context.Background() + } + + // For local spans created by this SDK, track child span count. + if p := trace.SpanFromContext(ctx); p != nil { + if sdkSpan, ok := p.(*recordingSpan); ok { + sdkSpan.addChild() + } + } + + s := tr.newSpan(ctx, name, &config) + if rw, ok := s.(ReadWriteSpan); ok && s.IsRecording() { + sps := tr.provider.spanProcessors.Load().(spanProcessorStates) + for _, sp := range sps { + sp.sp.OnStart(ctx, rw) + } + } + if rtt, ok := s.(runtimeTracer); ok { + ctx = rtt.runtimeTrace(ctx) + } + + return trace.ContextWithSpan(ctx, s), s +} + +type runtimeTracer interface { + // runtimeTrace starts a "runtime/trace".Task for the span and + // returns a context containing the task. + runtimeTrace(ctx context.Context) context.Context +} + +// newSpan returns a new configured span. +func (tr *tracer) newSpan(ctx context.Context, name string, config *trace.SpanConfig) trace.Span { + // If told explicitly to make this a new root use a zero value SpanContext + // as a parent which contains an invalid trace ID and is not remote. 
+ var psc trace.SpanContext + if config.NewRoot() { + ctx = trace.ContextWithSpanContext(ctx, psc) + } else { + psc = trace.SpanContextFromContext(ctx) + } + + // If there is a valid parent trace ID, use it to ensure the continuity of + // the trace. Always generate a new span ID so other components can rely + // on a unique span ID, even if the Span is non-recording. + var tid trace.TraceID + var sid trace.SpanID + if !psc.TraceID().IsValid() { + tid, sid = tr.provider.idGenerator.NewIDs(ctx) + } else { + tid = psc.TraceID() + sid = tr.provider.idGenerator.NewSpanID(ctx, tid) + } + + samplingResult := tr.provider.sampler.ShouldSample(SamplingParameters{ + ParentContext: ctx, + TraceID: tid, + Name: name, + Kind: config.SpanKind(), + Attributes: config.Attributes(), + Links: config.Links(), + }) + + scc := trace.SpanContextConfig{ + TraceID: tid, + SpanID: sid, + TraceState: samplingResult.Tracestate, + } + if isSampled(samplingResult) { + scc.TraceFlags = psc.TraceFlags() | trace.FlagsSampled + } else { + scc.TraceFlags = psc.TraceFlags() &^ trace.FlagsSampled + } + sc := trace.NewSpanContext(scc) + + if !isRecording(samplingResult) { + return tr.newNonRecordingSpan(sc) + } + return tr.newRecordingSpan(psc, sc, name, samplingResult, config) +} + +// newRecordingSpan returns a new configured recordingSpan. +func (tr *tracer) newRecordingSpan(psc, sc trace.SpanContext, name string, sr SamplingResult, config *trace.SpanConfig) *recordingSpan { + startTime := config.Timestamp() + if startTime.IsZero() { + startTime = time.Now() + } + + s := &recordingSpan{ + // Do not pre-allocate the attributes slice here! Doing so will + // allocate memory that is likely never going to be used, or if used, + // will be over-sized. The default Go compiler has been tested to + // dynamically allocate needed space very well. 
Benchmarking has shown + // it to be more performant than what we can predetermine here, + // especially for the common use case of few to no added + // attributes. + + parent: psc, + spanContext: sc, + spanKind: trace.ValidateSpanKind(config.SpanKind()), + name: name, + startTime: startTime, + events: newEvictedQueue(tr.provider.spanLimits.EventCountLimit), + links: newEvictedQueue(tr.provider.spanLimits.LinkCountLimit), + tracer: tr, + } + + for _, l := range config.Links() { + s.addLink(l) + } + + s.SetAttributes(sr.Attributes...) + s.SetAttributes(config.Attributes()...) + + return s +} + +// newNonRecordingSpan returns a new configured nonRecordingSpan. +func (tr *tracer) newNonRecordingSpan(sc trace.SpanContext) nonRecordingSpan { + return nonRecordingSpan{tracer: tr, sc: sc} +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/internal/http.go b/vendor/go.opentelemetry.io/otel/semconv/internal/http.go new file mode 100644 index 00000000..b580eede --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/internal/http.go @@ -0,0 +1,336 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package internal // import "go.opentelemetry.io/otel/semconv/internal" + +import ( + "fmt" + "net" + "net/http" + "strconv" + "strings" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" +) + +// SemanticConventions are the semantic convention values defined for a +// version of the OpenTelemetry specification. +type SemanticConventions struct { + EnduserIDKey attribute.Key + HTTPClientIPKey attribute.Key + HTTPFlavorKey attribute.Key + HTTPHostKey attribute.Key + HTTPMethodKey attribute.Key + HTTPRequestContentLengthKey attribute.Key + HTTPRouteKey attribute.Key + HTTPSchemeHTTP attribute.KeyValue + HTTPSchemeHTTPS attribute.KeyValue + HTTPServerNameKey attribute.Key + HTTPStatusCodeKey attribute.Key + HTTPTargetKey attribute.Key + HTTPURLKey attribute.Key + HTTPUserAgentKey attribute.Key + NetHostIPKey attribute.Key + NetHostNameKey attribute.Key + NetHostPortKey attribute.Key + NetPeerIPKey attribute.Key + NetPeerNameKey attribute.Key + NetPeerPortKey attribute.Key + NetTransportIP attribute.KeyValue + NetTransportOther attribute.KeyValue + NetTransportTCP attribute.KeyValue + NetTransportUDP attribute.KeyValue + NetTransportUnix attribute.KeyValue +} + +// NetAttributesFromHTTPRequest generates attributes of the net +// namespace as specified by the OpenTelemetry specification for a +// span. The network parameter is a string that net.Dial function +// from standard library can understand. 
+func (sc *SemanticConventions) NetAttributesFromHTTPRequest(network string, request *http.Request) []attribute.KeyValue { + attrs := []attribute.KeyValue{} + + switch network { + case "tcp", "tcp4", "tcp6": + attrs = append(attrs, sc.NetTransportTCP) + case "udp", "udp4", "udp6": + attrs = append(attrs, sc.NetTransportUDP) + case "ip", "ip4", "ip6": + attrs = append(attrs, sc.NetTransportIP) + case "unix", "unixgram", "unixpacket": + attrs = append(attrs, sc.NetTransportUnix) + default: + attrs = append(attrs, sc.NetTransportOther) + } + + peerIP, peerName, peerPort := hostIPNamePort(request.RemoteAddr) + if peerIP != "" { + attrs = append(attrs, sc.NetPeerIPKey.String(peerIP)) + } + if peerName != "" { + attrs = append(attrs, sc.NetPeerNameKey.String(peerName)) + } + if peerPort != 0 { + attrs = append(attrs, sc.NetPeerPortKey.Int(peerPort)) + } + + hostIP, hostName, hostPort := "", "", 0 + for _, someHost := range []string{request.Host, request.Header.Get("Host"), request.URL.Host} { + hostIP, hostName, hostPort = hostIPNamePort(someHost) + if hostIP != "" || hostName != "" || hostPort != 0 { + break + } + } + if hostIP != "" { + attrs = append(attrs, sc.NetHostIPKey.String(hostIP)) + } + if hostName != "" { + attrs = append(attrs, sc.NetHostNameKey.String(hostName)) + } + if hostPort != 0 { + attrs = append(attrs, sc.NetHostPortKey.Int(hostPort)) + } + + return attrs +} + +// hostIPNamePort extracts the IP address, name and (optional) port from hostWithPort. +// It handles both IPv4 and IPv6 addresses. If the host portion is not recognized +// as a valid IPv4 or IPv6 address, the `ip` result will be empty and the +// host portion will instead be returned in `name`. 
+func hostIPNamePort(hostWithPort string) (ip string, name string, port int) { + var ( + hostPart, portPart string + parsedPort uint64 + err error + ) + if hostPart, portPart, err = net.SplitHostPort(hostWithPort); err != nil { + hostPart, portPart = hostWithPort, "" + } + if parsedIP := net.ParseIP(hostPart); parsedIP != nil { + ip = parsedIP.String() + } else { + name = hostPart + } + if parsedPort, err = strconv.ParseUint(portPart, 10, 16); err == nil { + port = int(parsedPort) + } + return +} + +// EndUserAttributesFromHTTPRequest generates attributes of the +// enduser namespace as specified by the OpenTelemetry specification +// for a span. +func (sc *SemanticConventions) EndUserAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { + if username, _, ok := request.BasicAuth(); ok { + return []attribute.KeyValue{sc.EnduserIDKey.String(username)} + } + return nil +} + +// HTTPClientAttributesFromHTTPRequest generates attributes of the +// http namespace as specified by the OpenTelemetry specification for +// a span on the client side. +func (sc *SemanticConventions) HTTPClientAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { + attrs := []attribute.KeyValue{} + + // remove any username/password info that may be in the URL + // before adding it to the attributes + userinfo := request.URL.User + request.URL.User = nil + + attrs = append(attrs, sc.HTTPURLKey.String(request.URL.String())) + + // restore any username/password info that was removed + request.URL.User = userinfo + + return append(attrs, sc.httpCommonAttributesFromHTTPRequest(request)...) 
+} + +func (sc *SemanticConventions) httpCommonAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { + attrs := []attribute.KeyValue{} + if ua := request.UserAgent(); ua != "" { + attrs = append(attrs, sc.HTTPUserAgentKey.String(ua)) + } + if request.ContentLength > 0 { + attrs = append(attrs, sc.HTTPRequestContentLengthKey.Int64(request.ContentLength)) + } + + return append(attrs, sc.httpBasicAttributesFromHTTPRequest(request)...) +} + +func (sc *SemanticConventions) httpBasicAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { + // as these attributes are used by HTTPServerMetricAttributesFromHTTPRequest, they should be low-cardinality + attrs := []attribute.KeyValue{} + + if request.TLS != nil { + attrs = append(attrs, sc.HTTPSchemeHTTPS) + } else { + attrs = append(attrs, sc.HTTPSchemeHTTP) + } + + if request.Host != "" { + attrs = append(attrs, sc.HTTPHostKey.String(request.Host)) + } else if request.URL != nil && request.URL.Host != "" { + attrs = append(attrs, sc.HTTPHostKey.String(request.URL.Host)) + } + + flavor := "" + if request.ProtoMajor == 1 { + flavor = fmt.Sprintf("1.%d", request.ProtoMinor) + } else if request.ProtoMajor == 2 { + flavor = "2" + } + if flavor != "" { + attrs = append(attrs, sc.HTTPFlavorKey.String(flavor)) + } + + if request.Method != "" { + attrs = append(attrs, sc.HTTPMethodKey.String(request.Method)) + } else { + attrs = append(attrs, sc.HTTPMethodKey.String(http.MethodGet)) + } + + return attrs +} + +// HTTPServerMetricAttributesFromHTTPRequest generates low-cardinality attributes +// to be used with server-side HTTP metrics. +func (sc *SemanticConventions) HTTPServerMetricAttributesFromHTTPRequest(serverName string, request *http.Request) []attribute.KeyValue { + attrs := []attribute.KeyValue{} + if serverName != "" { + attrs = append(attrs, sc.HTTPServerNameKey.String(serverName)) + } + return append(attrs, sc.httpBasicAttributesFromHTTPRequest(request)...) 
+} + +// HTTPServerAttributesFromHTTPRequest generates attributes of the +// http namespace as specified by the OpenTelemetry specification for +// a span on the server side. Currently, only basic authentication is +// supported. +func (sc *SemanticConventions) HTTPServerAttributesFromHTTPRequest(serverName, route string, request *http.Request) []attribute.KeyValue { + attrs := []attribute.KeyValue{ + sc.HTTPTargetKey.String(request.RequestURI), + } + + if serverName != "" { + attrs = append(attrs, sc.HTTPServerNameKey.String(serverName)) + } + if route != "" { + attrs = append(attrs, sc.HTTPRouteKey.String(route)) + } + if values, ok := request.Header["X-Forwarded-For"]; ok && len(values) > 0 { + if addresses := strings.SplitN(values[0], ",", 2); len(addresses) > 0 { + attrs = append(attrs, sc.HTTPClientIPKey.String(addresses[0])) + } + } + + return append(attrs, sc.httpCommonAttributesFromHTTPRequest(request)...) +} + +// HTTPAttributesFromHTTPStatusCode generates attributes of the http +// namespace as specified by the OpenTelemetry specification for a +// span. 
+func (sc *SemanticConventions) HTTPAttributesFromHTTPStatusCode(code int) []attribute.KeyValue { + attrs := []attribute.KeyValue{ + sc.HTTPStatusCodeKey.Int(code), + } + return attrs +} + +type codeRange struct { + fromInclusive int + toInclusive int +} + +func (r codeRange) contains(code int) bool { + return r.fromInclusive <= code && code <= r.toInclusive +} + +var validRangesPerCategory = map[int][]codeRange{ + 1: { + {http.StatusContinue, http.StatusEarlyHints}, + }, + 2: { + {http.StatusOK, http.StatusAlreadyReported}, + {http.StatusIMUsed, http.StatusIMUsed}, + }, + 3: { + {http.StatusMultipleChoices, http.StatusUseProxy}, + {http.StatusTemporaryRedirect, http.StatusPermanentRedirect}, + }, + 4: { + {http.StatusBadRequest, http.StatusTeapot}, // yes, teapot is so useful… + {http.StatusMisdirectedRequest, http.StatusUpgradeRequired}, + {http.StatusPreconditionRequired, http.StatusTooManyRequests}, + {http.StatusRequestHeaderFieldsTooLarge, http.StatusRequestHeaderFieldsTooLarge}, + {http.StatusUnavailableForLegalReasons, http.StatusUnavailableForLegalReasons}, + }, + 5: { + {http.StatusInternalServerError, http.StatusLoopDetected}, + {http.StatusNotExtended, http.StatusNetworkAuthenticationRequired}, + }, +} + +// SpanStatusFromHTTPStatusCode generates a status code and a message +// as specified by the OpenTelemetry specification for a span. +func SpanStatusFromHTTPStatusCode(code int) (codes.Code, string) { + spanCode, valid := validateHTTPStatusCode(code) + if !valid { + return spanCode, fmt.Sprintf("Invalid HTTP status code %d", code) + } + return spanCode, "" +} + +// SpanStatusFromHTTPStatusCodeAndSpanKind generates a status code and a message +// as specified by the OpenTelemetry specification for a span. +// Exclude 4xx for SERVER to set the appropriate status. 
+func SpanStatusFromHTTPStatusCodeAndSpanKind(code int, spanKind trace.SpanKind) (codes.Code, string) { + spanCode, valid := validateHTTPStatusCode(code) + if !valid { + return spanCode, fmt.Sprintf("Invalid HTTP status code %d", code) + } + category := code / 100 + if spanKind == trace.SpanKindServer && category == 4 { + return codes.Unset, "" + } + return spanCode, "" +} + +// validateHTTPStatusCode validates the HTTP status code and returns +// corresponding span status code. If the `code` is not a valid HTTP status +// code, returns span status Error and false. +func validateHTTPStatusCode(code int) (codes.Code, bool) { + category := code / 100 + ranges, ok := validRangesPerCategory[category] + if !ok { + return codes.Error, false + } + ok = false + for _, crange := range ranges { + ok = crange.contains(code) + if ok { + break + } + } + if !ok { + return codes.Error, false + } + if category > 0 && category < 4 { + return codes.Unset, true + } + return codes.Error, true +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go new file mode 100644 index 00000000..71a1f774 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package semconv implements OpenTelemetry semantic conventions. 
+// +// OpenTelemetry semantic conventions are agreed standardized naming +// patterns for OpenTelemetry things. This package represents the conventions +// as of the v1.17.0 version of the OpenTelemetry specification. +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go new file mode 100644 index 00000000..679c40c4 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go @@ -0,0 +1,199 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" + +import "go.opentelemetry.io/otel/attribute" + +// This semantic convention defines the attributes used to represent a feature +// flag evaluation as an event. +const ( + // FeatureFlagKeyKey is the attribute Key conforming to the + // "feature_flag.key" semantic conventions. It represents the unique + // identifier of the feature flag. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'logo-color' + FeatureFlagKeyKey = attribute.Key("feature_flag.key") + + // FeatureFlagProviderNameKey is the attribute Key conforming to the + // "feature_flag.provider_name" semantic conventions. 
It represents the + // name of the service provider that performs the flag evaluation. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'Flag Manager' + FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name") + + // FeatureFlagVariantKey is the attribute Key conforming to the + // "feature_flag.variant" semantic conventions. It represents the sHOULD be + // a semantic identifier for a value. If one is unavailable, a stringified + // version of the value can be used. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'red', 'true', 'on' + // Note: A semantic identifier, commonly referred to as a variant, provides + // a means + // for referring to a value without including the value itself. This can + // provide additional context for understanding the meaning behind a value. + // For example, the variant `red` maybe be used for the value `#c05543`. + // + // A stringified version of the value can be used in situations where a + // semantic identifier is unavailable. String representation of the value + // should be determined by the implementer. + FeatureFlagVariantKey = attribute.Key("feature_flag.variant") +) + +// FeatureFlagKey returns an attribute KeyValue conforming to the +// "feature_flag.key" semantic conventions. It represents the unique identifier +// of the feature flag. +func FeatureFlagKey(val string) attribute.KeyValue { + return FeatureFlagKeyKey.String(val) +} + +// FeatureFlagProviderName returns an attribute KeyValue conforming to the +// "feature_flag.provider_name" semantic conventions. It represents the name of +// the service provider that performs the flag evaluation. +func FeatureFlagProviderName(val string) attribute.KeyValue { + return FeatureFlagProviderNameKey.String(val) +} + +// FeatureFlagVariant returns an attribute KeyValue conforming to the +// "feature_flag.variant" semantic conventions. 
It represents the sHOULD be a +// semantic identifier for a value. If one is unavailable, a stringified +// version of the value can be used. +func FeatureFlagVariant(val string) attribute.KeyValue { + return FeatureFlagVariantKey.String(val) +} + +// RPC received/sent message. +const ( + // MessageTypeKey is the attribute Key conforming to the "message.type" + // semantic conventions. It represents the whether this is a received or + // sent message. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessageTypeKey = attribute.Key("message.type") + + // MessageIDKey is the attribute Key conforming to the "message.id" + // semantic conventions. It represents the mUST be calculated as two + // different counters starting from `1` one for sent messages and one for + // received message. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Note: This way we guarantee that the values will be consistent between + // different implementations. + MessageIDKey = attribute.Key("message.id") + + // MessageCompressedSizeKey is the attribute Key conforming to the + // "message.compressed_size" semantic conventions. It represents the + // compressed size of the message in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + MessageCompressedSizeKey = attribute.Key("message.compressed_size") + + // MessageUncompressedSizeKey is the attribute Key conforming to the + // "message.uncompressed_size" semantic conventions. It represents the + // uncompressed size of the message in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size") +) + +var ( + // sent + MessageTypeSent = MessageTypeKey.String("SENT") + // received + MessageTypeReceived = MessageTypeKey.String("RECEIVED") +) + +// MessageID returns an attribute KeyValue conforming to the "message.id" +// semantic conventions. 
It represents the mUST be calculated as two different +// counters starting from `1` one for sent messages and one for received +// message. +func MessageID(val int) attribute.KeyValue { + return MessageIDKey.Int(val) +} + +// MessageCompressedSize returns an attribute KeyValue conforming to the +// "message.compressed_size" semantic conventions. It represents the compressed +// size of the message in bytes. +func MessageCompressedSize(val int) attribute.KeyValue { + return MessageCompressedSizeKey.Int(val) +} + +// MessageUncompressedSize returns an attribute KeyValue conforming to the +// "message.uncompressed_size" semantic conventions. It represents the +// uncompressed size of the message in bytes. +func MessageUncompressedSize(val int) attribute.KeyValue { + return MessageUncompressedSizeKey.Int(val) +} + +// The attributes used to report a single exception associated with a span. +const ( + // ExceptionEscapedKey is the attribute Key conforming to the + // "exception.escaped" semantic conventions. It represents the sHOULD be + // set to true if the exception event is recorded at a point where it is + // known that the exception is escaping the scope of the span. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + // Note: An exception is considered to have escaped (or left) the scope of + // a span, + // if that span is ended while the exception is still logically "in + // flight". + // This may be actually "in flight" in some languages (e.g. if the + // exception + // is passed to a Context manager's `__exit__` method in Python) but will + // usually be caught at the point of recording the exception in most + // languages. + // + // It is usually not possible to determine at the point where an exception + // is thrown + // whether it will escape the scope of a span. 
+ // However, it is trivial to know that an exception + // will escape, if one checks for an active exception just before ending + // the span, + // as done in the [example above](#recording-an-exception). + // + // It follows that an exception may still escape the scope of the span + // even if the `exception.escaped` attribute was not set or set to false, + // since the event might have been recorded at a time where it was not + // clear whether the exception will escape. + ExceptionEscapedKey = attribute.Key("exception.escaped") +) + +// ExceptionEscaped returns an attribute KeyValue conforming to the +// "exception.escaped" semantic conventions. It represents the sHOULD be set to +// true if the exception event is recorded at a point where it is known that +// the exception is escaping the scope of the span. +func ExceptionEscaped(val bool) attribute.KeyValue { + return ExceptionEscapedKey.Bool(val) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go new file mode 100644 index 00000000..9b8c559d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" + +const ( + // ExceptionEventName is the name of the Span event representing an exception. 
+ ExceptionEventName = "exception" +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go new file mode 100644 index 00000000..d5c4b5c1 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go @@ -0,0 +1,21 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" + +// HTTP scheme attributes. +var ( + HTTPSchemeHTTP = HTTPSchemeKey.String("http") + HTTPSchemeHTTPS = HTTPSchemeKey.String("https") +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go new file mode 100644 index 00000000..39a2eab3 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go @@ -0,0 +1,2010 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" + +import "go.opentelemetry.io/otel/attribute" + +// The web browser in which the application represented by the resource is +// running. The `browser.*` attributes MUST be used only for resources that +// represent applications running in a web browser (regardless of whether +// running on a mobile or desktop device). +const ( + // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" + // semantic conventions. It represents the array of brand name and version + // separated by a space + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.brands`). + BrowserBrandsKey = attribute.Key("browser.brands") + + // BrowserPlatformKey is the attribute Key conforming to the + // "browser.platform" semantic conventions. It represents the platform on + // which the browser is running + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Windows', 'macOS', 'Android' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.platform`). If unavailable, the legacy + // `navigator.platform` API SHOULD NOT be used instead and this attribute + // SHOULD be left unset in order for the values to be consistent. + // The list of possible values is defined in the [W3C User-Agent Client + // Hints + // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). 
+ // Note that some (but not all) of these values can overlap with values in + // the [`os.type` and `os.name` attributes](./os.md). However, for + // consistency, the values in the `browser.platform` attribute should + // capture the exact value that the user agent provides. + BrowserPlatformKey = attribute.Key("browser.platform") + + // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" + // semantic conventions. It represents a boolean that is true if the + // browser is running on a mobile device + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.mobile`). If unavailable, this attribute + // SHOULD be left unset. + BrowserMobileKey = attribute.Key("browser.mobile") + + // BrowserUserAgentKey is the attribute Key conforming to the + // "browser.user_agent" semantic conventions. It represents the full + // user-agent string provided by the browser + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) + // AppleWebKit/537.36 (KHTML, ' + // 'like Gecko) Chrome/95.0.4638.54 Safari/537.36' + // Note: The user-agent value SHOULD be provided only from browsers that do + // not have a mechanism to retrieve brands and platform individually from + // the User-Agent Client Hints API. To retrieve the value, the legacy + // `navigator.userAgent` API can be used. + BrowserUserAgentKey = attribute.Key("browser.user_agent") + + // BrowserLanguageKey is the attribute Key conforming to the + // "browser.language" semantic conventions. 
It represents the preferred + // language of the user using the browser + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'en', 'en-US', 'fr', 'fr-FR' + // Note: This value is intended to be taken from the Navigator API + // `navigator.language`. + BrowserLanguageKey = attribute.Key("browser.language") +) + +// BrowserBrands returns an attribute KeyValue conforming to the +// "browser.brands" semantic conventions. It represents the array of brand name +// and version separated by a space +func BrowserBrands(val ...string) attribute.KeyValue { + return BrowserBrandsKey.StringSlice(val) +} + +// BrowserPlatform returns an attribute KeyValue conforming to the +// "browser.platform" semantic conventions. It represents the platform on which +// the browser is running +func BrowserPlatform(val string) attribute.KeyValue { + return BrowserPlatformKey.String(val) +} + +// BrowserMobile returns an attribute KeyValue conforming to the +// "browser.mobile" semantic conventions. It represents a boolean that is true +// if the browser is running on a mobile device +func BrowserMobile(val bool) attribute.KeyValue { + return BrowserMobileKey.Bool(val) +} + +// BrowserUserAgent returns an attribute KeyValue conforming to the +// "browser.user_agent" semantic conventions. It represents the full user-agent +// string provided by the browser +func BrowserUserAgent(val string) attribute.KeyValue { + return BrowserUserAgentKey.String(val) +} + +// BrowserLanguage returns an attribute KeyValue conforming to the +// "browser.language" semantic conventions. It represents the preferred +// language of the user using the browser +func BrowserLanguage(val string) attribute.KeyValue { + return BrowserLanguageKey.String(val) +} + +// A cloud environment (e.g. GCP, Azure, AWS) +const ( + // CloudProviderKey is the attribute Key conforming to the "cloud.provider" + // semantic conventions. It represents the name of the cloud provider. 
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + CloudProviderKey = attribute.Key("cloud.provider") + + // CloudAccountIDKey is the attribute Key conforming to the + // "cloud.account.id" semantic conventions. It represents the cloud account + // ID the resource is assigned to. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '111111111111', 'opentelemetry' + CloudAccountIDKey = attribute.Key("cloud.account.id") + + // CloudRegionKey is the attribute Key conforming to the "cloud.region" + // semantic conventions. It represents the geographical region the resource + // is running. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'us-central1', 'us-east-1' + // Note: Refer to your provider's docs to see the available regions, for + // example [Alibaba Cloud + // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS + // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), + // [Azure + // regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/), + // [Google Cloud regions](https://cloud.google.com/about/locations), or + // [Tencent Cloud + // regions](https://intl.cloud.tencent.com/document/product/213/6091). + CloudRegionKey = attribute.Key("cloud.region") + + // CloudAvailabilityZoneKey is the attribute Key conforming to the + // "cloud.availability_zone" semantic conventions. It represents the cloud + // regions often have multiple, isolated locations known as zones to + // increase availability. Availability zone represents the zone where the + // resource is running. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'us-east-1c' + // Note: Availability zones are called "zones" on Alibaba Cloud and Google + // Cloud. 
+ CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") + + // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" + // semantic conventions. It represents the cloud platform in use. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Note: The prefix of the service SHOULD match the one specified in + // `cloud.provider`. + CloudPlatformKey = attribute.Key("cloud.platform") +) + +var ( + // Alibaba Cloud + CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") + // Amazon Web Services + CloudProviderAWS = CloudProviderKey.String("aws") + // Microsoft Azure + CloudProviderAzure = CloudProviderKey.String("azure") + // Google Cloud Platform + CloudProviderGCP = CloudProviderKey.String("gcp") + // IBM Cloud + CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") + // Tencent Cloud + CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") +) + +var ( + // Alibaba Cloud Elastic Compute Service + CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") + // Alibaba Cloud Function Compute + CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") + // Red Hat OpenShift on Alibaba Cloud + CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") + // AWS Elastic Compute Cloud + CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") + // AWS Elastic Container Service + CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") + // AWS Elastic Kubernetes Service + CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") + // AWS Lambda + CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") + // AWS Elastic Beanstalk + CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") + // AWS App Runner + CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") + // Red Hat OpenShift on AWS (ROSA) + CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") + // 
Azure Virtual Machines + CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") + // Azure Container Instances + CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") + // Azure Kubernetes Service + CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") + // Azure Functions + CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") + // Azure App Service + CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") + // Azure Red Hat OpenShift + CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") + // Google Cloud Compute Engine (GCE) + CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") + // Google Cloud Run + CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") + // Google Cloud Kubernetes Engine (GKE) + CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") + // Google Cloud Functions (GCF) + CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") + // Google Cloud App Engine (GAE) + CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") + // Red Hat OpenShift on Google Cloud + CloudPlatformGoogleCloudOpenshift = CloudPlatformKey.String("google_cloud_openshift") + // Red Hat OpenShift on IBM Cloud + CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") + // Tencent Cloud Cloud Virtual Machine (CVM) + CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") + // Tencent Cloud Elastic Kubernetes Service (EKS) + CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") + // Tencent Cloud Serverless Cloud Function (SCF) + CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") +) + +// CloudAccountID returns an attribute KeyValue conforming to the +// "cloud.account.id" semantic conventions. It represents the cloud account ID +// the resource is assigned to. 
+func CloudAccountID(val string) attribute.KeyValue { + return CloudAccountIDKey.String(val) +} + +// CloudRegion returns an attribute KeyValue conforming to the +// "cloud.region" semantic conventions. It represents the geographical region +// the resource is running. +func CloudRegion(val string) attribute.KeyValue { + return CloudRegionKey.String(val) +} + +// CloudAvailabilityZone returns an attribute KeyValue conforming to the +// "cloud.availability_zone" semantic conventions. It represents the cloud +// regions often have multiple, isolated locations known as zones to increase +// availability. Availability zone represents the zone where the resource is +// running. +func CloudAvailabilityZone(val string) attribute.KeyValue { + return CloudAvailabilityZoneKey.String(val) +} + +// Resources used by AWS Elastic Container Service (ECS). +const ( + // AWSECSContainerARNKey is the attribute Key conforming to the + // "aws.ecs.container.arn" semantic conventions. It represents the Amazon + // Resource Name (ARN) of an [ECS container + // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' + AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") + + // AWSECSClusterARNKey is the attribute Key conforming to the + // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an + // [ECS + // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") + + // AWSECSLaunchtypeKey is the attribute Key conforming to the + // "aws.ecs.launchtype" semantic conventions. 
It represents the [launch + // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) + // for an ECS task. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") + + // AWSECSTaskARNKey is the attribute Key conforming to the + // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an + // [ECS task + // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") + + // AWSECSTaskFamilyKey is the attribute Key conforming to the + // "aws.ecs.task.family" semantic conventions. It represents the task + // definition family this task definition is a member of. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-family' + AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") + + // AWSECSTaskRevisionKey is the attribute Key conforming to the + // "aws.ecs.task.revision" semantic conventions. It represents the revision + // for this task definition. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '8', '26' + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") +) + +var ( + // ec2 + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // fargate + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// AWSECSContainerARN returns an attribute KeyValue conforming to the +// "aws.ecs.container.arn" semantic conventions. It represents the Amazon +// Resource Name (ARN) of an [ECS container +// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). 
+func AWSECSContainerARN(val string) attribute.KeyValue { + return AWSECSContainerARNKey.String(val) +} + +// AWSECSClusterARN returns an attribute KeyValue conforming to the +// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS +// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). +func AWSECSClusterARN(val string) attribute.KeyValue { + return AWSECSClusterARNKey.String(val) +} + +// AWSECSTaskARN returns an attribute KeyValue conforming to the +// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS +// task +// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). +func AWSECSTaskARN(val string) attribute.KeyValue { + return AWSECSTaskARNKey.String(val) +} + +// AWSECSTaskFamily returns an attribute KeyValue conforming to the +// "aws.ecs.task.family" semantic conventions. It represents the task +// definition family this task definition is a member of. +func AWSECSTaskFamily(val string) attribute.KeyValue { + return AWSECSTaskFamilyKey.String(val) +} + +// AWSECSTaskRevision returns an attribute KeyValue conforming to the +// "aws.ecs.task.revision" semantic conventions. It represents the revision for +// this task definition. +func AWSECSTaskRevision(val string) attribute.KeyValue { + return AWSECSTaskRevisionKey.String(val) +} + +// Resources used by AWS Elastic Kubernetes Service (EKS). +const ( + // AWSEKSClusterARNKey is the attribute Key conforming to the + // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an + // EKS cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") +) + +// AWSEKSClusterARN returns an attribute KeyValue conforming to the +// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS +// cluster. 
+func AWSEKSClusterARN(val string) attribute.KeyValue { + return AWSEKSClusterARNKey.String(val) +} + +// Resources specific to Amazon Web Services. +const ( + // AWSLogGroupNamesKey is the attribute Key conforming to the + // "aws.log.group.names" semantic conventions. It represents the name(s) of + // the AWS log group(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like + // multi-container applications, where a single application has sidecar + // containers, and each write to their own log group. + AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") + + // AWSLogGroupARNsKey is the attribute Key conforming to the + // "aws.log.group.arns" semantic conventions. It represents the Amazon + // Resource Name(s) (ARN) of the AWS log group(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the [log group ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") + + // AWSLogStreamNamesKey is the attribute Key conforming to the + // "aws.log.stream.names" semantic conventions. It represents the name(s) + // of the AWS log stream(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") + + // AWSLogStreamARNsKey is the attribute Key conforming to the + // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of + // the AWS log stream(s). 
+ // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the [log stream ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + // One log group can contain several log streams, so these ARNs necessarily + // identify both a log group and a log stream. + AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") +) + +// AWSLogGroupNames returns an attribute KeyValue conforming to the +// "aws.log.group.names" semantic conventions. It represents the name(s) of the +// AWS log group(s) an application is writing to. +func AWSLogGroupNames(val ...string) attribute.KeyValue { + return AWSLogGroupNamesKey.StringSlice(val) +} + +// AWSLogGroupARNs returns an attribute KeyValue conforming to the +// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource +// Name(s) (ARN) of the AWS log group(s). +func AWSLogGroupARNs(val ...string) attribute.KeyValue { + return AWSLogGroupARNsKey.StringSlice(val) +} + +// AWSLogStreamNames returns an attribute KeyValue conforming to the +// "aws.log.stream.names" semantic conventions. It represents the name(s) of +// the AWS log stream(s) an application is writing to. +func AWSLogStreamNames(val ...string) attribute.KeyValue { + return AWSLogStreamNamesKey.StringSlice(val) +} + +// AWSLogStreamARNs returns an attribute KeyValue conforming to the +// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the +// AWS log stream(s). +func AWSLogStreamARNs(val ...string) attribute.KeyValue { + return AWSLogStreamARNsKey.StringSlice(val) +} + +// A container instance. +const ( + // ContainerNameKey is the attribute Key conforming to the "container.name" + // semantic conventions. It represents the container name used by container + // runtime. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-autoconf' + ContainerNameKey = attribute.Key("container.name") + + // ContainerIDKey is the attribute Key conforming to the "container.id" + // semantic conventions. It represents the container ID. Usually a UUID, as + // for example used to [identify Docker + // containers](https://docs.docker.com/engine/reference/run/#container-identification). + // The UUID might be abbreviated. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'a3bf90e006b2' + ContainerIDKey = attribute.Key("container.id") + + // ContainerRuntimeKey is the attribute Key conforming to the + // "container.runtime" semantic conventions. It represents the container + // runtime managing this container. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'docker', 'containerd', 'rkt' + ContainerRuntimeKey = attribute.Key("container.runtime") + + // ContainerImageNameKey is the attribute Key conforming to the + // "container.image.name" semantic conventions. It represents the name of + // the image the container was built on. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'gcr.io/opentelemetry/operator' + ContainerImageNameKey = attribute.Key("container.image.name") + + // ContainerImageTagKey is the attribute Key conforming to the + // "container.image.tag" semantic conventions. It represents the container + // image tag. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0.1' + ContainerImageTagKey = attribute.Key("container.image.tag") +) + +// ContainerName returns an attribute KeyValue conforming to the +// "container.name" semantic conventions. It represents the container name used +// by container runtime. 
+func ContainerName(val string) attribute.KeyValue { + return ContainerNameKey.String(val) +} + +// ContainerID returns an attribute KeyValue conforming to the +// "container.id" semantic conventions. It represents the container ID. Usually +// a UUID, as for example used to [identify Docker +// containers](https://docs.docker.com/engine/reference/run/#container-identification). +// The UUID might be abbreviated. +func ContainerID(val string) attribute.KeyValue { + return ContainerIDKey.String(val) +} + +// ContainerRuntime returns an attribute KeyValue conforming to the +// "container.runtime" semantic conventions. It represents the container +// runtime managing this container. +func ContainerRuntime(val string) attribute.KeyValue { + return ContainerRuntimeKey.String(val) +} + +// ContainerImageName returns an attribute KeyValue conforming to the +// "container.image.name" semantic conventions. It represents the name of the +// image the container was built on. +func ContainerImageName(val string) attribute.KeyValue { + return ContainerImageNameKey.String(val) +} + +// ContainerImageTag returns an attribute KeyValue conforming to the +// "container.image.tag" semantic conventions. It represents the container +// image tag. +func ContainerImageTag(val string) attribute.KeyValue { + return ContainerImageTagKey.String(val) +} + +// The software deployment. +const ( + // DeploymentEnvironmentKey is the attribute Key conforming to the + // "deployment.environment" semantic conventions. It represents the name of + // the [deployment + // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka + // deployment tier). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'staging', 'production' + DeploymentEnvironmentKey = attribute.Key("deployment.environment") +) + +// DeploymentEnvironment returns an attribute KeyValue conforming to the +// "deployment.environment" semantic conventions. 
It represents the name of the +// [deployment +// environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka +// deployment tier). +func DeploymentEnvironment(val string) attribute.KeyValue { + return DeploymentEnvironmentKey.String(val) +} + +// The device on which the process represented by this resource is running. +const ( + // DeviceIDKey is the attribute Key conforming to the "device.id" semantic + // conventions. It represents a unique identifier representing the device + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values + // outlined below. This value is not an advertising identifier and MUST NOT + // be used as such. On iOS (Swift or Objective-C), this value MUST be equal + // to the [vendor + // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). + // On Android (Java or Kotlin), this value MUST be equal to the Firebase + // Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found + // [here](https://developer.android.com/training/articles/user-data-ids) on + // best practices and exact implementation details. Caution should be taken + // when storing personal data or anything which can identify a user. GDPR + // and data protection laws may apply, ensure you do your own due + // diligence. + DeviceIDKey = attribute.Key("device.id") + + // DeviceModelIdentifierKey is the attribute Key conforming to the + // "device.model.identifier" semantic conventions. 
It represents the model + // identifier for the device + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine readable version + // of the model identifier rather than the market or consumer-friendly name + // of the device. + DeviceModelIdentifierKey = attribute.Key("device.model.identifier") + + // DeviceModelNameKey is the attribute Key conforming to the + // "device.model.name" semantic conventions. It represents the marketing + // name for the device model + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human readable version of + // the device model rather than a machine readable alternative. + DeviceModelNameKey = attribute.Key("device.model.name") + + // DeviceManufacturerKey is the attribute Key conforming to the + // "device.manufacturer" semantic conventions. It represents the name of + // the device manufacturer + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Apple', 'Samsung' + // Note: The Android OS provides this field via + // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). + // iOS apps SHOULD hardcode the value `Apple`. + DeviceManufacturerKey = attribute.Key("device.manufacturer") +) + +// DeviceID returns an attribute KeyValue conforming to the "device.id" +// semantic conventions. It represents a unique identifier representing the +// device +func DeviceID(val string) attribute.KeyValue { + return DeviceIDKey.String(val) +} + +// DeviceModelIdentifier returns an attribute KeyValue conforming to the +// "device.model.identifier" semantic conventions. 
It represents the model +// identifier for the device +func DeviceModelIdentifier(val string) attribute.KeyValue { + return DeviceModelIdentifierKey.String(val) +} + +// DeviceModelName returns an attribute KeyValue conforming to the +// "device.model.name" semantic conventions. It represents the marketing name +// for the device model +func DeviceModelName(val string) attribute.KeyValue { + return DeviceModelNameKey.String(val) +} + +// DeviceManufacturer returns an attribute KeyValue conforming to the +// "device.manufacturer" semantic conventions. It represents the name of the +// device manufacturer +func DeviceManufacturer(val string) attribute.KeyValue { + return DeviceManufacturerKey.String(val) +} + +// A serverless instance. +const ( + // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic + // conventions. It represents the name of the single function that this + // runtime instance executes. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'my-function', 'myazurefunctionapp/some-function-name' + // Note: This is the name of the function as configured/deployed on the + // FaaS + // platform and is usually different from the name of the callback + // function (which may be stored in the + // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-general.md#source-code-attributes) + // span attributes). + // + // For some cloud providers, the above definition is ambiguous. The + // following + // definition of function name MUST be used for this attribute + // (and consequently the span name) for the listed cloud + // providers/products: + // + // * **Azure:** The full name `/`, i.e., function app name + // followed by a forward slash followed by the function name (this form + // can also be seen in the resource JSON for the function). 
+ // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider (see also the `faas.id` attribute). + FaaSNameKey = attribute.Key("faas.name") + + // FaaSIDKey is the attribute Key conforming to the "faas.id" semantic + // conventions. It represents the unique ID of the single function that + // this runtime instance executes. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function' + // Note: On some cloud providers, it may not be possible to determine the + // full ID at startup, + // so consider setting `faas.id` as a span attribute instead. + // + // The exact value to use for `faas.id` depends on the cloud provider: + // + // * **AWS Lambda:** The function + // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). + // Take care not to use the "invoked ARN" directly but replace any + // [alias + // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) + // with the resolved function version, as the same runtime instance may + // be invokable with + // multiple different aliases. + // * **GCP:** The [URI of the + // resource](https://cloud.google.com/iam/docs/full-resource-names) + // * **Azure:** The [Fully Qualified Resource + // ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id) + // of the invoked function, + // *not* the function app, having the form + // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider. + FaaSIDKey = attribute.Key("faas.id") + + // FaaSVersionKey is the attribute Key conforming to the "faas.version" + // semantic conventions. 
It represents the immutable version of the + // function being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '26', 'pinkfroid-00002' + // Note: Depending on the cloud provider and platform, use: + // + // * **AWS Lambda:** The [function + // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) + // (an integer represented as a decimal string). + // * **Google Cloud Run:** The + // [revision](https://cloud.google.com/run/docs/managing/revisions) + // (i.e., the function name plus the revision suffix). + // * **Google Cloud Functions:** The value of the + // [`K_REVISION` environment + // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). + // * **Azure Functions:** Not applicable. Do not set this attribute. + FaaSVersionKey = attribute.Key("faas.version") + + // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" + // semantic conventions. It represents the execution environment ID as a + // string, that will be potentially reused for other invocations to the + // same function/function version. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' + // Note: * **AWS Lambda:** Use the (full) log stream name. + FaaSInstanceKey = attribute.Key("faas.instance") + + // FaaSMaxMemoryKey is the attribute Key conforming to the + // "faas.max_memory" semantic conventions. It represents the amount of + // memory available to the serverless function in MiB. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 128 + // Note: It's recommended to set this attribute since e.g. too little + // memory can easily stop a Java AWS Lambda function from working + // correctly. On AWS Lambda, the environment variable + // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information. 
+ FaaSMaxMemoryKey = attribute.Key("faas.max_memory") +) + +// FaaSName returns an attribute KeyValue conforming to the "faas.name" +// semantic conventions. It represents the name of the single function that +// this runtime instance executes. +func FaaSName(val string) attribute.KeyValue { + return FaaSNameKey.String(val) +} + +// FaaSID returns an attribute KeyValue conforming to the "faas.id" semantic +// conventions. It represents the unique ID of the single function that this +// runtime instance executes. +func FaaSID(val string) attribute.KeyValue { + return FaaSIDKey.String(val) +} + +// FaaSVersion returns an attribute KeyValue conforming to the +// "faas.version" semantic conventions. It represents the immutable version of +// the function being executed. +func FaaSVersion(val string) attribute.KeyValue { + return FaaSVersionKey.String(val) +} + +// FaaSInstance returns an attribute KeyValue conforming to the +// "faas.instance" semantic conventions. It represents the execution +// environment ID as a string, that will be potentially reused for other +// invocations to the same function/function version. +func FaaSInstance(val string) attribute.KeyValue { + return FaaSInstanceKey.String(val) +} + +// FaaSMaxMemory returns an attribute KeyValue conforming to the +// "faas.max_memory" semantic conventions. It represents the amount of memory +// available to the serverless function in MiB. +func FaaSMaxMemory(val int) attribute.KeyValue { + return FaaSMaxMemoryKey.Int(val) +} + +// A host is defined as a general computing instance. +const ( + // HostIDKey is the attribute Key conforming to the "host.id" semantic + // conventions. It represents the unique host ID. For Cloud, this must be + // the instance_id assigned by the cloud provider. For non-containerized + // Linux systems, the `machine-id` located in `/etc/machine-id` or + // `/var/lib/dbus/machine-id` may be used. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'fdbf79e8af94cb7f9e8df36789187052' + HostIDKey = attribute.Key("host.id") + + // HostNameKey is the attribute Key conforming to the "host.name" semantic + // conventions. It represents the name of the host. On Unix systems, it may + // contain what the hostname command returns, or the fully qualified + // hostname, or another name specified by the user. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-test' + HostNameKey = attribute.Key("host.name") + + // HostTypeKey is the attribute Key conforming to the "host.type" semantic + // conventions. It represents the type of host. For Cloud, this must be the + // machine type. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'n1-standard-1' + HostTypeKey = attribute.Key("host.type") + + // HostArchKey is the attribute Key conforming to the "host.arch" semantic + // conventions. It represents the CPU architecture the host system is + // running on. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + HostArchKey = attribute.Key("host.arch") + + // HostImageNameKey is the attribute Key conforming to the + // "host.image.name" semantic conventions. It represents the name of the VM + // image or OS install the host was instantiated from. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' + HostImageNameKey = attribute.Key("host.image.name") + + // HostImageIDKey is the attribute Key conforming to the "host.image.id" + // semantic conventions. It represents the vM image ID. For Cloud, this + // value is from the provider. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'ami-07b06b442921831e5' + HostImageIDKey = attribute.Key("host.image.id") + + // HostImageVersionKey is the attribute Key conforming to the + // "host.image.version" semantic conventions. It represents the version + // string of the VM image as defined in [Version + // Attributes](README.md#version-attributes). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0.1' + HostImageVersionKey = attribute.Key("host.image.version") +) + +var ( + // AMD64 + HostArchAMD64 = HostArchKey.String("amd64") + // ARM32 + HostArchARM32 = HostArchKey.String("arm32") + // ARM64 + HostArchARM64 = HostArchKey.String("arm64") + // Itanium + HostArchIA64 = HostArchKey.String("ia64") + // 32-bit PowerPC + HostArchPPC32 = HostArchKey.String("ppc32") + // 64-bit PowerPC + HostArchPPC64 = HostArchKey.String("ppc64") + // IBM z/Architecture + HostArchS390x = HostArchKey.String("s390x") + // 32-bit x86 + HostArchX86 = HostArchKey.String("x86") +) + +// HostID returns an attribute KeyValue conforming to the "host.id" semantic +// conventions. It represents the unique host ID. For Cloud, this must be the +// instance_id assigned by the cloud provider. For non-containerized Linux +// systems, the `machine-id` located in `/etc/machine-id` or +// `/var/lib/dbus/machine-id` may be used. +func HostID(val string) attribute.KeyValue { + return HostIDKey.String(val) +} + +// HostName returns an attribute KeyValue conforming to the "host.name" +// semantic conventions. It represents the name of the host. On Unix systems, +// it may contain what the hostname command returns, or the fully qualified +// hostname, or another name specified by the user. +func HostName(val string) attribute.KeyValue { + return HostNameKey.String(val) +} + +// HostType returns an attribute KeyValue conforming to the "host.type" +// semantic conventions. It represents the type of host. 
For Cloud, this must +// be the machine type. +func HostType(val string) attribute.KeyValue { + return HostTypeKey.String(val) +} + +// HostImageName returns an attribute KeyValue conforming to the +// "host.image.name" semantic conventions. It represents the name of the VM +// image or OS install the host was instantiated from. +func HostImageName(val string) attribute.KeyValue { + return HostImageNameKey.String(val) +} + +// HostImageID returns an attribute KeyValue conforming to the +// "host.image.id" semantic conventions. It represents the vM image ID. For +// Cloud, this value is from the provider. +func HostImageID(val string) attribute.KeyValue { + return HostImageIDKey.String(val) +} + +// HostImageVersion returns an attribute KeyValue conforming to the +// "host.image.version" semantic conventions. It represents the version string +// of the VM image as defined in [Version +// Attributes](README.md#version-attributes). +func HostImageVersion(val string) attribute.KeyValue { + return HostImageVersionKey.String(val) +} + +// A Kubernetes Cluster. +const ( + // K8SClusterNameKey is the attribute Key conforming to the + // "k8s.cluster.name" semantic conventions. It represents the name of the + // cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-cluster' + K8SClusterNameKey = attribute.Key("k8s.cluster.name") +) + +// K8SClusterName returns an attribute KeyValue conforming to the +// "k8s.cluster.name" semantic conventions. It represents the name of the +// cluster. +func K8SClusterName(val string) attribute.KeyValue { + return K8SClusterNameKey.String(val) +} + +// A Kubernetes Node object. +const ( + // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" + // semantic conventions. It represents the name of the Node. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'node-1' + K8SNodeNameKey = attribute.Key("k8s.node.name") + + // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" + // semantic conventions. It represents the UID of the Node. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' + K8SNodeUIDKey = attribute.Key("k8s.node.uid") +) + +// K8SNodeName returns an attribute KeyValue conforming to the +// "k8s.node.name" semantic conventions. It represents the name of the Node. +func K8SNodeName(val string) attribute.KeyValue { + return K8SNodeNameKey.String(val) +} + +// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" +// semantic conventions. It represents the UID of the Node. +func K8SNodeUID(val string) attribute.KeyValue { + return K8SNodeUIDKey.String(val) +} + +// A Kubernetes Namespace. +const ( + // K8SNamespaceNameKey is the attribute Key conforming to the + // "k8s.namespace.name" semantic conventions. It represents the name of the + // namespace that the pod is running in. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'default' + K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") +) + +// K8SNamespaceName returns an attribute KeyValue conforming to the +// "k8s.namespace.name" semantic conventions. It represents the name of the +// namespace that the pod is running in. +func K8SNamespaceName(val string) attribute.KeyValue { + return K8SNamespaceNameKey.String(val) +} + +// A Kubernetes Pod object. +const ( + // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" + // semantic conventions. It represents the UID of the Pod. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SPodUIDKey = attribute.Key("k8s.pod.uid") + + // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" + // semantic conventions. It represents the name of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-pod-autoconf' + K8SPodNameKey = attribute.Key("k8s.pod.name") +) + +// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" +// semantic conventions. It represents the UID of the Pod. +func K8SPodUID(val string) attribute.KeyValue { + return K8SPodUIDKey.String(val) +} + +// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" +// semantic conventions. It represents the name of the Pod. +func K8SPodName(val string) attribute.KeyValue { + return K8SPodNameKey.String(val) +} + +// A container in a +// [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). +const ( + // K8SContainerNameKey is the attribute Key conforming to the + // "k8s.container.name" semantic conventions. It represents the name of the + // Container from Pod specification, must be unique within a Pod. Container + // runtime usually uses different globally unique name (`container.name`). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'redis' + K8SContainerNameKey = attribute.Key("k8s.container.name") + + // K8SContainerRestartCountKey is the attribute Key conforming to the + // "k8s.container.restart_count" semantic conventions. It represents the + // number of times the container was restarted. This attribute can be used + // to identify a particular container (running or stopped) within a + // container spec. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 0, 2 + K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") +) + +// K8SContainerName returns an attribute KeyValue conforming to the +// "k8s.container.name" semantic conventions. It represents the name of the +// Container from Pod specification, must be unique within a Pod. Container +// runtime usually uses different globally unique name (`container.name`). +func K8SContainerName(val string) attribute.KeyValue { + return K8SContainerNameKey.String(val) +} + +// K8SContainerRestartCount returns an attribute KeyValue conforming to the +// "k8s.container.restart_count" semantic conventions. It represents the number +// of times the container was restarted. This attribute can be used to identify +// a particular container (running or stopped) within a container spec. +func K8SContainerRestartCount(val int) attribute.KeyValue { + return K8SContainerRestartCountKey.Int(val) +} + +// A Kubernetes ReplicaSet object. +const ( + // K8SReplicaSetUIDKey is the attribute Key conforming to the + // "k8s.replicaset.uid" semantic conventions. It represents the UID of the + // ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") + + // K8SReplicaSetNameKey is the attribute Key conforming to the + // "k8s.replicaset.name" semantic conventions. It represents the name of + // the ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") +) + +// K8SReplicaSetUID returns an attribute KeyValue conforming to the +// "k8s.replicaset.uid" semantic conventions. It represents the UID of the +// ReplicaSet. 
+func K8SReplicaSetUID(val string) attribute.KeyValue { + return K8SReplicaSetUIDKey.String(val) +} + +// K8SReplicaSetName returns an attribute KeyValue conforming to the +// "k8s.replicaset.name" semantic conventions. It represents the name of the +// ReplicaSet. +func K8SReplicaSetName(val string) attribute.KeyValue { + return K8SReplicaSetNameKey.String(val) +} + +// A Kubernetes Deployment object. +const ( + // K8SDeploymentUIDKey is the attribute Key conforming to the + // "k8s.deployment.uid" semantic conventions. It represents the UID of the + // Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + + // K8SDeploymentNameKey is the attribute Key conforming to the + // "k8s.deployment.name" semantic conventions. It represents the name of + // the Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") +) + +// K8SDeploymentUID returns an attribute KeyValue conforming to the +// "k8s.deployment.uid" semantic conventions. It represents the UID of the +// Deployment. +func K8SDeploymentUID(val string) attribute.KeyValue { + return K8SDeploymentUIDKey.String(val) +} + +// K8SDeploymentName returns an attribute KeyValue conforming to the +// "k8s.deployment.name" semantic conventions. It represents the name of the +// Deployment. +func K8SDeploymentName(val string) attribute.KeyValue { + return K8SDeploymentNameKey.String(val) +} + +// A Kubernetes StatefulSet object. +const ( + // K8SStatefulSetUIDKey is the attribute Key conforming to the + // "k8s.statefulset.uid" semantic conventions. It represents the UID of the + // StatefulSet. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") + + // K8SStatefulSetNameKey is the attribute Key conforming to the + // "k8s.statefulset.name" semantic conventions. It represents the name of + // the StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") +) + +// K8SStatefulSetUID returns an attribute KeyValue conforming to the +// "k8s.statefulset.uid" semantic conventions. It represents the UID of the +// StatefulSet. +func K8SStatefulSetUID(val string) attribute.KeyValue { + return K8SStatefulSetUIDKey.String(val) +} + +// K8SStatefulSetName returns an attribute KeyValue conforming to the +// "k8s.statefulset.name" semantic conventions. It represents the name of the +// StatefulSet. +func K8SStatefulSetName(val string) attribute.KeyValue { + return K8SStatefulSetNameKey.String(val) +} + +// A Kubernetes DaemonSet object. +const ( + // K8SDaemonSetUIDKey is the attribute Key conforming to the + // "k8s.daemonset.uid" semantic conventions. It represents the UID of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") + + // K8SDaemonSetNameKey is the attribute Key conforming to the + // "k8s.daemonset.name" semantic conventions. It represents the name of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") +) + +// K8SDaemonSetUID returns an attribute KeyValue conforming to the +// "k8s.daemonset.uid" semantic conventions. It represents the UID of the +// DaemonSet. 
+func K8SDaemonSetUID(val string) attribute.KeyValue { + return K8SDaemonSetUIDKey.String(val) +} + +// K8SDaemonSetName returns an attribute KeyValue conforming to the +// "k8s.daemonset.name" semantic conventions. It represents the name of the +// DaemonSet. +func K8SDaemonSetName(val string) attribute.KeyValue { + return K8SDaemonSetNameKey.String(val) +} + +// A Kubernetes Job object. +const ( + // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" + // semantic conventions. It represents the UID of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SJobUIDKey = attribute.Key("k8s.job.uid") + + // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" + // semantic conventions. It represents the name of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SJobNameKey = attribute.Key("k8s.job.name") +) + +// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" +// semantic conventions. It represents the UID of the Job. +func K8SJobUID(val string) attribute.KeyValue { + return K8SJobUIDKey.String(val) +} + +// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" +// semantic conventions. It represents the name of the Job. +func K8SJobName(val string) attribute.KeyValue { + return K8SJobNameKey.String(val) +} + +// A Kubernetes CronJob object. +const ( + // K8SCronJobUIDKey is the attribute Key conforming to the + // "k8s.cronjob.uid" semantic conventions. It represents the UID of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") + + // K8SCronJobNameKey is the attribute Key conforming to the + // "k8s.cronjob.name" semantic conventions. It represents the name of the + // CronJob. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") +) + +// K8SCronJobUID returns an attribute KeyValue conforming to the +// "k8s.cronjob.uid" semantic conventions. It represents the UID of the +// CronJob. +func K8SCronJobUID(val string) attribute.KeyValue { + return K8SCronJobUIDKey.String(val) +} + +// K8SCronJobName returns an attribute KeyValue conforming to the +// "k8s.cronjob.name" semantic conventions. It represents the name of the +// CronJob. +func K8SCronJobName(val string) attribute.KeyValue { + return K8SCronJobNameKey.String(val) +} + +// The operating system (OS) on which the process represented by this resource +// is running. +const ( + // OSTypeKey is the attribute Key conforming to the "os.type" semantic + // conventions. It represents the operating system type. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + OSTypeKey = attribute.Key("os.type") + + // OSDescriptionKey is the attribute Key conforming to the "os.description" + // semantic conventions. It represents the human readable (not intended to + // be parsed) OS version information, like e.g. reported by `ver` or + // `lsb_release -a` commands. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 + // LTS' + OSDescriptionKey = attribute.Key("os.description") + + // OSNameKey is the attribute Key conforming to the "os.name" semantic + // conventions. It represents the human readable operating system name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iOS', 'Android', 'Ubuntu' + OSNameKey = attribute.Key("os.name") + + // OSVersionKey is the attribute Key conforming to the "os.version" + // semantic conventions. 
It represents the version string of the operating + // system as defined in [Version + // Attributes](../../resource/semantic_conventions/README.md#version-attributes). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '14.2.1', '18.04.1' + OSVersionKey = attribute.Key("os.version") +) + +var ( + // Microsoft Windows + OSTypeWindows = OSTypeKey.String("windows") + // Linux + OSTypeLinux = OSTypeKey.String("linux") + // Apple Darwin + OSTypeDarwin = OSTypeKey.String("darwin") + // FreeBSD + OSTypeFreeBSD = OSTypeKey.String("freebsd") + // NetBSD + OSTypeNetBSD = OSTypeKey.String("netbsd") + // OpenBSD + OSTypeOpenBSD = OSTypeKey.String("openbsd") + // DragonFly BSD + OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") + // HP-UX (Hewlett Packard Unix) + OSTypeHPUX = OSTypeKey.String("hpux") + // AIX (Advanced Interactive eXecutive) + OSTypeAIX = OSTypeKey.String("aix") + // SunOS, Oracle Solaris + OSTypeSolaris = OSTypeKey.String("solaris") + // IBM z/OS + OSTypeZOS = OSTypeKey.String("z_os") +) + +// OSDescription returns an attribute KeyValue conforming to the +// "os.description" semantic conventions. It represents the human readable (not +// intended to be parsed) OS version information, like e.g. reported by `ver` +// or `lsb_release -a` commands. +func OSDescription(val string) attribute.KeyValue { + return OSDescriptionKey.String(val) +} + +// OSName returns an attribute KeyValue conforming to the "os.name" semantic +// conventions. It represents the human readable operating system name. +func OSName(val string) attribute.KeyValue { + return OSNameKey.String(val) +} + +// OSVersion returns an attribute KeyValue conforming to the "os.version" +// semantic conventions. It represents the version string of the operating +// system as defined in [Version +// Attributes](../../resource/semantic_conventions/README.md#version-attributes). 
+func OSVersion(val string) attribute.KeyValue { + return OSVersionKey.String(val) +} + +// An operating system process. +const ( + // ProcessPIDKey is the attribute Key conforming to the "process.pid" + // semantic conventions. It represents the process identifier (PID). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 1234 + ProcessPIDKey = attribute.Key("process.pid") + + // ProcessParentPIDKey is the attribute Key conforming to the + // "process.parent_pid" semantic conventions. It represents the parent + // Process identifier (PID). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 111 + ProcessParentPIDKey = attribute.Key("process.parent_pid") + + // ProcessExecutableNameKey is the attribute Key conforming to the + // "process.executable.name" semantic conventions. It represents the name + // of the process executable. On Linux based systems, can be set to the + // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name + // of `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'otelcol' + ProcessExecutableNameKey = attribute.Key("process.executable.name") + + // ProcessExecutablePathKey is the attribute Key conforming to the + // "process.executable.path" semantic conventions. It represents the full + // path to the process executable. On Linux based systems, can be set to + // the target of `proc/[pid]/exe`. On Windows, can be set to the result of + // `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: '/usr/bin/cmd/otelcol' + ProcessExecutablePathKey = attribute.Key("process.executable.path") + + // ProcessCommandKey is the attribute Key conforming to the + // "process.command" semantic conventions. 
It represents the command used + // to launch the process (i.e. the command name). On Linux based systems, + // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can + // be set to the first parameter extracted from `GetCommandLineW`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'cmd/otelcol' + ProcessCommandKey = attribute.Key("process.command") + + // ProcessCommandLineKey is the attribute Key conforming to the + // "process.command_line" semantic conventions. It represents the full + // command used to launch the process as a single string representing the + // full command. On Windows, can be set to the result of `GetCommandLineW`. + // Do not set this if you have to assemble it just for monitoring; use + // `process.command_args` instead. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' + ProcessCommandLineKey = attribute.Key("process.command_line") + + // ProcessCommandArgsKey is the attribute Key conforming to the + // "process.command_args" semantic conventions. It represents the all the + // command arguments (including the command/executable itself) as received + // by the process. On Linux-based systems (and some other Unixoid systems + // supporting procfs), can be set according to the list of null-delimited + // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, + // this would be the full argv vector passed to `main`. + // + // Type: string[] + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) 
+ // Stability: stable + // Examples: 'cmd/otecol', '--config=config.yaml' + ProcessCommandArgsKey = attribute.Key("process.command_args") + + // ProcessOwnerKey is the attribute Key conforming to the "process.owner" + // semantic conventions. It represents the username of the user that owns + // the process. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'root' + ProcessOwnerKey = attribute.Key("process.owner") +) + +// ProcessPID returns an attribute KeyValue conforming to the "process.pid" +// semantic conventions. It represents the process identifier (PID). +func ProcessPID(val int) attribute.KeyValue { + return ProcessPIDKey.Int(val) +} + +// ProcessParentPID returns an attribute KeyValue conforming to the +// "process.parent_pid" semantic conventions. It represents the parent Process +// identifier (PID). +func ProcessParentPID(val int) attribute.KeyValue { + return ProcessParentPIDKey.Int(val) +} + +// ProcessExecutableName returns an attribute KeyValue conforming to the +// "process.executable.name" semantic conventions. It represents the name of +// the process executable. On Linux based systems, can be set to the `Name` in +// `proc/[pid]/status`. On Windows, can be set to the base name of +// `GetProcessImageFileNameW`. +func ProcessExecutableName(val string) attribute.KeyValue { + return ProcessExecutableNameKey.String(val) +} + +// ProcessExecutablePath returns an attribute KeyValue conforming to the +// "process.executable.path" semantic conventions. It represents the full path +// to the process executable. On Linux based systems, can be set to the target +// of `proc/[pid]/exe`. On Windows, can be set to the result of +// `GetProcessImageFileNameW`. +func ProcessExecutablePath(val string) attribute.KeyValue { + return ProcessExecutablePathKey.String(val) +} + +// ProcessCommand returns an attribute KeyValue conforming to the +// "process.command" semantic conventions. 
It represents the command used to +// launch the process (i.e. the command name). On Linux based systems, can be +// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to +// the first parameter extracted from `GetCommandLineW`. +func ProcessCommand(val string) attribute.KeyValue { + return ProcessCommandKey.String(val) +} + +// ProcessCommandLine returns an attribute KeyValue conforming to the +// "process.command_line" semantic conventions. It represents the full command +// used to launch the process as a single string representing the full command. +// On Windows, can be set to the result of `GetCommandLineW`. Do not set this +// if you have to assemble it just for monitoring; use `process.command_args` +// instead. +func ProcessCommandLine(val string) attribute.KeyValue { + return ProcessCommandLineKey.String(val) +} + +// ProcessCommandArgs returns an attribute KeyValue conforming to the +// "process.command_args" semantic conventions. It represents the all the +// command arguments (including the command/executable itself) as received by +// the process. On Linux-based systems (and some other Unixoid systems +// supporting procfs), can be set according to the list of null-delimited +// strings extracted from `proc/[pid]/cmdline`. For libc-based executables, +// this would be the full argv vector passed to `main`. +func ProcessCommandArgs(val ...string) attribute.KeyValue { + return ProcessCommandArgsKey.StringSlice(val) +} + +// ProcessOwner returns an attribute KeyValue conforming to the +// "process.owner" semantic conventions. It represents the username of the user +// that owns the process. +func ProcessOwner(val string) attribute.KeyValue { + return ProcessOwnerKey.String(val) +} + +// The single (language) runtime instance which is monitored. +const ( + // ProcessRuntimeNameKey is the attribute Key conforming to the + // "process.runtime.name" semantic conventions. It represents the name of + // the runtime of this process. 
For compiled native binaries, this SHOULD + // be the name of the compiler. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'OpenJDK Runtime Environment' + ProcessRuntimeNameKey = attribute.Key("process.runtime.name") + + // ProcessRuntimeVersionKey is the attribute Key conforming to the + // "process.runtime.version" semantic conventions. It represents the + // version of the runtime of this process, as returned by the runtime + // without modification. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '14.0.2' + ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") + + // ProcessRuntimeDescriptionKey is the attribute Key conforming to the + // "process.runtime.description" semantic conventions. It represents an + // additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") +) + +// ProcessRuntimeName returns an attribute KeyValue conforming to the +// "process.runtime.name" semantic conventions. It represents the name of the +// runtime of this process. For compiled native binaries, this SHOULD be the +// name of the compiler. +func ProcessRuntimeName(val string) attribute.KeyValue { + return ProcessRuntimeNameKey.String(val) +} + +// ProcessRuntimeVersion returns an attribute KeyValue conforming to the +// "process.runtime.version" semantic conventions. It represents the version of +// the runtime of this process, as returned by the runtime without +// modification. 
+func ProcessRuntimeVersion(val string) attribute.KeyValue { + return ProcessRuntimeVersionKey.String(val) +} + +// ProcessRuntimeDescription returns an attribute KeyValue conforming to the +// "process.runtime.description" semantic conventions. It represents an +// additional description about the runtime of the process, for example a +// specific vendor customization of the runtime environment. +func ProcessRuntimeDescription(val string) attribute.KeyValue { + return ProcessRuntimeDescriptionKey.String(val) +} + +// A service instance. +const ( + // ServiceNameKey is the attribute Key conforming to the "service.name" + // semantic conventions. It represents the logical name of the service. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'shoppingcart' + // Note: MUST be the same for all instances of horizontally scaled + // services. If the value was not specified, SDKs MUST fallback to + // `unknown_service:` concatenated with + // [`process.executable.name`](process.md#process), e.g. + // `unknown_service:bash`. If `process.executable.name` is not available, + // the value MUST be set to `unknown_service`. + ServiceNameKey = attribute.Key("service.name") + + // ServiceNamespaceKey is the attribute Key conforming to the + // "service.namespace" semantic conventions. It represents a namespace for + // `service.name`. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Shop' + // Note: A string value having a meaning that helps to distinguish a group + // of services, for example the team name that owns a group of services. + // `service.name` is expected to be unique within the same namespace. If + // `service.namespace` is not specified in the Resource then `service.name` + // is expected to be unique for all services that have no explicit + // namespace defined (so the empty/unspecified namespace is simply one more + // valid namespace). 
Zero-length namespace string is assumed equal to + // unspecified namespace. + ServiceNamespaceKey = attribute.Key("service.namespace") + + // ServiceInstanceIDKey is the attribute Key conforming to the + // "service.instance.id" semantic conventions. It represents the string ID + // of the service instance. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '627cc493-f310-47de-96bd-71410b7dec09' + // Note: MUST be unique for each instance of the same + // `service.namespace,service.name` pair (in other words + // `service.namespace,service.name,service.instance.id` triplet MUST be + // globally unique). The ID helps to distinguish instances of the same + // service that exist at the same time (e.g. instances of a horizontally + // scaled service). It is preferable for the ID to be persistent and stay + // the same for the lifetime of the service instance, however it is + // acceptable that the ID is ephemeral and changes during important + // lifetime events for the service (e.g. service restarts). If the service + // has no inherent unique ID that can be used as the value of this + // attribute it is recommended to generate a random Version 1 or Version 4 + // RFC 4122 UUID (services aiming for reproducible UUIDs may also use + // Version 5, see RFC 4122 for more recommendations). + ServiceInstanceIDKey = attribute.Key("service.instance.id") + + // ServiceVersionKey is the attribute Key conforming to the + // "service.version" semantic conventions. It represents the version string + // of the service API or implementation. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2.0.0' + ServiceVersionKey = attribute.Key("service.version") +) + +// ServiceName returns an attribute KeyValue conforming to the +// "service.name" semantic conventions. It represents the logical name of the +// service. 
+func ServiceName(val string) attribute.KeyValue { + return ServiceNameKey.String(val) +} + +// ServiceNamespace returns an attribute KeyValue conforming to the +// "service.namespace" semantic conventions. It represents a namespace for +// `service.name`. +func ServiceNamespace(val string) attribute.KeyValue { + return ServiceNamespaceKey.String(val) +} + +// ServiceInstanceID returns an attribute KeyValue conforming to the +// "service.instance.id" semantic conventions. It represents the string ID of +// the service instance. +func ServiceInstanceID(val string) attribute.KeyValue { + return ServiceInstanceIDKey.String(val) +} + +// ServiceVersion returns an attribute KeyValue conforming to the +// "service.version" semantic conventions. It represents the version string of +// the service API or implementation. +func ServiceVersion(val string) attribute.KeyValue { + return ServiceVersionKey.String(val) +} + +// The telemetry SDK used to capture data recorded by the instrumentation +// libraries. +const ( + // TelemetrySDKNameKey is the attribute Key conforming to the + // "telemetry.sdk.name" semantic conventions. It represents the name of the + // telemetry SDK as defined above. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + + // TelemetrySDKLanguageKey is the attribute Key conforming to the + // "telemetry.sdk.language" semantic conventions. It represents the + // language of the telemetry SDK. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") + + // TelemetrySDKVersionKey is the attribute Key conforming to the + // "telemetry.sdk.version" semantic conventions. It represents the version + // string of the telemetry SDK. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.2.3' + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") + + // TelemetryAutoVersionKey is the attribute Key conforming to the + // "telemetry.auto.version" semantic conventions. It represents the version + // string of the auto instrumentation agent, if used. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.2.3' + TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") +) + +var ( + // cpp + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // webjs + TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") + // swift + TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") +) + +// TelemetrySDKName returns an attribute KeyValue conforming to the +// "telemetry.sdk.name" semantic conventions. It represents the name of the +// telemetry SDK as defined above. +func TelemetrySDKName(val string) attribute.KeyValue { + return TelemetrySDKNameKey.String(val) +} + +// TelemetrySDKVersion returns an attribute KeyValue conforming to the +// "telemetry.sdk.version" semantic conventions. It represents the version +// string of the telemetry SDK. 
+func TelemetrySDKVersion(val string) attribute.KeyValue { + return TelemetrySDKVersionKey.String(val) +} + +// TelemetryAutoVersion returns an attribute KeyValue conforming to the +// "telemetry.auto.version" semantic conventions. It represents the version +// string of the auto instrumentation agent, if used. +func TelemetryAutoVersion(val string) attribute.KeyValue { + return TelemetryAutoVersionKey.String(val) +} + +// Resource describing the packaged software running the application code. Web +// engines are typically executed using process.runtime. +const ( + // WebEngineNameKey is the attribute Key conforming to the "webengine.name" + // semantic conventions. It represents the name of the web engine. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'WildFly' + WebEngineNameKey = attribute.Key("webengine.name") + + // WebEngineVersionKey is the attribute Key conforming to the + // "webengine.version" semantic conventions. It represents the version of + // the web engine. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '21.0.0' + WebEngineVersionKey = attribute.Key("webengine.version") + + // WebEngineDescriptionKey is the attribute Key conforming to the + // "webengine.description" semantic conventions. It represents the + // additional description of the web engine (e.g. detailed version and + // edition information). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - + // 2.2.2.Final' + WebEngineDescriptionKey = attribute.Key("webengine.description") +) + +// WebEngineName returns an attribute KeyValue conforming to the +// "webengine.name" semantic conventions. It represents the name of the web +// engine. 
+func WebEngineName(val string) attribute.KeyValue { + return WebEngineNameKey.String(val) +} + +// WebEngineVersion returns an attribute KeyValue conforming to the +// "webengine.version" semantic conventions. It represents the version of the +// web engine. +func WebEngineVersion(val string) attribute.KeyValue { + return WebEngineVersionKey.String(val) +} + +// WebEngineDescription returns an attribute KeyValue conforming to the +// "webengine.description" semantic conventions. It represents the additional +// description of the web engine (e.g. detailed version and edition +// information). +func WebEngineDescription(val string) attribute.KeyValue { + return WebEngineDescriptionKey.String(val) +} + +// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's +// concepts. +const ( + // OtelScopeNameKey is the attribute Key conforming to the + // "otel.scope.name" semantic conventions. It represents the name of the + // instrumentation scope - (`InstrumentationScope.Name` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'io.opentelemetry.contrib.mongodb' + OtelScopeNameKey = attribute.Key("otel.scope.name") + + // OtelScopeVersionKey is the attribute Key conforming to the + // "otel.scope.version" semantic conventions. It represents the version of + // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.0.0' + OtelScopeVersionKey = attribute.Key("otel.scope.version") +) + +// OtelScopeName returns an attribute KeyValue conforming to the +// "otel.scope.name" semantic conventions. It represents the name of the +// instrumentation scope - (`InstrumentationScope.Name` in OTLP). +func OtelScopeName(val string) attribute.KeyValue { + return OtelScopeNameKey.String(val) +} + +// OtelScopeVersion returns an attribute KeyValue conforming to the +// "otel.scope.version" semantic conventions. 
It represents the version of the +// instrumentation scope - (`InstrumentationScope.Version` in OTLP). +func OtelScopeVersion(val string) attribute.KeyValue { + return OtelScopeVersionKey.String(val) +} + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry +// Scope's concepts. +const ( + // OtelLibraryNameKey is the attribute Key conforming to the + // "otel.library.name" semantic conventions. It represents the deprecated, + // use the `otel.scope.name` attribute. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'io.opentelemetry.contrib.mongodb' + OtelLibraryNameKey = attribute.Key("otel.library.name") + + // OtelLibraryVersionKey is the attribute Key conforming to the + // "otel.library.version" semantic conventions. It represents the + // deprecated, use the `otel.scope.version` attribute. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '1.0.0' + OtelLibraryVersionKey = attribute.Key("otel.library.version") +) + +// OtelLibraryName returns an attribute KeyValue conforming to the +// "otel.library.name" semantic conventions. It represents the deprecated, use +// the `otel.scope.name` attribute. +func OtelLibraryName(val string) attribute.KeyValue { + return OtelLibraryNameKey.String(val) +} + +// OtelLibraryVersion returns an attribute KeyValue conforming to the +// "otel.library.version" semantic conventions. It represents the deprecated, +// use the `otel.scope.version` attribute. 
+func OtelLibraryVersion(val string) attribute.KeyValue { + return OtelLibraryVersionKey.String(val) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go new file mode 100644 index 00000000..42fc525d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" + +// SchemaURL is the schema URL that matches the version of the semantic conventions +// that this package defines. Semconv packages starting from v1.4.0 must declare +// non-empty schema URL in the form https://opentelemetry.io/schemas/ +const SchemaURL = "https://opentelemetry.io/schemas/1.17.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go new file mode 100644 index 00000000..8c4a7299 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go @@ -0,0 +1,3375 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" + +import "go.opentelemetry.io/otel/attribute" + +// The shared attributes used to report a single exception associated with a +// span or log. +const ( + // ExceptionTypeKey is the attribute Key conforming to the "exception.type" + // semantic conventions. It represents the type of the exception (its + // fully-qualified class name, if applicable). The dynamic type of the + // exception should be preferred over the static type in languages that + // support it. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'java.net.ConnectException', 'OSError' + ExceptionTypeKey = attribute.Key("exception.type") + + // ExceptionMessageKey is the attribute Key conforming to the + // "exception.message" semantic conventions. It represents the exception + // message. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Division by zero', "Can't convert 'int' object to str + // implicitly" + ExceptionMessageKey = attribute.Key("exception.message") + + // ExceptionStacktraceKey is the attribute Key conforming to the + // "exception.stacktrace" semantic conventions. It represents a stacktrace + // as a string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test + // exception\\n at ' + // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + ExceptionStacktraceKey = attribute.Key("exception.stacktrace") +) + +// ExceptionType returns an attribute KeyValue conforming to the +// "exception.type" semantic conventions. It represents the type of the +// exception (its fully-qualified class name, if applicable). The dynamic type +// of the exception should be preferred over the static type in languages that +// support it. +func ExceptionType(val string) attribute.KeyValue { + return ExceptionTypeKey.String(val) +} + +// ExceptionMessage returns an attribute KeyValue conforming to the +// "exception.message" semantic conventions. It represents the exception +// message. +func ExceptionMessage(val string) attribute.KeyValue { + return ExceptionMessageKey.String(val) +} + +// ExceptionStacktrace returns an attribute KeyValue conforming to the +// "exception.stacktrace" semantic conventions. It represents a stacktrace as a +// string in the natural representation for the language runtime. The +// representation is to be determined and documented by each language SIG. +func ExceptionStacktrace(val string) attribute.KeyValue { + return ExceptionStacktraceKey.String(val) +} + +// Attributes for Events represented using Log Records. +const ( + // EventNameKey is the attribute Key conforming to the "event.name" + // semantic conventions. It represents the name identifies the event. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'click', 'exception' + EventNameKey = attribute.Key("event.name") + + // EventDomainKey is the attribute Key conforming to the "event.domain" + // semantic conventions. 
It represents the domain identifies the business + // context for the events. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: Events across different domains may have same `event.name`, yet be + // unrelated events. + EventDomainKey = attribute.Key("event.domain") +) + +var ( + // Events from browser apps + EventDomainBrowser = EventDomainKey.String("browser") + // Events from mobile apps + EventDomainDevice = EventDomainKey.String("device") + // Events from Kubernetes + EventDomainK8S = EventDomainKey.String("k8s") +) + +// EventName returns an attribute KeyValue conforming to the "event.name" +// semantic conventions. It represents the name identifies the event. +func EventName(val string) attribute.KeyValue { + return EventNameKey.String(val) +} + +// Span attributes used by AWS Lambda (in addition to general `faas` +// attributes). +const ( + // AWSLambdaInvokedARNKey is the attribute Key conforming to the + // "aws.lambda.invoked_arn" semantic conventions. It represents the full + // invoked ARN as provided on the `Context` passed to the function + // (`Lambda-Runtime-Invoked-Function-ARN` header on the + // `/runtime/invocation/next` applicable). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' + // Note: This may be different from `faas.id` if an alias is involved. + AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") +) + +// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the +// "aws.lambda.invoked_arn" semantic conventions. It represents the full +// invoked ARN as provided on the `Context` passed to the function +// (`Lambda-Runtime-Invoked-Function-ARN` header on the +// `/runtime/invocation/next` applicable). +func AWSLambdaInvokedARN(val string) attribute.KeyValue { + return AWSLambdaInvokedARNKey.String(val) +} + +// Attributes for CloudEvents. 
CloudEvents is a specification on how to define +// event data in a standard way. These attributes can be attached to spans when +// performing operations with CloudEvents, regardless of the protocol being +// used. +const ( + // CloudeventsEventIDKey is the attribute Key conforming to the + // "cloudevents.event_id" semantic conventions. It represents the + // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) + // uniquely identifies the event. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' + CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") + + // CloudeventsEventSourceKey is the attribute Key conforming to the + // "cloudevents.event_source" semantic conventions. It represents the + // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) + // identifies the context in which an event happened. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'https://github.com/cloudevents', + // '/cloudevents/spec/pull/123', 'my-service' + CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") + + // CloudeventsEventSpecVersionKey is the attribute Key conforming to the + // "cloudevents.event_spec_version" semantic conventions. It represents the + // [version of the CloudEvents + // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) + // which the event uses. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.0' + CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") + + // CloudeventsEventTypeKey is the attribute Key conforming to the + // "cloudevents.event_type" semantic conventions. 
It represents the + // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) + // contains a value describing the type of event related to the originating + // occurrence. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'com.github.pull_request.opened', + // 'com.example.object.deleted.v2' + CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") + + // CloudeventsEventSubjectKey is the attribute Key conforming to the + // "cloudevents.event_subject" semantic conventions. It represents the + // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) + // of the event in the context of the event producer (identified by + // source). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'mynewfile.jpg' + CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") +) + +// CloudeventsEventID returns an attribute KeyValue conforming to the +// "cloudevents.event_id" semantic conventions. It represents the +// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) +// uniquely identifies the event. +func CloudeventsEventID(val string) attribute.KeyValue { + return CloudeventsEventIDKey.String(val) +} + +// CloudeventsEventSource returns an attribute KeyValue conforming to the +// "cloudevents.event_source" semantic conventions. It represents the +// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) +// identifies the context in which an event happened. +func CloudeventsEventSource(val string) attribute.KeyValue { + return CloudeventsEventSourceKey.String(val) +} + +// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to +// the "cloudevents.event_spec_version" semantic conventions. 
It represents the +// [version of the CloudEvents +// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) +// which the event uses. +func CloudeventsEventSpecVersion(val string) attribute.KeyValue { + return CloudeventsEventSpecVersionKey.String(val) +} + +// CloudeventsEventType returns an attribute KeyValue conforming to the +// "cloudevents.event_type" semantic conventions. It represents the +// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) +// contains a value describing the type of event related to the originating +// occurrence. +func CloudeventsEventType(val string) attribute.KeyValue { + return CloudeventsEventTypeKey.String(val) +} + +// CloudeventsEventSubject returns an attribute KeyValue conforming to the +// "cloudevents.event_subject" semantic conventions. It represents the +// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) +// of the event in the context of the event producer (identified by source). +func CloudeventsEventSubject(val string) attribute.KeyValue { + return CloudeventsEventSubjectKey.String(val) +} + +// Semantic conventions for the OpenTracing Shim +const ( + // OpentracingRefTypeKey is the attribute Key conforming to the + // "opentracing.ref_type" semantic conventions. It represents the + // parent-child Reference type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Note: The causal relationship between a child Span and a parent Span. + OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") +) + +var ( + // The parent Span depends on the child Span in some capacity + OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") + // The parent Span does not depend in any way on the result of the child Span + OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") +) + +// The attributes used to perform database client calls. 
+const ( + // DBSystemKey is the attribute Key conforming to the "db.system" semantic + // conventions. It represents an identifier for the database management + // system (DBMS) product being used. See below for a list of well-known + // identifiers. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + DBSystemKey = attribute.Key("db.system") + + // DBConnectionStringKey is the attribute Key conforming to the + // "db.connection_string" semantic conventions. It represents the + // connection string used to connect to the database. It is recommended to + // remove embedded credentials. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' + DBConnectionStringKey = attribute.Key("db.connection_string") + + // DBUserKey is the attribute Key conforming to the "db.user" semantic + // conventions. It represents the username for accessing the database. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'readonly_user', 'reporting_user' + DBUserKey = attribute.Key("db.user") + + // DBJDBCDriverClassnameKey is the attribute Key conforming to the + // "db.jdbc.driver_classname" semantic conventions. It represents the + // fully-qualified class name of the [Java Database Connectivity + // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) + // driver used to connect. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'org.postgresql.Driver', + // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' + DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") + + // DBNameKey is the attribute Key conforming to the "db.name" semantic + // conventions. It represents the this attribute is used to report the name + // of the database being accessed. 
For commands that switch the database, + // this should be set to the target database (even if the command fails). + // + // Type: string + // RequirementLevel: ConditionallyRequired (If applicable.) + // Stability: stable + // Examples: 'customers', 'main' + // Note: In some SQL databases, the database name to be used is called + // "schema name". In case there are multiple layers that could be + // considered for database name (e.g. Oracle instance name and schema + // name), the database name to be used is the more specific layer (e.g. + // Oracle schema name). + DBNameKey = attribute.Key("db.name") + + // DBStatementKey is the attribute Key conforming to the "db.statement" + // semantic conventions. It represents the database statement being + // executed. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If applicable and not + // explicitly disabled via instrumentation configuration.) + // Stability: stable + // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' + // Note: The value may be sanitized to exclude sensitive information. + DBStatementKey = attribute.Key("db.statement") + + // DBOperationKey is the attribute Key conforming to the "db.operation" + // semantic conventions. It represents the name of the operation being + // executed, e.g. the [MongoDB command + // name](https://docs.mongodb.com/manual/reference/command/#database-operations) + // such as `findAndModify`, or the SQL keyword. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If `db.statement` is not + // applicable.) + // Stability: stable + // Examples: 'findAndModify', 'HMSET', 'SELECT' + // Note: When setting this to an SQL keyword, it is not recommended to + // attempt any client-side parsing of `db.statement` just to get this + // property, but it should be set if the operation name is provided by the + // library being instrumented. 
If the SQL statement has an ambiguous + // operation, or performs more than one operation, this value may be + // omitted. + DBOperationKey = attribute.Key("db.operation") +) + +var ( + // Some other SQL database. Fallback only. See notes + DBSystemOtherSQL = DBSystemKey.String("other_sql") + // Microsoft SQL Server + DBSystemMSSQL = DBSystemKey.String("mssql") + // MySQL + DBSystemMySQL = DBSystemKey.String("mysql") + // Oracle Database + DBSystemOracle = DBSystemKey.String("oracle") + // IBM DB2 + DBSystemDB2 = DBSystemKey.String("db2") + // PostgreSQL + DBSystemPostgreSQL = DBSystemKey.String("postgresql") + // Amazon Redshift + DBSystemRedshift = DBSystemKey.String("redshift") + // Apache Hive + DBSystemHive = DBSystemKey.String("hive") + // Cloudscape + DBSystemCloudscape = DBSystemKey.String("cloudscape") + // HyperSQL DataBase + DBSystemHSQLDB = DBSystemKey.String("hsqldb") + // Progress Database + DBSystemProgress = DBSystemKey.String("progress") + // SAP MaxDB + DBSystemMaxDB = DBSystemKey.String("maxdb") + // SAP HANA + DBSystemHanaDB = DBSystemKey.String("hanadb") + // Ingres + DBSystemIngres = DBSystemKey.String("ingres") + // FirstSQL + DBSystemFirstSQL = DBSystemKey.String("firstsql") + // EnterpriseDB + DBSystemEDB = DBSystemKey.String("edb") + // InterSystems Caché + DBSystemCache = DBSystemKey.String("cache") + // Adabas (Adaptable Database System) + DBSystemAdabas = DBSystemKey.String("adabas") + // Firebird + DBSystemFirebird = DBSystemKey.String("firebird") + // Apache Derby + DBSystemDerby = DBSystemKey.String("derby") + // FileMaker + DBSystemFilemaker = DBSystemKey.String("filemaker") + // Informix + DBSystemInformix = DBSystemKey.String("informix") + // InstantDB + DBSystemInstantDB = DBSystemKey.String("instantdb") + // InterBase + DBSystemInterbase = DBSystemKey.String("interbase") + // MariaDB + DBSystemMariaDB = DBSystemKey.String("mariadb") + // Netezza + DBSystemNetezza = DBSystemKey.String("netezza") + // Pervasive PSQL + 
DBSystemPervasive = DBSystemKey.String("pervasive") + // PointBase + DBSystemPointbase = DBSystemKey.String("pointbase") + // SQLite + DBSystemSqlite = DBSystemKey.String("sqlite") + // Sybase + DBSystemSybase = DBSystemKey.String("sybase") + // Teradata + DBSystemTeradata = DBSystemKey.String("teradata") + // Vertica + DBSystemVertica = DBSystemKey.String("vertica") + // H2 + DBSystemH2 = DBSystemKey.String("h2") + // ColdFusion IMQ + DBSystemColdfusion = DBSystemKey.String("coldfusion") + // Apache Cassandra + DBSystemCassandra = DBSystemKey.String("cassandra") + // Apache HBase + DBSystemHBase = DBSystemKey.String("hbase") + // MongoDB + DBSystemMongoDB = DBSystemKey.String("mongodb") + // Redis + DBSystemRedis = DBSystemKey.String("redis") + // Couchbase + DBSystemCouchbase = DBSystemKey.String("couchbase") + // CouchDB + DBSystemCouchDB = DBSystemKey.String("couchdb") + // Microsoft Azure Cosmos DB + DBSystemCosmosDB = DBSystemKey.String("cosmosdb") + // Amazon DynamoDB + DBSystemDynamoDB = DBSystemKey.String("dynamodb") + // Neo4j + DBSystemNeo4j = DBSystemKey.String("neo4j") + // Apache Geode + DBSystemGeode = DBSystemKey.String("geode") + // Elasticsearch + DBSystemElasticsearch = DBSystemKey.String("elasticsearch") + // Memcached + DBSystemMemcached = DBSystemKey.String("memcached") + // CockroachDB + DBSystemCockroachdb = DBSystemKey.String("cockroachdb") + // OpenSearch + DBSystemOpensearch = DBSystemKey.String("opensearch") + // ClickHouse + DBSystemClickhouse = DBSystemKey.String("clickhouse") +) + +// DBConnectionString returns an attribute KeyValue conforming to the +// "db.connection_string" semantic conventions. It represents the connection +// string used to connect to the database. It is recommended to remove embedded +// credentials. +func DBConnectionString(val string) attribute.KeyValue { + return DBConnectionStringKey.String(val) +} + +// DBUser returns an attribute KeyValue conforming to the "db.user" semantic +// conventions. 
It represents the username for accessing the database. +func DBUser(val string) attribute.KeyValue { + return DBUserKey.String(val) +} + +// DBJDBCDriverClassname returns an attribute KeyValue conforming to the +// "db.jdbc.driver_classname" semantic conventions. It represents the +// fully-qualified class name of the [Java Database Connectivity +// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver +// used to connect. +func DBJDBCDriverClassname(val string) attribute.KeyValue { + return DBJDBCDriverClassnameKey.String(val) +} + +// DBName returns an attribute KeyValue conforming to the "db.name" semantic +// conventions. It represents the this attribute is used to report the name of +// the database being accessed. For commands that switch the database, this +// should be set to the target database (even if the command fails). +func DBName(val string) attribute.KeyValue { + return DBNameKey.String(val) +} + +// DBStatement returns an attribute KeyValue conforming to the +// "db.statement" semantic conventions. It represents the database statement +// being executed. +func DBStatement(val string) attribute.KeyValue { + return DBStatementKey.String(val) +} + +// DBOperation returns an attribute KeyValue conforming to the +// "db.operation" semantic conventions. It represents the name of the operation +// being executed, e.g. the [MongoDB command +// name](https://docs.mongodb.com/manual/reference/command/#database-operations) +// such as `findAndModify`, or the SQL keyword. +func DBOperation(val string) attribute.KeyValue { + return DBOperationKey.String(val) +} + +// Connection-level attributes for Microsoft SQL Server +const ( + // DBMSSQLInstanceNameKey is the attribute Key conforming to the + // "db.mssql.instance_name" semantic conventions. It represents the + // Microsoft SQL Server [instance + // name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) + // connecting to. 
This name is used to determine the port of a named + // instance. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MSSQLSERVER' + // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no + // longer required (but still recommended if non-standard). + DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") +) + +// DBMSSQLInstanceName returns an attribute KeyValue conforming to the +// "db.mssql.instance_name" semantic conventions. It represents the Microsoft +// SQL Server [instance +// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) +// connecting to. This name is used to determine the port of a named instance. +func DBMSSQLInstanceName(val string) attribute.KeyValue { + return DBMSSQLInstanceNameKey.String(val) +} + +// Call-level attributes for Cassandra +const ( + // DBCassandraPageSizeKey is the attribute Key conforming to the + // "db.cassandra.page_size" semantic conventions. It represents the fetch + // size used for paging, i.e. how many rows will be returned at once. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 5000 + DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") + + // DBCassandraConsistencyLevelKey is the attribute Key conforming to the + // "db.cassandra.consistency_level" semantic conventions. It represents the + // consistency level of the query. Based on consistency values from + // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") + + // DBCassandraTableKey is the attribute Key conforming to the + // "db.cassandra.table" semantic conventions. 
It represents the name of the + // primary table that the operation is acting upon, including the keyspace + // name (if applicable). + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'mytable' + // Note: This mirrors the db.sql.table attribute but references cassandra + // rather than sql. It is not recommended to attempt any client-side + // parsing of `db.statement` just to get this property, but it should be + // set if it is provided by the library being instrumented. If the + // operation is acting upon an anonymous table, or more than one table, + // this value MUST NOT be set. + DBCassandraTableKey = attribute.Key("db.cassandra.table") + + // DBCassandraIdempotenceKey is the attribute Key conforming to the + // "db.cassandra.idempotence" semantic conventions. It represents the + // whether or not the query is idempotent. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") + + // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming + // to the "db.cassandra.speculative_execution_count" semantic conventions. + // It represents the number of times a query was speculatively executed. + // Not set or `0` if the query was not executed speculatively. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 0, 2 + DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") + + // DBCassandraCoordinatorIDKey is the attribute Key conforming to the + // "db.cassandra.coordinator.id" semantic conventions. It represents the ID + // of the coordinating node for a query. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' + DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") + + // DBCassandraCoordinatorDCKey is the attribute Key conforming to the + // "db.cassandra.coordinator.dc" semantic conventions. It represents the + // data center of the coordinating node for a query. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'us-west-2' + DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") +) + +var ( + // all + DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") + // each_quorum + DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") + // quorum + DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") + // local_quorum + DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") + // one + DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") + // two + DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") + // three + DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") + // local_one + DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") + // any + DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") + // serial + DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") + // local_serial + DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") +) + +// DBCassandraPageSize returns an attribute KeyValue conforming to the +// "db.cassandra.page_size" semantic conventions. It represents the fetch size +// used for paging, i.e. how many rows will be returned at once. 
+func DBCassandraPageSize(val int) attribute.KeyValue { + return DBCassandraPageSizeKey.Int(val) +} + +// DBCassandraTable returns an attribute KeyValue conforming to the +// "db.cassandra.table" semantic conventions. It represents the name of the +// primary table that the operation is acting upon, including the keyspace name +// (if applicable). +func DBCassandraTable(val string) attribute.KeyValue { + return DBCassandraTableKey.String(val) +} + +// DBCassandraIdempotence returns an attribute KeyValue conforming to the +// "db.cassandra.idempotence" semantic conventions. It represents the whether +// or not the query is idempotent. +func DBCassandraIdempotence(val bool) attribute.KeyValue { + return DBCassandraIdempotenceKey.Bool(val) +} + +// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue +// conforming to the "db.cassandra.speculative_execution_count" semantic +// conventions. It represents the number of times a query was speculatively +// executed. Not set or `0` if the query was not executed speculatively. +func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue { + return DBCassandraSpeculativeExecutionCountKey.Int(val) +} + +// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the +// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of +// the coordinating node for a query. +func DBCassandraCoordinatorID(val string) attribute.KeyValue { + return DBCassandraCoordinatorIDKey.String(val) +} + +// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the +// "db.cassandra.coordinator.dc" semantic conventions. It represents the data +// center of the coordinating node for a query. +func DBCassandraCoordinatorDC(val string) attribute.KeyValue { + return DBCassandraCoordinatorDCKey.String(val) +} + +// Call-level attributes for Redis +const ( + // DBRedisDBIndexKey is the attribute Key conforming to the + // "db.redis.database_index" semantic conventions. 
It represents the index + // of the database being accessed as used in the [`SELECT` + // command](https://redis.io/commands/select), provided as an integer. To + // be used instead of the generic `db.name` attribute. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If other than the default + // database (`0`).) + // Stability: stable + // Examples: 0, 1, 15 + DBRedisDBIndexKey = attribute.Key("db.redis.database_index") +) + +// DBRedisDBIndex returns an attribute KeyValue conforming to the +// "db.redis.database_index" semantic conventions. It represents the index of +// the database being accessed as used in the [`SELECT` +// command](https://redis.io/commands/select), provided as an integer. To be +// used instead of the generic `db.name` attribute. +func DBRedisDBIndex(val int) attribute.KeyValue { + return DBRedisDBIndexKey.Int(val) +} + +// Call-level attributes for MongoDB +const ( + // DBMongoDBCollectionKey is the attribute Key conforming to the + // "db.mongodb.collection" semantic conventions. It represents the + // collection being accessed within the database stated in `db.name`. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'customers', 'products' + DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") +) + +// DBMongoDBCollection returns an attribute KeyValue conforming to the +// "db.mongodb.collection" semantic conventions. It represents the collection +// being accessed within the database stated in `db.name`. +func DBMongoDBCollection(val string) attribute.KeyValue { + return DBMongoDBCollectionKey.String(val) +} + +// Call-level attributes for SQL databases +const ( + // DBSQLTableKey is the attribute Key conforming to the "db.sql.table" + // semantic conventions. It represents the name of the primary table that + // the operation is acting upon, including the database name (if + // applicable). 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'public.users', 'customers' + // Note: It is not recommended to attempt any client-side parsing of + // `db.statement` just to get this property, but it should be set if it is + // provided by the library being instrumented. If the operation is acting + // upon an anonymous table, or more than one table, this value MUST NOT be + // set. + DBSQLTableKey = attribute.Key("db.sql.table") +) + +// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table" +// semantic conventions. It represents the name of the primary table that the +// operation is acting upon, including the database name (if applicable). +func DBSQLTable(val string) attribute.KeyValue { + return DBSQLTableKey.String(val) +} + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's +// concepts. +const ( + // OtelStatusCodeKey is the attribute Key conforming to the + // "otel.status_code" semantic conventions. It represents the name of the + // code, either "OK" or "ERROR". MUST NOT be set if the status code is + // UNSET. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + OtelStatusCodeKey = attribute.Key("otel.status_code") + + // OtelStatusDescriptionKey is the attribute Key conforming to the + // "otel.status_description" semantic conventions. It represents the + // description of the Status if it has a value, otherwise not set. 
	//
	// Type: string
	// RequirementLevel: Optional
	// Stability: stable
	// Examples: 'resource not found'
	OtelStatusDescriptionKey = attribute.Key("otel.status_description")
)

// Enum values for "otel.status_code".
var (
	// The operation has been validated by an Application developer or Operator to have completed successfully
	OtelStatusCodeOk = OtelStatusCodeKey.String("OK")
	// The operation contains an error
	OtelStatusCodeError = OtelStatusCodeKey.String("ERROR")
)

// OtelStatusDescription returns an attribute KeyValue conforming to the
// "otel.status_description" semantic conventions. It represents the
// description of the Status if it has a value, otherwise not set.
func OtelStatusDescription(val string) attribute.KeyValue {
	return OtelStatusDescriptionKey.String(val)
}

// This semantic convention describes an instance of a function that runs
// without provisioning or managing of servers (also known as serverless
// functions or Function as a Service (FaaS)) with spans.
const (
	// FaaSTriggerKey is the attribute Key conforming to the "faas.trigger"
	// semantic conventions. It represents the type of the trigger which caused
	// this function execution.
	//
	// Type: Enum
	// RequirementLevel: Optional
	// Stability: stable
	// Note: For the server/consumer span on the incoming side,
	// `faas.trigger` MUST be set.
	//
	// Clients invoking FaaS instances usually cannot set `faas.trigger`,
	// since they would typically need to look in the payload to determine
	// the event type. If clients set it, it should be the same as the
	// trigger that corresponding incoming would have (i.e., this has
	// nothing to do with the underlying transport used to make the API
	// call to invoke the lambda, which is often HTTP).
	FaaSTriggerKey = attribute.Key("faas.trigger")

	// FaaSExecutionKey is the attribute Key conforming to the "faas.execution"
	// semantic conventions. It represents the execution ID of the current
	// function execution.
	//
	// Type: string
	// RequirementLevel: Optional
	// Stability: stable
	// Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
	FaaSExecutionKey = attribute.Key("faas.execution")
)

// Enum values for "faas.trigger".
var (
	// A response to some data source operation such as a database or filesystem read/write
	FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
	// To provide an answer to an inbound HTTP request
	FaaSTriggerHTTP = FaaSTriggerKey.String("http")
	// A function is set to be executed when messages are sent to a messaging system
	FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub")
	// A function is scheduled to be executed regularly
	FaaSTriggerTimer = FaaSTriggerKey.String("timer")
	// If none of the others apply
	FaaSTriggerOther = FaaSTriggerKey.String("other")
)

// FaaSExecution returns an attribute KeyValue conforming to the
// "faas.execution" semantic conventions. It represents the execution ID of the
// current function execution.
func FaaSExecution(val string) attribute.KeyValue {
	return FaaSExecutionKey.String(val)
}

// Semantic Convention for FaaS triggered as a response to some data source
// operation such as a database or filesystem read/write.
const (
	// FaaSDocumentCollectionKey is the attribute Key conforming to the
	// "faas.document.collection" semantic conventions. It represents the name
	// of the source on which the triggering operation was performed. For
	// example, in Cloud Storage or S3 corresponds to the bucket name, and in
	// Cosmos DB to the database name.
	//
	// Type: string
	// RequirementLevel: Required
	// Stability: stable
	// Examples: 'myBucketName', 'myDBName'
	FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")

	// FaaSDocumentOperationKey is the attribute Key conforming to the
	// "faas.document.operation" semantic conventions. It represents the type
	// of the operation that was performed on the data.
	//
	// Type: Enum
	// RequirementLevel: Required
	// Stability: stable
	FaaSDocumentOperationKey = attribute.Key("faas.document.operation")

	// FaaSDocumentTimeKey is the attribute Key conforming to the
	// "faas.document.time" semantic conventions. It represents a string
	// containing the time when the data was accessed in the [ISO
	// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
	// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
	//
	// Type: string
	// RequirementLevel: Optional
	// Stability: stable
	// Examples: '2020-01-23T13:47:06Z'
	FaaSDocumentTimeKey = attribute.Key("faas.document.time")

	// FaaSDocumentNameKey is the attribute Key conforming to the
	// "faas.document.name" semantic conventions. It represents the document
	// name/table subjected to the operation. For example, in Cloud Storage or
	// S3 is the name of the file, and in Cosmos DB the table name.
	//
	// Type: string
	// RequirementLevel: Optional
	// Stability: stable
	// Examples: 'myFile.txt', 'myTableName'
	FaaSDocumentNameKey = attribute.Key("faas.document.name")
)

// Enum values for "faas.document.operation".
var (
	// When a new object is created
	FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
	// When an object is modified
	FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
	// When an object is deleted
	FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
)

// FaaSDocumentCollection returns an attribute KeyValue conforming to the
// "faas.document.collection" semantic conventions. It represents the name of
// the source on which the triggering operation was performed. For example, in
// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the
// database name.
func FaaSDocumentCollection(val string) attribute.KeyValue {
	return FaaSDocumentCollectionKey.String(val)
}

// FaaSDocumentTime returns an attribute KeyValue conforming to the
// "faas.document.time" semantic conventions. It represents a string containing
// the time when the data was accessed in the [ISO
// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
func FaaSDocumentTime(val string) attribute.KeyValue {
	return FaaSDocumentTimeKey.String(val)
}

// FaaSDocumentName returns an attribute KeyValue conforming to the
// "faas.document.name" semantic conventions. It represents the document
// name/table subjected to the operation. For example, in Cloud Storage or S3
// is the name of the file, and in Cosmos DB the table name.
func FaaSDocumentName(val string) attribute.KeyValue {
	return FaaSDocumentNameKey.String(val)
}

// Semantic Convention for FaaS scheduled to be executed regularly.
const (
	// FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
	// conventions. It represents a string containing the function invocation
	// time in the [ISO
	// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
	// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
	//
	// Type: string
	// RequirementLevel: Optional
	// Stability: stable
	// Examples: '2020-01-23T13:47:06Z'
	FaaSTimeKey = attribute.Key("faas.time")

	// FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic
	// conventions. It represents a string containing the schedule period as
	// [Cron
	// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
	//
	// Type: string
	// RequirementLevel: Optional
	// Stability: stable
	// Examples: '0/5 * * * ? *'
	FaaSCronKey = attribute.Key("faas.cron")
)

// FaaSTime returns an attribute KeyValue conforming to the "faas.time"
// semantic conventions. It represents a string containing the function
// invocation time in the [ISO
// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
func FaaSTime(val string) attribute.KeyValue {
	return FaaSTimeKey.String(val)
}

// FaaSCron returns an attribute KeyValue conforming to the "faas.cron"
// semantic conventions. It represents a string containing the schedule period
// as [Cron
// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
func FaaSCron(val string) attribute.KeyValue {
	return FaaSCronKey.String(val)
}

// Contains additional attributes for incoming FaaS spans.
const (
	// FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart"
	// semantic conventions. It represents a boolean that is true if the
	// serverless function is executed for the first time (aka cold-start).
	//
	// Type: boolean
	// RequirementLevel: Optional
	// Stability: stable
	FaaSColdstartKey = attribute.Key("faas.coldstart")
)

// FaaSColdstart returns an attribute KeyValue conforming to the
// "faas.coldstart" semantic conventions. It represents a boolean that is true
// if the serverless function is executed for the first time (aka cold-start).
func FaaSColdstart(val bool) attribute.KeyValue {
	return FaaSColdstartKey.Bool(val)
}

// Contains additional attributes for outgoing FaaS spans.
const (
	// FaaSInvokedNameKey is the attribute Key conforming to the
	// "faas.invoked_name" semantic conventions. It represents the name of the
	// invoked function.
	//
	// Type: string
	// RequirementLevel: Required
	// Stability: stable
	// Examples: 'my-function'
	// Note: SHOULD be equal to the `faas.name` resource attribute of the
	// invoked function.
	FaaSInvokedNameKey = attribute.Key("faas.invoked_name")

	// FaaSInvokedProviderKey is the attribute Key conforming to the
	// "faas.invoked_provider" semantic conventions. It represents the cloud
	// provider of the invoked function.
	//
	// Type: Enum
	// RequirementLevel: Required
	// Stability: stable
	// Note: SHOULD be equal to the `cloud.provider` resource attribute of the
	// invoked function.
	FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")

	// FaaSInvokedRegionKey is the attribute Key conforming to the
	// "faas.invoked_region" semantic conventions. It represents the cloud
	// region of the invoked function.
	//
	// Type: string
	// RequirementLevel: ConditionallyRequired (For some cloud providers, like
	// AWS or GCP, the region in which a function is hosted is essential to
	// uniquely identify the function and also part of its endpoint. Since it's
	// part of the endpoint being called, the region is always known to
	// clients. In these cases, `faas.invoked_region` MUST be set accordingly.
	// If the region is unknown to the client or not required for identifying
	// the invoked function, setting `faas.invoked_region` is optional.)
	// Stability: stable
	// Examples: 'eu-central-1'
	// Note: SHOULD be equal to the `cloud.region` resource attribute of the
	// invoked function.
	FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
)

// Enum values for "faas.invoked_provider".
var (
	// Alibaba Cloud
	FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud")
	// Amazon Web Services
	FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws")
	// Microsoft Azure
	FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure")
	// Google Cloud Platform
	FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp")
	// Tencent Cloud
	FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud")
)

// FaaSInvokedName returns an attribute KeyValue conforming to the
// "faas.invoked_name" semantic conventions. It represents the name of the
// invoked function.
func FaaSInvokedName(val string) attribute.KeyValue {
	return FaaSInvokedNameKey.String(val)
}

// FaaSInvokedRegion returns an attribute KeyValue conforming to the
// "faas.invoked_region" semantic conventions. It represents the cloud region
// of the invoked function.
func FaaSInvokedRegion(val string) attribute.KeyValue {
	return FaaSInvokedRegionKey.String(val)
}

// These attributes may be used for any network related operation.
const (
	// NetTransportKey is the attribute Key conforming to the "net.transport"
	// semantic conventions. It represents the transport protocol used. See
	// note below.
	//
	// Type: Enum
	// RequirementLevel: Optional
	// Stability: stable
	NetTransportKey = attribute.Key("net.transport")

	// NetAppProtocolNameKey is the attribute Key conforming to the
	// "net.app.protocol.name" semantic conventions. It represents the
	// application layer protocol used. The value SHOULD be normalized to
	// lowercase.
	//
	// Type: string
	// RequirementLevel: Optional
	// Stability: stable
	// Examples: 'amqp', 'http', 'mqtt'
	NetAppProtocolNameKey = attribute.Key("net.app.protocol.name")

	// NetAppProtocolVersionKey is the attribute Key conforming to the
	// "net.app.protocol.version" semantic conventions. It represents the
	// version of the application layer protocol used. See note below.
	//
	// Type: string
	// RequirementLevel: Optional
	// Stability: stable
	// Examples: '3.1.1'
	// Note: `net.app.protocol.version` refers to the version of the protocol
	// used and might be different from the protocol client's version. If the
	// HTTP client used has a version of `0.27.2`, but sends HTTP version
	// `1.1`, this attribute should be set to `1.1`.
	NetAppProtocolVersionKey = attribute.Key("net.app.protocol.version")

	// NetSockPeerNameKey is the attribute Key conforming to the
	// "net.sock.peer.name" semantic conventions. It represents the remote
	// socket peer name.
	//
	// Type: string
	// RequirementLevel: Recommended (If available and different from
	// `net.peer.name` and if `net.sock.peer.addr` is set.)
	// Stability: stable
	// Examples: 'proxy.example.com'
	NetSockPeerNameKey = attribute.Key("net.sock.peer.name")

	// NetSockPeerAddrKey is the attribute Key conforming to the
	// "net.sock.peer.addr" semantic conventions. It represents the remote
	// socket peer address: IPv4 or IPv6 for internet protocols, path for local
	// communication,
	// [etc](https://man7.org/linux/man-pages/man7/address_families.7.html).
	//
	// Type: string
	// RequirementLevel: Optional
	// Stability: stable
	// Examples: '127.0.0.1', '/tmp/mysql.sock'
	NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr")

	// NetSockPeerPortKey is the attribute Key conforming to the
	// "net.sock.peer.port" semantic conventions. It represents the remote
	// socket peer port.
	//
	// Type: int
	// RequirementLevel: Recommended (If defined for the address family and if
	// different than `net.peer.port` and if `net.sock.peer.addr` is set.)
	// Stability: stable
	// Examples: 16456
	NetSockPeerPortKey = attribute.Key("net.sock.peer.port")

	// NetSockFamilyKey is the attribute Key conforming to the
	// "net.sock.family" semantic conventions. It represents the protocol
	// [address
	// family](https://man7.org/linux/man-pages/man7/address_families.7.html)
	// which is used for communication.
	//
	// Type: Enum
	// RequirementLevel: ConditionallyRequired (If different than `inet` and if
	// any of `net.sock.peer.addr` or `net.sock.host.addr` are set. Consumers
	// of telemetry SHOULD accept both IPv4 and IPv6 formats for the address in
	// `net.sock.peer.addr` if `net.sock.family` is not set. This is to support
	// instrumentations that follow previous versions of this document.)
	// Stability: stable
	// Examples: 'inet6', 'bluetooth'
	NetSockFamilyKey = attribute.Key("net.sock.family")

	// NetPeerNameKey is the attribute Key conforming to the "net.peer.name"
	// semantic conventions. It represents the logical remote hostname, see
	// note below.
	//
	// Type: string
	// RequirementLevel: Optional
	// Stability: stable
	// Examples: 'example.com'
	// Note: `net.peer.name` SHOULD NOT be set if capturing it would require an
	// extra DNS lookup.
	NetPeerNameKey = attribute.Key("net.peer.name")

	// NetPeerPortKey is the attribute Key conforming to the "net.peer.port"
	// semantic conventions. It represents the logical remote port number
	//
	// Type: int
	// RequirementLevel: Optional
	// Stability: stable
	// Examples: 80, 8080, 443
	NetPeerPortKey = attribute.Key("net.peer.port")

	// NetHostNameKey is the attribute Key conforming to the "net.host.name"
	// semantic conventions. It represents the logical local hostname or
	// similar, see note below.
	//
	// Type: string
	// RequirementLevel: Optional
	// Stability: stable
	// Examples: 'localhost'
	NetHostNameKey = attribute.Key("net.host.name")

	// NetHostPortKey is the attribute Key conforming to the "net.host.port"
	// semantic conventions. It represents the logical local port number,
	// preferably the one that the peer used to connect
	//
	// Type: int
	// RequirementLevel: Optional
	// Stability: stable
	// Examples: 8080
	NetHostPortKey = attribute.Key("net.host.port")

	// NetSockHostAddrKey is the attribute Key conforming to the
	// "net.sock.host.addr" semantic conventions. It represents the local
	// socket address. Useful in case of a multi-IP host.
	//
	// Type: string
	// RequirementLevel: Optional
	// Stability: stable
	// Examples: '192.168.0.1'
	NetSockHostAddrKey = attribute.Key("net.sock.host.addr")

	// NetSockHostPortKey is the attribute Key conforming to the
	// "net.sock.host.port" semantic conventions. It represents the local
	// socket port number.
	//
	// Type: int
	// RequirementLevel: Recommended (If defined for the address family and if
	// different than `net.host.port` and if `net.sock.host.addr` is set.)
	// Stability: stable
	// Examples: 35555
	NetSockHostPortKey = attribute.Key("net.sock.host.port")

	// NetHostConnectionTypeKey is the attribute Key conforming to the
	// "net.host.connection.type" semantic conventions. It represents the
	// internet connection type currently being used by the host.
	//
	// Type: Enum
	// RequirementLevel: Optional
	// Stability: stable
	// Examples: 'wifi'
	NetHostConnectionTypeKey = attribute.Key("net.host.connection.type")

	// NetHostConnectionSubtypeKey is the attribute Key conforming to the
	// "net.host.connection.subtype" semantic conventions. It describes more
	// details regarding the connection.type. It may be the
	// type of cell technology connection, but it could be used for describing
	// details about a wifi connection.
	//
	// Type: Enum
	// RequirementLevel: Optional
	// Stability: stable
	// Examples: 'LTE'
	NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype")

	// NetHostCarrierNameKey is the attribute Key conforming to the
	// "net.host.carrier.name" semantic conventions. It represents the name of
	// the mobile carrier.
	//
	// Type: string
	// RequirementLevel: Optional
	// Stability: stable
	// Examples: 'sprint'
	NetHostCarrierNameKey = attribute.Key("net.host.carrier.name")

	// NetHostCarrierMccKey is the attribute Key conforming to the
	// "net.host.carrier.mcc" semantic conventions. It represents the mobile
	// carrier country code.
	//
	// Type: string
	// RequirementLevel: Optional
	// Stability: stable
	// Examples: '310'
	NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc")

	// NetHostCarrierMncKey is the attribute Key conforming to the
	// "net.host.carrier.mnc" semantic conventions. It represents the mobile
	// carrier network code.
	//
	// Type: string
	// RequirementLevel: Optional
	// Stability: stable
	// Examples: '001'
	NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc")

	// NetHostCarrierIccKey is the attribute Key conforming to the
	// "net.host.carrier.icc" semantic conventions. It represents the ISO
	// 3166-1 alpha-2 2-character country code associated with the mobile
	// carrier network.
	//
	// Type: string
	// RequirementLevel: Optional
	// Stability: stable
	// Examples: 'DE'
	NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc")
)

// Enum values for "net.transport".
var (
	// ip_tcp
	NetTransportTCP = NetTransportKey.String("ip_tcp")
	// ip_udp
	NetTransportUDP = NetTransportKey.String("ip_udp")
	// Named or anonymous pipe.
	// See note below
	NetTransportPipe = NetTransportKey.String("pipe")
	// In-process communication
	NetTransportInProc = NetTransportKey.String("inproc")
	// Something else (non IP-based)
	NetTransportOther = NetTransportKey.String("other")
)

// Enum values for "net.sock.family".
var (
	// IPv4 address
	NetSockFamilyInet = NetSockFamilyKey.String("inet")
	// IPv6 address
	NetSockFamilyInet6 = NetSockFamilyKey.String("inet6")
	// Unix domain socket path
	NetSockFamilyUnix = NetSockFamilyKey.String("unix")
)

// Enum values for "net.host.connection.type".
var (
	// wifi
	NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi")
	// wired
	NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired")
	// cell
	NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell")
	// unavailable
	NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable")
	// unknown
	NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown")
)

// Enum values for "net.host.connection.subtype".
var (
	// GPRS
	NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs")
	// EDGE
	NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge")
	// UMTS
	NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts")
	// CDMA
	NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma")
	// EVDO Rel. 0
	NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0")
	// EVDO Rev. A
	NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a")
	// CDMA2000 1XRTT
	NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt")
	// HSDPA
	NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa")
	// HSUPA
	NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa")
	// HSPA
	NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa")
	// IDEN
	NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden")
	// EVDO Rev. B
	NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b")
	// LTE
	NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte")
	// EHRPD
	NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd")
	// HSPAP
	NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap")
	// GSM
	NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm")
	// TD-SCDMA
	NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma")
	// IWLAN
	NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan")
	// 5G NR (New Radio)
	NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr")
	// 5G NRNSA (New Radio Non-Standalone)
	NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa")
	// LTE CA
	NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca")
)

// NetAppProtocolName returns an attribute KeyValue conforming to the
// "net.app.protocol.name" semantic conventions. It represents the application
// layer protocol used. The value SHOULD be normalized to lowercase.
func NetAppProtocolName(val string) attribute.KeyValue {
	return NetAppProtocolNameKey.String(val)
}

// NetAppProtocolVersion returns an attribute KeyValue conforming to the
// "net.app.protocol.version" semantic conventions. It represents the version
// of the application layer protocol used. See note below.
func NetAppProtocolVersion(val string) attribute.KeyValue {
	return NetAppProtocolVersionKey.String(val)
}

// NetSockPeerName returns an attribute KeyValue conforming to the
// "net.sock.peer.name" semantic conventions. It represents the remote socket
// peer name.
func NetSockPeerName(val string) attribute.KeyValue {
	return NetSockPeerNameKey.String(val)
}

// NetSockPeerAddr returns an attribute KeyValue conforming to the
// "net.sock.peer.addr" semantic conventions.
// It represents the remote socket
// peer address: IPv4 or IPv6 for internet protocols, path for local
// communication,
// [etc](https://man7.org/linux/man-pages/man7/address_families.7.html).
func NetSockPeerAddr(val string) attribute.KeyValue {
	return NetSockPeerAddrKey.String(val)
}

// NetSockPeerPort returns an attribute KeyValue conforming to the
// "net.sock.peer.port" semantic conventions. It represents the remote socket
// peer port.
func NetSockPeerPort(val int) attribute.KeyValue {
	return NetSockPeerPortKey.Int(val)
}

// NetPeerName returns an attribute KeyValue conforming to the
// "net.peer.name" semantic conventions. It represents the logical remote
// hostname, see note below.
func NetPeerName(val string) attribute.KeyValue {
	return NetPeerNameKey.String(val)
}

// NetPeerPort returns an attribute KeyValue conforming to the
// "net.peer.port" semantic conventions. It represents the logical remote port
// number
func NetPeerPort(val int) attribute.KeyValue {
	return NetPeerPortKey.Int(val)
}

// NetHostName returns an attribute KeyValue conforming to the
// "net.host.name" semantic conventions. It represents the logical local
// hostname or similar, see note below.
func NetHostName(val string) attribute.KeyValue {
	return NetHostNameKey.String(val)
}

// NetHostPort returns an attribute KeyValue conforming to the
// "net.host.port" semantic conventions. It represents the logical local port
// number, preferably the one that the peer used to connect
func NetHostPort(val int) attribute.KeyValue {
	return NetHostPortKey.Int(val)
}

// NetSockHostAddr returns an attribute KeyValue conforming to the
// "net.sock.host.addr" semantic conventions. It represents the local socket
// address. Useful in case of a multi-IP host.
func NetSockHostAddr(val string) attribute.KeyValue {
	return NetSockHostAddrKey.String(val)
}

// NetSockHostPort returns an attribute KeyValue conforming to the
// "net.sock.host.port" semantic conventions. It represents the local socket
// port number.
func NetSockHostPort(val int) attribute.KeyValue {
	return NetSockHostPortKey.Int(val)
}

// NetHostCarrierName returns an attribute KeyValue conforming to the
// "net.host.carrier.name" semantic conventions. It represents the name of the
// mobile carrier.
func NetHostCarrierName(val string) attribute.KeyValue {
	return NetHostCarrierNameKey.String(val)
}

// NetHostCarrierMcc returns an attribute KeyValue conforming to the
// "net.host.carrier.mcc" semantic conventions. It represents the mobile
// carrier country code.
func NetHostCarrierMcc(val string) attribute.KeyValue {
	return NetHostCarrierMccKey.String(val)
}

// NetHostCarrierMnc returns an attribute KeyValue conforming to the
// "net.host.carrier.mnc" semantic conventions. It represents the mobile
// carrier network code.
func NetHostCarrierMnc(val string) attribute.KeyValue {
	return NetHostCarrierMncKey.String(val)
}

// NetHostCarrierIcc returns an attribute KeyValue conforming to the
// "net.host.carrier.icc" semantic conventions. It represents the ISO 3166-1
// alpha-2 2-character country code associated with the mobile carrier network.
func NetHostCarrierIcc(val string) attribute.KeyValue {
	return NetHostCarrierIccKey.String(val)
}

// Operations that access some remote service.
const (
	// PeerServiceKey is the attribute Key conforming to the "peer.service"
	// semantic conventions. It represents the
	// [`service.name`](../../resource/semantic_conventions/README.md#service)
	// of the remote service. SHOULD be equal to the actual `service.name`
	// resource attribute of the remote service if any.
	//
	// Type: string
	// RequirementLevel: Optional
	// Stability: stable
	// Examples: 'AuthTokenCache'
	PeerServiceKey = attribute.Key("peer.service")
)

// PeerService returns an attribute KeyValue conforming to the
// "peer.service" semantic conventions. It represents the
// [`service.name`](../../resource/semantic_conventions/README.md#service) of
// the remote service. SHOULD be equal to the actual `service.name` resource
// attribute of the remote service if any.
func PeerService(val string) attribute.KeyValue {
	return PeerServiceKey.String(val)
}

// These attributes may be used for any operation with an authenticated and/or
// authorized enduser.
const (
	// EnduserIDKey is the attribute Key conforming to the "enduser.id"
	// semantic conventions. It represents the username or client_id extracted
	// from the access token or
	// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header
	// in the inbound request from outside the system.
	//
	// Type: string
	// RequirementLevel: Optional
	// Stability: stable
	// Examples: 'username'
	EnduserIDKey = attribute.Key("enduser.id")

	// EnduserRoleKey is the attribute Key conforming to the "enduser.role"
	// semantic conventions. It represents the actual/assumed role the client
	// is making the request under extracted from token or application security
	// context.
	//
	// Type: string
	// RequirementLevel: Optional
	// Stability: stable
	// Examples: 'admin'
	EnduserRoleKey = attribute.Key("enduser.role")

	// EnduserScopeKey is the attribute Key conforming to the "enduser.scope"
	// semantic conventions. It represents the scopes or granted authorities
	// the client currently possesses extracted from token or application
	// security context. The value would come from the scope associated with an
	// [OAuth 2.0 Access
	// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
	// value in a [SAML 2.0
	// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
	//
	// Type: string
	// RequirementLevel: Optional
	// Stability: stable
	// Examples: 'read:message, write:files'
	EnduserScopeKey = attribute.Key("enduser.scope")
)

// EnduserID returns an attribute KeyValue conforming to the "enduser.id"
// semantic conventions. It represents the username or client_id extracted from
// the access token or
// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in
// the inbound request from outside the system.
func EnduserID(val string) attribute.KeyValue {
	return EnduserIDKey.String(val)
}

// EnduserRole returns an attribute KeyValue conforming to the
// "enduser.role" semantic conventions. It represents the actual/assumed role
// the client is making the request under extracted from token or application
// security context.
func EnduserRole(val string) attribute.KeyValue {
	return EnduserRoleKey.String(val)
}

// EnduserScope returns an attribute KeyValue conforming to the
// "enduser.scope" semantic conventions. It represents the scopes or granted
// authorities the client currently possesses extracted from token or
// application security context. The value would come from the scope associated
// with an [OAuth 2.0 Access
// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
// value in a [SAML 2.0
// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
func EnduserScope(val string) attribute.KeyValue {
	return EnduserScopeKey.String(val)
}

// These attributes may be used for any operation to store information about a
// thread that started a span.
const (
	// ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
	// conventions. It represents the current "managed" thread ID (as opposed
	// to OS thread ID).
	//
	// Type: int
	// RequirementLevel: Optional
	// Stability: stable
	// Examples: 42
	ThreadIDKey = attribute.Key("thread.id")

	// ThreadNameKey is the attribute Key conforming to the "thread.name"
	// semantic conventions. It represents the current thread name.
	//
	// Type: string
	// RequirementLevel: Optional
	// Stability: stable
	// Examples: 'main'
	ThreadNameKey = attribute.Key("thread.name")
)

// ThreadID returns an attribute KeyValue conforming to the "thread.id"
// semantic conventions. It represents the current "managed" thread ID (as
// opposed to OS thread ID).
func ThreadID(val int) attribute.KeyValue {
	return ThreadIDKey.Int(val)
}

// ThreadName returns an attribute KeyValue conforming to the "thread.name"
// semantic conventions. It represents the current thread name.
func ThreadName(val string) attribute.KeyValue {
	return ThreadNameKey.String(val)
}

// These attributes allow to report this unit of code and therefore to provide
// more context about the span.
const (
	// CodeFunctionKey is the attribute Key conforming to the "code.function"
	// semantic conventions. It represents the method or function name, or
	// equivalent (usually rightmost part of the code unit's name).
	//
	// Type: string
	// RequirementLevel: Optional
	// Stability: stable
	// Examples: 'serveRequest'
	CodeFunctionKey = attribute.Key("code.function")

	// CodeNamespaceKey is the attribute Key conforming to the "code.namespace"
	// semantic conventions. It represents the "namespace" within which
	// `code.function` is defined. Usually the qualified class or module name,
	// such that `code.namespace` + some separator + `code.function` form a
	// unique identifier for the code unit.
	//
	// Type: string
	// RequirementLevel: Optional
	// Stability: stable
	// Examples: 'com.example.MyHTTPService'
	CodeNamespaceKey = attribute.Key("code.namespace")

	// CodeFilepathKey is the attribute Key conforming to the "code.filepath"
	// semantic conventions. It represents the source code file name that
	// identifies the code unit as uniquely as possible (preferably an absolute
	// file path).
	//
	// Type: string
	// RequirementLevel: Optional
	// Stability: stable
	// Examples: '/usr/local/MyApplication/content_root/app/index.php'
	CodeFilepathKey = attribute.Key("code.filepath")

	// CodeLineNumberKey is the attribute Key conforming to the "code.lineno"
	// semantic conventions. It represents the line number in `code.filepath`
	// best representing the operation. It SHOULD point within the code unit
	// named in `code.function`.
	//
	// Type: int
	// RequirementLevel: Optional
	// Stability: stable
	// Examples: 42
	CodeLineNumberKey = attribute.Key("code.lineno")

	// CodeColumnKey is the attribute Key conforming to the "code.column"
	// semantic conventions. It represents the column number in `code.filepath`
	// best representing the operation. It SHOULD point within the code unit
	// named in `code.function`.
	//
	// Type: int
	// RequirementLevel: Optional
	// Stability: stable
	// Examples: 16
	CodeColumnKey = attribute.Key("code.column")
)

// CodeFunction returns an attribute KeyValue conforming to the
// "code.function" semantic conventions. It represents the method or function
// name, or equivalent (usually rightmost part of the code unit's name).
func CodeFunction(val string) attribute.KeyValue {
	return CodeFunctionKey.String(val)
}

// CodeNamespace returns an attribute KeyValue conforming to the
// "code.namespace" semantic conventions. It represents the "namespace" within
// which `code.function` is defined.
Usually the qualified class or module +// name, such that `code.namespace` + some separator + `code.function` form a +// unique identifier for the code unit. +func CodeNamespace(val string) attribute.KeyValue { + return CodeNamespaceKey.String(val) +} + +// CodeFilepath returns an attribute KeyValue conforming to the +// "code.filepath" semantic conventions. It represents the source code file +// name that identifies the code unit as uniquely as possible (preferably an +// absolute file path). +func CodeFilepath(val string) attribute.KeyValue { + return CodeFilepathKey.String(val) +} + +// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" +// semantic conventions. It represents the line number in `code.filepath` best +// representing the operation. It SHOULD point within the code unit named in +// `code.function`. +func CodeLineNumber(val int) attribute.KeyValue { + return CodeLineNumberKey.Int(val) +} + +// CodeColumn returns an attribute KeyValue conforming to the "code.column" +// semantic conventions. It represents the column number in `code.filepath` +// best representing the operation. It SHOULD point within the code unit named +// in `code.function`. +func CodeColumn(val int) attribute.KeyValue { + return CodeColumnKey.Int(val) +} + +// Semantic conventions for HTTP client and server Spans. +const ( + // HTTPMethodKey is the attribute Key conforming to the "http.method" + // semantic conventions. It represents the hTTP request method. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'GET', 'POST', 'HEAD' + HTTPMethodKey = attribute.Key("http.method") + + // HTTPStatusCodeKey is the attribute Key conforming to the + // "http.status_code" semantic conventions. It represents the [HTTP + // response status code](https://tools.ietf.org/html/rfc7231#section-6). + // + // Type: int + // RequirementLevel: ConditionallyRequired (If and only if one was + // received/sent.) 
+ // Stability: stable + // Examples: 200 + HTTPStatusCodeKey = attribute.Key("http.status_code") + + // HTTPFlavorKey is the attribute Key conforming to the "http.flavor" + // semantic conventions. It represents the kind of HTTP protocol used. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Note: If `net.transport` is not specified, it can be assumed to be + // `IP.TCP` except if `http.flavor` is `QUIC`, in which case `IP.UDP` is + // assumed. + HTTPFlavorKey = attribute.Key("http.flavor") + + // HTTPUserAgentKey is the attribute Key conforming to the + // "http.user_agent" semantic conventions. It represents the value of the + // [HTTP + // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) + // header sent by the client. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' + HTTPUserAgentKey = attribute.Key("http.user_agent") + + // HTTPRequestContentLengthKey is the attribute Key conforming to the + // "http.request_content_length" semantic conventions. It represents the + // size of the request payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3495 + HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") + + // HTTPResponseContentLengthKey is the attribute Key conforming to the + // "http.response_content_length" semantic conventions. It represents the + // size of the response payload body in bytes. 
This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3495 + HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") +) + +var ( + // HTTP/1.0 + HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0") + // HTTP/1.1 + HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1") + // HTTP/2 + HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0") + // HTTP/3 + HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0") + // SPDY protocol + HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY") + // QUIC protocol + HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC") +) + +// HTTPMethod returns an attribute KeyValue conforming to the "http.method" +// semantic conventions. It represents the hTTP request method. +func HTTPMethod(val string) attribute.KeyValue { + return HTTPMethodKey.String(val) +} + +// HTTPStatusCode returns an attribute KeyValue conforming to the +// "http.status_code" semantic conventions. It represents the [HTTP response +// status code](https://tools.ietf.org/html/rfc7231#section-6). +func HTTPStatusCode(val int) attribute.KeyValue { + return HTTPStatusCodeKey.Int(val) +} + +// HTTPUserAgent returns an attribute KeyValue conforming to the +// "http.user_agent" semantic conventions. It represents the value of the [HTTP +// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) +// header sent by the client. +func HTTPUserAgent(val string) attribute.KeyValue { + return HTTPUserAgentKey.String(val) +} + +// HTTPRequestContentLength returns an attribute KeyValue conforming to the +// "http.request_content_length" semantic conventions. It represents the size +// of the request payload body in bytes. 
This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPRequestContentLength(val int) attribute.KeyValue { + return HTTPRequestContentLengthKey.Int(val) +} + +// HTTPResponseContentLength returns an attribute KeyValue conforming to the +// "http.response_content_length" semantic conventions. It represents the size +// of the response payload body in bytes. This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPResponseContentLength(val int) attribute.KeyValue { + return HTTPResponseContentLengthKey.Int(val) +} + +// Semantic Convention for HTTP Client +const ( + // HTTPURLKey is the attribute Key conforming to the "http.url" semantic + // conventions. It represents the full HTTP request URL in the form + // `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is + // not transmitted over HTTP, but if it is known, it should be included + // nevertheless. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' + // Note: `http.url` MUST NOT contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case the + // attribute's value should be `https://www.example.com/`. + HTTPURLKey = attribute.Key("http.url") + + // HTTPResendCountKey is the attribute Key conforming to the + // "http.resend_count" semantic conventions. It represents the ordinal + // number of request resending attempt (for any reason, including + // redirects). 
+ // + // Type: int + // RequirementLevel: Recommended (if and only if request was retried.) + // Stability: stable + // Examples: 3 + // Note: The resend count SHOULD be updated each time an HTTP request gets + // resent by the client, regardless of what was the cause of the resending + // (e.g. redirection, authorization failure, 503 Server Unavailable, + // network issues, or any other). + HTTPResendCountKey = attribute.Key("http.resend_count") +) + +// HTTPURL returns an attribute KeyValue conforming to the "http.url" +// semantic conventions. It represents the full HTTP request URL in the form +// `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is not +// transmitted over HTTP, but if it is known, it should be included +// nevertheless. +func HTTPURL(val string) attribute.KeyValue { + return HTTPURLKey.String(val) +} + +// HTTPResendCount returns an attribute KeyValue conforming to the +// "http.resend_count" semantic conventions. It represents the ordinal number +// of request resending attempt (for any reason, including redirects). +func HTTPResendCount(val int) attribute.KeyValue { + return HTTPResendCountKey.Int(val) +} + +// Semantic Convention for HTTP Server +const ( + // HTTPSchemeKey is the attribute Key conforming to the "http.scheme" + // semantic conventions. It represents the URI scheme identifying the used + // protocol. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'http', 'https' + HTTPSchemeKey = attribute.Key("http.scheme") + + // HTTPTargetKey is the attribute Key conforming to the "http.target" + // semantic conventions. It represents the full request target as passed in + // a HTTP request line or equivalent. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: '/path/12314/?q=ddds' + HTTPTargetKey = attribute.Key("http.target") + + // HTTPRouteKey is the attribute Key conforming to the "http.route" + // semantic conventions. 
It represents the matched route (path template in + // the format used by the respective server framework). See note below + // + // Type: string + // RequirementLevel: ConditionallyRequired (If and only if it's available) + // Stability: stable + // Examples: '/users/:userID?', '{controller}/{action}/{id?}' + // Note: 'http.route' MUST NOT be populated when this is not supported by + // the HTTP server framework as the route attribute should have + // low-cardinality and the URI path can NOT substitute it. + HTTPRouteKey = attribute.Key("http.route") + + // HTTPClientIPKey is the attribute Key conforming to the "http.client_ip" + // semantic conventions. It represents the IP address of the original + // client behind all proxies, if known (e.g. from + // [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '83.164.160.102' + // Note: This is not necessarily the same as `net.sock.peer.addr`, which + // would + // identify the network-level peer, which may be a proxy. + // + // This attribute should be set when a source of information different + // from the one used for `net.sock.peer.addr`, is available even if that + // other + // source just confirms the same value as `net.sock.peer.addr`. + // Rationale: For `net.sock.peer.addr`, one typically does not know if it + // comes from a proxy, reverse proxy, or the actual client. Setting + // `http.client_ip` when it's the same as `net.sock.peer.addr` means that + // one is at least somewhat confident that the address is not that of + // the closest proxy. + HTTPClientIPKey = attribute.Key("http.client_ip") +) + +// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme" +// semantic conventions. It represents the URI scheme identifying the used +// protocol. 
+func HTTPScheme(val string) attribute.KeyValue { + return HTTPSchemeKey.String(val) +} + +// HTTPTarget returns an attribute KeyValue conforming to the "http.target" +// semantic conventions. It represents the full request target as passed in a +// HTTP request line or equivalent. +func HTTPTarget(val string) attribute.KeyValue { + return HTTPTargetKey.String(val) +} + +// HTTPRoute returns an attribute KeyValue conforming to the "http.route" +// semantic conventions. It represents the matched route (path template in the +// format used by the respective server framework). See note below +func HTTPRoute(val string) attribute.KeyValue { + return HTTPRouteKey.String(val) +} + +// HTTPClientIP returns an attribute KeyValue conforming to the +// "http.client_ip" semantic conventions. It represents the IP address of the +// original client behind all proxies, if known (e.g. from +// [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)). +func HTTPClientIP(val string) attribute.KeyValue { + return HTTPClientIPKey.String(val) +} + +// Attributes that exist for multiple DynamoDB request types. +const ( + // AWSDynamoDBTableNamesKey is the attribute Key conforming to the + // "aws.dynamodb.table_names" semantic conventions. It represents the keys + // in the `RequestItems` object field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Users', 'Cats' + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") + + // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the + // JSON-serialized value of each item in the `ConsumedCapacity` response + // field. 
+ // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { + // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number }, "TableName": "string", + // "WriteCapacityUnits": number }' + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + + // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to + // the "aws.dynamodb.item_collection_metrics" semantic conventions. It + // represents the JSON-serialized value of the `ItemCollectionMetrics` + // response field. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": + // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { + // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], + // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, + // "SizeEstimateRangeGB": [ number ] } ] }' + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + + // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to + // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It + // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` + // request parameter. 
+ // + // Type: double + // RequirementLevel: Optional + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + + // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming + // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. + // It represents the value of the + // `ProvisionedThroughput.WriteCapacityUnits` request parameter. + // + // Type: double + // RequirementLevel: Optional + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + + // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the + // "aws.dynamodb.consistent_read" semantic conventions. It represents the + // value of the `ConsistentRead` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + + // AWSDynamoDBProjectionKey is the attribute Key conforming to the + // "aws.dynamodb.projection" semantic conventions. It represents the value + // of the `ProjectionExpression` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, + // RelatedItems, ProductReviews' + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + + // AWSDynamoDBLimitKey is the attribute Key conforming to the + // "aws.dynamodb.limit" semantic conventions. It represents the value of + // the `Limit` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + + // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the + // "aws.dynamodb.attributes_to_get" semantic conventions. 
It represents the + // value of the `AttributesToGet` request parameter. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'lives', 'id' + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + + // AWSDynamoDBIndexNameKey is the attribute Key conforming to the + // "aws.dynamodb.index_name" semantic conventions. It represents the value + // of the `IndexName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'name_to_group' + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + + // AWSDynamoDBSelectKey is the attribute Key conforming to the + // "aws.dynamodb.select" semantic conventions. It represents the value of + // the `Select` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") +) + +// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_names" semantic conventions. It represents the keys in +// the `RequestItems` object field. +func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { + return AWSDynamoDBTableNamesKey.StringSlice(val) +} + +// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to +// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the +// JSON-serialized value of each item in the `ConsumedCapacity` response field. +func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { + return AWSDynamoDBConsumedCapacityKey.StringSlice(val) +} + +// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming +// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It +// represents the JSON-serialized value of the `ItemCollectionMetrics` response +// field. 
+func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { + return AWSDynamoDBItemCollectionMetricsKey.String(val) +} + +// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.ReadCapacityUnits` request parameter. +func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) +} + +// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.WriteCapacityUnits` request parameter. +func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) +} + +// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the +// "aws.dynamodb.consistent_read" semantic conventions. It represents the value +// of the `ConsistentRead` request parameter. +func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { + return AWSDynamoDBConsistentReadKey.Bool(val) +} + +// AWSDynamoDBProjection returns an attribute KeyValue conforming to the +// "aws.dynamodb.projection" semantic conventions. It represents the value of +// the `ProjectionExpression` request parameter. +func AWSDynamoDBProjection(val string) attribute.KeyValue { + return AWSDynamoDBProjectionKey.String(val) +} + +// AWSDynamoDBLimit returns an attribute KeyValue conforming to the +// "aws.dynamodb.limit" semantic conventions. It represents the value of the +// `Limit` request parameter. +func AWSDynamoDBLimit(val int) attribute.KeyValue { + return AWSDynamoDBLimitKey.Int(val) +} + +// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to +// the "aws.dynamodb.attributes_to_get" semantic conventions. 
It represents the +// value of the `AttributesToGet` request parameter. +func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributesToGetKey.StringSlice(val) +} + +// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the +// "aws.dynamodb.index_name" semantic conventions. It represents the value of +// the `IndexName` request parameter. +func AWSDynamoDBIndexName(val string) attribute.KeyValue { + return AWSDynamoDBIndexNameKey.String(val) +} + +// AWSDynamoDBSelect returns an attribute KeyValue conforming to the +// "aws.dynamodb.select" semantic conventions. It represents the value of the +// `Select` request parameter. +func AWSDynamoDBSelect(val string) attribute.KeyValue { + return AWSDynamoDBSelectKey.String(val) +} + +// DynamoDB.CreateTable +const ( + // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It + // represents the JSON-serialized value of each item of the + // `GlobalSecondaryIndexes` request field + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": + // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ + // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") + + // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It + // represents the JSON-serialized value of each item of the + // `LocalSecondaryIndexes` request field. 
+ // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "IndexARN": "string", "IndexName": "string", + // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' + AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") +) + +// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_indexes" semantic +// conventions. It represents the JSON-serialized value of each item of the +// `GlobalSecondaryIndexes` request field +func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming +// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It +// represents the JSON-serialized value of each item of the +// `LocalSecondaryIndexes` request field. +func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) +} + +// DynamoDB.ListTables +const ( + // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the + // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents + // the value of the `ExclusiveStartTableName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Users', 'CatsTable' + AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") + + // AWSDynamoDBTableCountKey is the attribute Key conforming to the + // "aws.dynamodb.table_count" semantic conventions. It represents the the + // number of items in the `TableNames` response parameter. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 20 + AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") +) + +// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming +// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It +// represents the value of the `ExclusiveStartTableName` request parameter. +func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { + return AWSDynamoDBExclusiveStartTableKey.String(val) +} + +// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_count" semantic conventions. It represents the the +// number of items in the `TableNames` response parameter. +func AWSDynamoDBTableCount(val int) attribute.KeyValue { + return AWSDynamoDBTableCountKey.Int(val) +} + +// DynamoDB.Query +const ( + // AWSDynamoDBScanForwardKey is the attribute Key conforming to the + // "aws.dynamodb.scan_forward" semantic conventions. It represents the + // value of the `ScanIndexForward` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") +) + +// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the +// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of +// the `ScanIndexForward` request parameter. +func AWSDynamoDBScanForward(val bool) attribute.KeyValue { + return AWSDynamoDBScanForwardKey.Bool(val) +} + +// DynamoDB.Scan +const ( + // AWSDynamoDBSegmentKey is the attribute Key conforming to the + // "aws.dynamodb.segment" semantic conventions. It represents the value of + // the `Segment` request parameter. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") + + // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the + // "aws.dynamodb.total_segments" semantic conventions. It represents the + // value of the `TotalSegments` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 100 + AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") + + // AWSDynamoDBCountKey is the attribute Key conforming to the + // "aws.dynamodb.count" semantic conventions. It represents the value of + // the `Count` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") + + // AWSDynamoDBScannedCountKey is the attribute Key conforming to the + // "aws.dynamodb.scanned_count" semantic conventions. It represents the + // value of the `ScannedCount` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 50 + AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") +) + +// AWSDynamoDBSegment returns an attribute KeyValue conforming to the +// "aws.dynamodb.segment" semantic conventions. It represents the value of the +// `Segment` request parameter. +func AWSDynamoDBSegment(val int) attribute.KeyValue { + return AWSDynamoDBSegmentKey.Int(val) +} + +// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the +// "aws.dynamodb.total_segments" semantic conventions. It represents the value +// of the `TotalSegments` request parameter. +func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { + return AWSDynamoDBTotalSegmentsKey.Int(val) +} + +// AWSDynamoDBCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.count" semantic conventions. 
It represents the value of the +// `Count` response parameter. +func AWSDynamoDBCount(val int) attribute.KeyValue { + return AWSDynamoDBCountKey.Int(val) +} + +// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.scanned_count" semantic conventions. It represents the value +// of the `ScannedCount` response parameter. +func AWSDynamoDBScannedCount(val int) attribute.KeyValue { + return AWSDynamoDBScannedCountKey.Int(val) +} + +// DynamoDB.UpdateTable +const ( + // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to + // the "aws.dynamodb.attribute_definitions" semantic conventions. It + // represents the JSON-serialized value of each item in the + // `AttributeDefinitions` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' + AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") + + // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key + // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic + // conventions. It represents the JSON-serialized value of each item in the + // the `GlobalSecondaryIndexUpdates` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") +) + +// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming +// to the "aws.dynamodb.attribute_definitions" semantic conventions. 
It +// represents the JSON-serialized value of each item in the +// `AttributeDefinitions` request field. +func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) +} + +// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic +// conventions. It represents the JSON-serialized value of each item in the the +// `GlobalSecondaryIndexUpdates` request field. +func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) +} + +// Semantic conventions to apply when instrumenting the GraphQL implementation. +// They map GraphQL operations to attributes on a Span. +const ( + // GraphqlOperationNameKey is the attribute Key conforming to the + // "graphql.operation.name" semantic conventions. It represents the name of + // the operation being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'findBookByID' + GraphqlOperationNameKey = attribute.Key("graphql.operation.name") + + // GraphqlOperationTypeKey is the attribute Key conforming to the + // "graphql.operation.type" semantic conventions. It represents the type of + // the operation being executed. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'query', 'mutation', 'subscription' + GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") + + // GraphqlDocumentKey is the attribute Key conforming to the + // "graphql.document" semantic conventions. It represents the GraphQL + // document being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'query findBookByID { bookByID(id: ?) { name } }' + // Note: The value may be sanitized to exclude sensitive information. 
+ GraphqlDocumentKey = attribute.Key("graphql.document") +) + +var ( + // GraphQL query + GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") + // GraphQL mutation + GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") + // GraphQL subscription + GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") +) + +// GraphqlOperationName returns an attribute KeyValue conforming to the +// "graphql.operation.name" semantic conventions. It represents the name of the +// operation being executed. +func GraphqlOperationName(val string) attribute.KeyValue { + return GraphqlOperationNameKey.String(val) +} + +// GraphqlDocument returns an attribute KeyValue conforming to the +// "graphql.document" semantic conventions. It represents the GraphQL document +// being executed. +func GraphqlDocument(val string) attribute.KeyValue { + return GraphqlDocumentKey.String(val) +} + +// Semantic convention describing per-message attributes populated on messaging +// spans or links. +const ( + // MessagingMessageIDKey is the attribute Key conforming to the + // "messaging.message.id" semantic conventions. It represents a value used + // by the messaging system as an identifier for the message, represented as + // a string. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '452a7c7c7c7048c2f887f61572b18fc2' + MessagingMessageIDKey = attribute.Key("messaging.message.id") + + // MessagingMessageConversationIDKey is the attribute Key conforming to the + // "messaging.message.conversation_id" semantic conventions. It represents + // the [conversation ID](#conversations) identifying the conversation to + // which the message belongs, represented as a string. Sometimes called + // "Correlation ID". 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyConversationID' + MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") + + // MessagingMessagePayloadSizeBytesKey is the attribute Key conforming to + // the "messaging.message.payload_size_bytes" semantic conventions. It + // represents the (uncompressed) size of the message payload in bytes. Also + // use this attribute if it is unknown whether the compressed or + // uncompressed payload size is reported. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2738 + MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes") + + // MessagingMessagePayloadCompressedSizeBytesKey is the attribute Key + // conforming to the "messaging.message.payload_compressed_size_bytes" + // semantic conventions. It represents the compressed size of the message + // payload in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2048 + MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes") +) + +// MessagingMessageID returns an attribute KeyValue conforming to the +// "messaging.message.id" semantic conventions. It represents a value used by +// the messaging system as an identifier for the message, represented as a +// string. +func MessagingMessageID(val string) attribute.KeyValue { + return MessagingMessageIDKey.String(val) +} + +// MessagingMessageConversationID returns an attribute KeyValue conforming +// to the "messaging.message.conversation_id" semantic conventions. It +// represents the [conversation ID](#conversations) identifying the +// conversation to which the message belongs, represented as a string. +// Sometimes called "Correlation ID". 
+func MessagingMessageConversationID(val string) attribute.KeyValue { + return MessagingMessageConversationIDKey.String(val) +} + +// MessagingMessagePayloadSizeBytes returns an attribute KeyValue conforming +// to the "messaging.message.payload_size_bytes" semantic conventions. It +// represents the (uncompressed) size of the message payload in bytes. Also use +// this attribute if it is unknown whether the compressed or uncompressed +// payload size is reported. +func MessagingMessagePayloadSizeBytes(val int) attribute.KeyValue { + return MessagingMessagePayloadSizeBytesKey.Int(val) +} + +// MessagingMessagePayloadCompressedSizeBytes returns an attribute KeyValue +// conforming to the "messaging.message.payload_compressed_size_bytes" semantic +// conventions. It represents the compressed size of the message payload in +// bytes. +func MessagingMessagePayloadCompressedSizeBytes(val int) attribute.KeyValue { + return MessagingMessagePayloadCompressedSizeBytesKey.Int(val) +} + +// Semantic convention for attributes that describe messaging destination on +// broker +const ( + // MessagingDestinationNameKey is the attribute Key conforming to the + // "messaging.destination.name" semantic conventions. It represents the + // message destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyQueue', 'MyTopic' + // Note: Destination name SHOULD uniquely identify a specific queue, topic + // or other entity within the broker. If + // the broker does not have such notion, the destination name SHOULD + // uniquely identify the broker. + MessagingDestinationNameKey = attribute.Key("messaging.destination.name") + + // MessagingDestinationKindKey is the attribute Key conforming to the + // "messaging.destination.kind" semantic conventions. 
It represents the + // kind of message destination + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessagingDestinationKindKey = attribute.Key("messaging.destination.kind") + + // MessagingDestinationTemplateKey is the attribute Key conforming to the + // "messaging.destination.template" semantic conventions. It represents the + // low cardinality representation of the messaging destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/customers/{customerID}' + // Note: Destination names could be constructed from templates. An example + // would be a destination name involving a user name or product id. + // Although the destination name in this case is of high cardinality, the + // underlying template is of low cardinality and can be effectively used + // for grouping and aggregation. + MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") + + // MessagingDestinationTemporaryKey is the attribute Key conforming to the + // "messaging.destination.temporary" semantic conventions. It represents a + // boolean that is true if the message destination is temporary and might + // not exist anymore after messages are processed. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") + + // MessagingDestinationAnonymousKey is the attribute Key conforming to the + // "messaging.destination.anonymous" semantic conventions. It represents a + // boolean that is true if the message destination is anonymous (could be + // unnamed or have auto-generated name). 
+ // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") +) + +var ( + // A message sent to a queue + MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue") + // A message sent to a topic + MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic") +) + +// MessagingDestinationName returns an attribute KeyValue conforming to the +// "messaging.destination.name" semantic conventions. It represents the message +// destination name +func MessagingDestinationName(val string) attribute.KeyValue { + return MessagingDestinationNameKey.String(val) +} + +// MessagingDestinationTemplate returns an attribute KeyValue conforming to +// the "messaging.destination.template" semantic conventions. It represents the +// low cardinality representation of the messaging destination name +func MessagingDestinationTemplate(val string) attribute.KeyValue { + return MessagingDestinationTemplateKey.String(val) +} + +// MessagingDestinationTemporary returns an attribute KeyValue conforming to +// the "messaging.destination.temporary" semantic conventions. It represents a +// boolean that is true if the message destination is temporary and might not +// exist anymore after messages are processed. +func MessagingDestinationTemporary(val bool) attribute.KeyValue { + return MessagingDestinationTemporaryKey.Bool(val) +} + +// MessagingDestinationAnonymous returns an attribute KeyValue conforming to +// the "messaging.destination.anonymous" semantic conventions. It represents a +// boolean that is true if the message destination is anonymous (could be +// unnamed or have auto-generated name). 
+func MessagingDestinationAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationAnonymousKey.Bool(val) +} + +// Semantic convention for attributes that describe messaging source on broker +const ( + // MessagingSourceNameKey is the attribute Key conforming to the + // "messaging.source.name" semantic conventions. It represents the message + // source name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyQueue', 'MyTopic' + // Note: Source name SHOULD uniquely identify a specific queue, topic, or + // other entity within the broker. If + // the broker does not have such notion, the source name SHOULD uniquely + // identify the broker. + MessagingSourceNameKey = attribute.Key("messaging.source.name") + + // MessagingSourceKindKey is the attribute Key conforming to the + // "messaging.source.kind" semantic conventions. It represents the kind of + // message source + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessagingSourceKindKey = attribute.Key("messaging.source.kind") + + // MessagingSourceTemplateKey is the attribute Key conforming to the + // "messaging.source.template" semantic conventions. It represents the low + // cardinality representation of the messaging source name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/customers/{customerID}' + // Note: Source names could be constructed from templates. An example would + // be a source name involving a user name or product id. Although the + // source name in this case is of high cardinality, the underlying template + // is of low cardinality and can be effectively used for grouping and + // aggregation. + MessagingSourceTemplateKey = attribute.Key("messaging.source.template") + + // MessagingSourceTemporaryKey is the attribute Key conforming to the + // "messaging.source.temporary" semantic conventions. 
It represents a + // boolean that is true if the message source is temporary and might not + // exist anymore after messages are processed. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingSourceTemporaryKey = attribute.Key("messaging.source.temporary") + + // MessagingSourceAnonymousKey is the attribute Key conforming to the + // "messaging.source.anonymous" semantic conventions. It represents a + // boolean that is true if the message source is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingSourceAnonymousKey = attribute.Key("messaging.source.anonymous") +) + +var ( + // A message received from a queue + MessagingSourceKindQueue = MessagingSourceKindKey.String("queue") + // A message received from a topic + MessagingSourceKindTopic = MessagingSourceKindKey.String("topic") +) + +// MessagingSourceName returns an attribute KeyValue conforming to the +// "messaging.source.name" semantic conventions. It represents the message +// source name +func MessagingSourceName(val string) attribute.KeyValue { + return MessagingSourceNameKey.String(val) +} + +// MessagingSourceTemplate returns an attribute KeyValue conforming to the +// "messaging.source.template" semantic conventions. It represents the low +// cardinality representation of the messaging source name +func MessagingSourceTemplate(val string) attribute.KeyValue { + return MessagingSourceTemplateKey.String(val) +} + +// MessagingSourceTemporary returns an attribute KeyValue conforming to the +// "messaging.source.temporary" semantic conventions. It represents a boolean +// that is true if the message source is temporary and might not exist anymore +// after messages are processed. 
+func MessagingSourceTemporary(val bool) attribute.KeyValue { + return MessagingSourceTemporaryKey.Bool(val) +} + +// MessagingSourceAnonymous returns an attribute KeyValue conforming to the +// "messaging.source.anonymous" semantic conventions. It represents a boolean +// that is true if the message source is anonymous (could be unnamed or have +// auto-generated name). +func MessagingSourceAnonymous(val bool) attribute.KeyValue { + return MessagingSourceAnonymousKey.Bool(val) +} + +// General attributes used in messaging systems. +const ( + // MessagingSystemKey is the attribute Key conforming to the + // "messaging.system" semantic conventions. It represents a string + // identifying the messaging system. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS' + MessagingSystemKey = attribute.Key("messaging.system") + + // MessagingOperationKey is the attribute Key conforming to the + // "messaging.operation" semantic conventions. It represents a string + // identifying the kind of messaging operation as defined in the [Operation + // names](#operation-names) section above. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: If a custom value is used, it MUST be of low cardinality. + MessagingOperationKey = attribute.Key("messaging.operation") + + // MessagingBatchMessageCountKey is the attribute Key conforming to the + // "messaging.batch.message_count" semantic conventions. It represents the + // number of messages sent, received, or processed in the scope of the + // batching operation. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If the span describes an + // operation on a batch of messages.) + // Stability: stable + // Examples: 0, 1, 2 + // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on + // spans that operate with a single message. 
When a messaging client + // library supports both batch and single-message API for the same + // operation, instrumentations SHOULD use `messaging.batch.message_count` + // for batching APIs and SHOULD NOT use it for single-message APIs. + MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") +) + +var ( + // publish + MessagingOperationPublish = MessagingOperationKey.String("publish") + // receive + MessagingOperationReceive = MessagingOperationKey.String("receive") + // process + MessagingOperationProcess = MessagingOperationKey.String("process") +) + +// MessagingSystem returns an attribute KeyValue conforming to the +// "messaging.system" semantic conventions. It represents a string identifying +// the messaging system. +func MessagingSystem(val string) attribute.KeyValue { + return MessagingSystemKey.String(val) +} + +// MessagingBatchMessageCount returns an attribute KeyValue conforming to +// the "messaging.batch.message_count" semantic conventions. It represents the +// number of messages sent, received, or processed in the scope of the batching +// operation. +func MessagingBatchMessageCount(val int) attribute.KeyValue { + return MessagingBatchMessageCountKey.Int(val) +} + +// Semantic convention for a consumer of messages received from a messaging +// system +const ( + // MessagingConsumerIDKey is the attribute Key conforming to the + // "messaging.consumer.id" semantic conventions. It represents the + // identifier for the consumer receiving a message. For Kafka, set it to + // `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if + // both are present, or only `messaging.kafka.consumer.group`. For brokers, + // such as RabbitMQ and Artemis, set it to the `client_id` of the client + // consuming the message. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'mygroup - client-6' + MessagingConsumerIDKey = attribute.Key("messaging.consumer.id") +) + +// MessagingConsumerID returns an attribute KeyValue conforming to the +// "messaging.consumer.id" semantic conventions. It represents the identifier +// for the consumer receiving a message. For Kafka, set it to +// `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if both +// are present, or only `messaging.kafka.consumer.group`. For brokers, such as +// RabbitMQ and Artemis, set it to the `client_id` of the client consuming the +// message. +func MessagingConsumerID(val string) attribute.KeyValue { + return MessagingConsumerIDKey.String(val) +} + +// Attributes for RabbitMQ +const ( + // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key + // conforming to the "messaging.rabbitmq.destination.routing_key" semantic + // conventions. It represents the rabbitMQ message routing key. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If not empty.) + // Stability: stable + // Examples: 'myKey' + MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") +) + +// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue +// conforming to the "messaging.rabbitmq.destination.routing_key" semantic +// conventions. It represents the rabbitMQ message routing key. +func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue { + return MessagingRabbitmqDestinationRoutingKeyKey.String(val) +} + +// Attributes for Apache Kafka +const ( + // MessagingKafkaMessageKeyKey is the attribute Key conforming to the + // "messaging.kafka.message.key" semantic conventions. It represents the + // message keys in Kafka are used for grouping alike messages to ensure + // they're processed on the same partition. They differ from + // `messaging.message.id` in that they're not unique. 
If the key is `null`, + // the attribute MUST NOT be set. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'myKey' + // Note: If the key type is not string, it's string representation has to + // be supplied for the attribute. If the key has no unambiguous, canonical + // string form, don't include its value. + MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") + + // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the + // "messaging.kafka.consumer.group" semantic conventions. It represents the + // name of the Kafka Consumer Group that is handling the message. Only + // applies to consumers, not producers. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'my-group' + MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group") + + // MessagingKafkaClientIDKey is the attribute Key conforming to the + // "messaging.kafka.client_id" semantic conventions. It represents the + // client ID for the Consumer or Producer that is handling the message. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'client-5' + MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id") + + // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to + // the "messaging.kafka.destination.partition" semantic conventions. It + // represents the partition the message is sent to. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2 + MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition") + + // MessagingKafkaSourcePartitionKey is the attribute Key conforming to the + // "messaging.kafka.source.partition" semantic conventions. It represents + // the partition the message is received from. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2 + MessagingKafkaSourcePartitionKey = attribute.Key("messaging.kafka.source.partition") + + // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the + // "messaging.kafka.message.offset" semantic conventions. It represents the + // offset of a record in the corresponding Kafka partition. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 42 + MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset") + + // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the + // "messaging.kafka.message.tombstone" semantic conventions. It represents + // a boolean that is true if the message is a tombstone. + // + // Type: boolean + // RequirementLevel: ConditionallyRequired (If value is `true`. When + // missing, the value is assumed to be `false`.) + // Stability: stable + MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") +) + +// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the +// "messaging.kafka.message.key" semantic conventions. It represents the +// message keys in Kafka are used for grouping alike messages to ensure they're +// processed on the same partition. They differ from `messaging.message.id` in +// that they're not unique. If the key is `null`, the attribute MUST NOT be +// set. +func MessagingKafkaMessageKey(val string) attribute.KeyValue { + return MessagingKafkaMessageKeyKey.String(val) +} + +// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to +// the "messaging.kafka.consumer.group" semantic conventions. It represents the +// name of the Kafka Consumer Group that is handling the message. Only applies +// to consumers, not producers. 
+func MessagingKafkaConsumerGroup(val string) attribute.KeyValue { + return MessagingKafkaConsumerGroupKey.String(val) +} + +// MessagingKafkaClientID returns an attribute KeyValue conforming to the +// "messaging.kafka.client_id" semantic conventions. It represents the client +// ID for the Consumer or Producer that is handling the message. +func MessagingKafkaClientID(val string) attribute.KeyValue { + return MessagingKafkaClientIDKey.String(val) +} + +// MessagingKafkaDestinationPartition returns an attribute KeyValue +// conforming to the "messaging.kafka.destination.partition" semantic +// conventions. It represents the partition the message is sent to. +func MessagingKafkaDestinationPartition(val int) attribute.KeyValue { + return MessagingKafkaDestinationPartitionKey.Int(val) +} + +// MessagingKafkaSourcePartition returns an attribute KeyValue conforming to +// the "messaging.kafka.source.partition" semantic conventions. It represents +// the partition the message is received from. +func MessagingKafkaSourcePartition(val int) attribute.KeyValue { + return MessagingKafkaSourcePartitionKey.Int(val) +} + +// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to +// the "messaging.kafka.message.offset" semantic conventions. It represents the +// offset of a record in the corresponding Kafka partition. +func MessagingKafkaMessageOffset(val int) attribute.KeyValue { + return MessagingKafkaMessageOffsetKey.Int(val) +} + +// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming +// to the "messaging.kafka.message.tombstone" semantic conventions. It +// represents a boolean that is true if the message is a tombstone. +func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { + return MessagingKafkaMessageTombstoneKey.Bool(val) +} + +// Attributes for Apache RocketMQ +const ( + // MessagingRocketmqNamespaceKey is the attribute Key conforming to the + // "messaging.rocketmq.namespace" semantic conventions. 
It represents the + // namespace of RocketMQ resources, resources in different namespaces are + // individual. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'myNamespace' + MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") + + // MessagingRocketmqClientGroupKey is the attribute Key conforming to the + // "messaging.rocketmq.client_group" semantic conventions. It represents + // the name of the RocketMQ producer/consumer group that is handling the + // message. The client type is identified by the SpanKind. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'myConsumerGroup' + MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") + + // MessagingRocketmqClientIDKey is the attribute Key conforming to the + // "messaging.rocketmq.client_id" semantic conventions. It represents the + // unique identifier for each client. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'myhost@8742@s8083jm' + MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id") + + // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key + // conforming to the "messaging.rocketmq.message.delivery_timestamp" + // semantic conventions. It represents the timestamp in milliseconds that + // the delay message is expected to be delivered to consumer. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If the message type is delay + // and delay time level is not specified.) + // Stability: stable + // Examples: 1665987217045 + MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") + + // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key + // conforming to the "messaging.rocketmq.message.delay_time_level" semantic + // conventions. 
It represents the delay time level for delay message, which + // determines the message delay time. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If the message type is delay + // and delivery timestamp is not specified.) + // Stability: stable + // Examples: 3 + MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") + + // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the + // "messaging.rocketmq.message.group" semantic conventions. It represents + // the it is essential for FIFO message. Messages that belong to the same + // message group are always processed one by one within the same consumer + // group. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If the message type is FIFO.) + // Stability: stable + // Examples: 'myMessageGroup' + MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") + + // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the + // "messaging.rocketmq.message.type" semantic conventions. It represents + // the type of message. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") + + // MessagingRocketmqMessageTagKey is the attribute Key conforming to the + // "messaging.rocketmq.message.tag" semantic conventions. It represents the + // secondary classifier of message besides topic. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'tagA' + MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") + + // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the + // "messaging.rocketmq.message.keys" semantic conventions. It represents + // the key(s) of message, another way to mark message besides message id. 
+ // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'keyA', 'keyB' + MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") + + // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to + // the "messaging.rocketmq.consumption_model" semantic conventions. It + // represents the model of message consumption. This only applies to + // consumer spans. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") +) + +var ( + // Normal message + MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") + // FIFO message + MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") + // Delay message + MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") + // Transaction message + MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") +) + +var ( + // Clustering consumption model + MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") + // Broadcasting consumption model + MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") +) + +// MessagingRocketmqNamespace returns an attribute KeyValue conforming to +// the "messaging.rocketmq.namespace" semantic conventions. It represents the +// namespace of RocketMQ resources, resources in different namespaces are +// individual. +func MessagingRocketmqNamespace(val string) attribute.KeyValue { + return MessagingRocketmqNamespaceKey.String(val) +} + +// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to +// the "messaging.rocketmq.client_group" semantic conventions. It represents +// the name of the RocketMQ producer/consumer group that is handling the +// message. 
The client type is identified by the SpanKind. +func MessagingRocketmqClientGroup(val string) attribute.KeyValue { + return MessagingRocketmqClientGroupKey.String(val) +} + +// MessagingRocketmqClientID returns an attribute KeyValue conforming to the +// "messaging.rocketmq.client_id" semantic conventions. It represents the +// unique identifier for each client. +func MessagingRocketmqClientID(val string) attribute.KeyValue { + return MessagingRocketmqClientIDKey.String(val) +} + +// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue +// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic +// conventions. It represents the timestamp in milliseconds that the delay +// message is expected to be delivered to consumer. +func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue { + return MessagingRocketmqMessageDeliveryTimestampKey.Int(val) +} + +// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue +// conforming to the "messaging.rocketmq.message.delay_time_level" semantic +// conventions. It represents the delay time level for delay message, which +// determines the message delay time. +func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue { + return MessagingRocketmqMessageDelayTimeLevelKey.Int(val) +} + +// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.group" semantic conventions. It represents +// the it is essential for FIFO message. Messages that belong to the same +// message group are always processed one by one within the same consumer +// group. +func MessagingRocketmqMessageGroup(val string) attribute.KeyValue { + return MessagingRocketmqMessageGroupKey.String(val) +} + +// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.tag" semantic conventions. It represents the +// secondary classifier of message besides topic. 
+func MessagingRocketmqMessageTag(val string) attribute.KeyValue { + return MessagingRocketmqMessageTagKey.String(val) +} + +// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.keys" semantic conventions. It represents +// the key(s) of message, another way to mark message besides message id. +func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue { + return MessagingRocketmqMessageKeysKey.StringSlice(val) +} + +// Semantic conventions for remote procedure calls. +const ( + // RPCSystemKey is the attribute Key conforming to the "rpc.system" + // semantic conventions. It represents a string identifying the remoting + // system. See below for a list of well-known identifiers. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + RPCSystemKey = attribute.Key("rpc.system") + + // RPCServiceKey is the attribute Key conforming to the "rpc.service" + // semantic conventions. It represents the full (logical) name of the + // service being called, including its package name, if applicable. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'myservice.EchoService' + // Note: This is the logical name of the service from the RPC interface + // perspective, which can be different from the name of any implementing + // class. The `code.namespace` attribute may be used to store the latter + // (despite the attribute name, it may include a class name; e.g., class + // with method actually executing the call on the server side, RPC client + // stub class on the client side). + RPCServiceKey = attribute.Key("rpc.service") + + // RPCMethodKey is the attribute Key conforming to the "rpc.method" + // semantic conventions. It represents the name of the (logical) method + // being called, must be equal to the $method part in the span name. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'exampleMethod' + // Note: This is the logical name of the method from the RPC interface + // perspective, which can be different from the name of any implementing + // method/function. The `code.function` attribute may be used to store the + // latter (e.g., method actually executing the call on the server side, RPC + // client stub method on the client side). + RPCMethodKey = attribute.Key("rpc.method") +) + +var ( + // gRPC + RPCSystemGRPC = RPCSystemKey.String("grpc") + // Java RMI + RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") + // .NET WCF + RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") + // Apache Dubbo + RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") +) + +// RPCService returns an attribute KeyValue conforming to the "rpc.service" +// semantic conventions. It represents the full (logical) name of the service +// being called, including its package name, if applicable. +func RPCService(val string) attribute.KeyValue { + return RPCServiceKey.String(val) +} + +// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" +// semantic conventions. It represents the name of the (logical) method being +// called, must be equal to the $method part in the span name. +func RPCMethod(val string) attribute.KeyValue { + return RPCMethodKey.String(val) +} + +// Tech-specific attributes for gRPC. +const ( + // RPCGRPCStatusCodeKey is the attribute Key conforming to the + // "rpc.grpc.status_code" semantic conventions. It represents the [numeric + // status + // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of + // the gRPC request. 
+ // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") +) + +var ( + // OK + RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) + // CANCELLED + RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) + // UNKNOWN + RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) + // INVALID_ARGUMENT + RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) + // DEADLINE_EXCEEDED + RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) + // NOT_FOUND + RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) + // ALREADY_EXISTS + RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) + // PERMISSION_DENIED + RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) + // RESOURCE_EXHAUSTED + RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) + // FAILED_PRECONDITION + RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) + // ABORTED + RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) + // OUT_OF_RANGE + RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) + // UNIMPLEMENTED + RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) + // INTERNAL + RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) + // UNAVAILABLE + RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) + // DATA_LOSS + RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) + // UNAUTHENTICATED + RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) +) + +// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). +const ( + // RPCJsonrpcVersionKey is the attribute Key conforming to the + // "rpc.jsonrpc.version" semantic conventions. It represents the protocol + // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 + // does not specify this, the value can be omitted. 
+ // + // Type: string + // RequirementLevel: ConditionallyRequired (If other than the default + // version (`1.0`)) + // Stability: stable + // Examples: '2.0', '1.0' + RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") + + // RPCJsonrpcRequestIDKey is the attribute Key conforming to the + // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` + // property of request or response. Since protocol allows id to be int, + // string, `null` or missing (for notifications), value is expected to be + // cast to string for simplicity. Use empty string in case of `null` value. + // Omit entirely if this is a notification. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '10', 'request-7', '' + RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") + + // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_code" semantic conventions. It represents the + // `error.code` property of response if it is an error response. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If response is not successful.) + // Stability: stable + // Examples: -32700, 100 + RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") + + // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_message" semantic conventions. It represents the + // `error.message` property of response if it is an error response. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Parse error', 'User already exists' + RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") +) + +// RPCJsonrpcVersion returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.version" semantic conventions. It represents the protocol +// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 +// does not specify this, the value can be omitted. 
+func RPCJsonrpcVersion(val string) attribute.KeyValue { + return RPCJsonrpcVersionKey.String(val) +} + +// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` +// property of request or response. Since protocol allows id to be int, string, +// `null` or missing (for notifications), value is expected to be cast to +// string for simplicity. Use empty string in case of `null` value. Omit +// entirely if this is a notification. +func RPCJsonrpcRequestID(val string) attribute.KeyValue { + return RPCJsonrpcRequestIDKey.String(val) +} + +// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_code" semantic conventions. It represents the +// `error.code` property of response if it is an error response. +func RPCJsonrpcErrorCode(val int) attribute.KeyValue { + return RPCJsonrpcErrorCodeKey.Int(val) +} + +// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_message" semantic conventions. It represents the +// `error.message` property of response if it is an error response. +func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { + return RPCJsonrpcErrorMessageKey.String(val) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/doc.go new file mode 100644 index 00000000..c0b1723f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/doc.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package semconv implements OpenTelemetry semantic conventions. +// +// OpenTelemetry semantic conventions are agreed standardized naming +// patterns for OpenTelemetry things. This package represents the conventions +// as of the v1.4.0 version of the OpenTelemetry specification. +package semconv // import "go.opentelemetry.io/otel/semconv/v1.4.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/exception.go new file mode 100644 index 00000000..311cbf21 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/exception.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.4.0" + +const ( + // ExceptionEventName is the name of the Span event representing an exception. 
+ ExceptionEventName = "exception" +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/http.go new file mode 100644 index 00000000..8d814edc --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/http.go @@ -0,0 +1,114 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.4.0" + +import ( + "net/http" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/semconv/internal" + "go.opentelemetry.io/otel/trace" +) + +// HTTP scheme attributes. 
+var ( + HTTPSchemeHTTP = HTTPSchemeKey.String("http") + HTTPSchemeHTTPS = HTTPSchemeKey.String("https") +) + +var sc = &internal.SemanticConventions{ + EnduserIDKey: EnduserIDKey, + HTTPClientIPKey: HTTPClientIPKey, + HTTPFlavorKey: HTTPFlavorKey, + HTTPHostKey: HTTPHostKey, + HTTPMethodKey: HTTPMethodKey, + HTTPRequestContentLengthKey: HTTPRequestContentLengthKey, + HTTPRouteKey: HTTPRouteKey, + HTTPSchemeHTTP: HTTPSchemeHTTP, + HTTPSchemeHTTPS: HTTPSchemeHTTPS, + HTTPServerNameKey: HTTPServerNameKey, + HTTPStatusCodeKey: HTTPStatusCodeKey, + HTTPTargetKey: HTTPTargetKey, + HTTPURLKey: HTTPURLKey, + HTTPUserAgentKey: HTTPUserAgentKey, + NetHostIPKey: NetHostIPKey, + NetHostNameKey: NetHostNameKey, + NetHostPortKey: NetHostPortKey, + NetPeerIPKey: NetPeerIPKey, + NetPeerNameKey: NetPeerNameKey, + NetPeerPortKey: NetPeerPortKey, + NetTransportIP: NetTransportIP, + NetTransportOther: NetTransportOther, + NetTransportTCP: NetTransportTCP, + NetTransportUDP: NetTransportUDP, + NetTransportUnix: NetTransportUnix, +} + +// NetAttributesFromHTTPRequest generates attributes of the net +// namespace as specified by the OpenTelemetry specification for a +// span. The network parameter is a string that net.Dial function +// from standard library can understand. +func NetAttributesFromHTTPRequest(network string, request *http.Request) []attribute.KeyValue { + return sc.NetAttributesFromHTTPRequest(network, request) +} + +// EndUserAttributesFromHTTPRequest generates attributes of the +// enduser namespace as specified by the OpenTelemetry specification +// for a span. +func EndUserAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { + return sc.EndUserAttributesFromHTTPRequest(request) +} + +// HTTPClientAttributesFromHTTPRequest generates attributes of the +// http namespace as specified by the OpenTelemetry specification for +// a span on the client side. 
+func HTTPClientAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { + return sc.HTTPClientAttributesFromHTTPRequest(request) +} + +// HTTPServerMetricAttributesFromHTTPRequest generates low-cardinality attributes +// to be used with server-side HTTP metrics. +func HTTPServerMetricAttributesFromHTTPRequest(serverName string, request *http.Request) []attribute.KeyValue { + return sc.HTTPServerMetricAttributesFromHTTPRequest(serverName, request) +} + +// HTTPServerAttributesFromHTTPRequest generates attributes of the +// http namespace as specified by the OpenTelemetry specification for +// a span on the server side. Currently, only basic authentication is +// supported. +func HTTPServerAttributesFromHTTPRequest(serverName, route string, request *http.Request) []attribute.KeyValue { + return sc.HTTPServerAttributesFromHTTPRequest(serverName, route, request) +} + +// HTTPAttributesFromHTTPStatusCode generates attributes of the http +// namespace as specified by the OpenTelemetry specification for a +// span. +func HTTPAttributesFromHTTPStatusCode(code int) []attribute.KeyValue { + return sc.HTTPAttributesFromHTTPStatusCode(code) +} + +// SpanStatusFromHTTPStatusCode generates a status code and a message +// as specified by the OpenTelemetry specification for a span. +func SpanStatusFromHTTPStatusCode(code int) (codes.Code, string) { + return internal.SpanStatusFromHTTPStatusCode(code) +} + +// SpanStatusFromHTTPStatusCodeAndSpanKind generates a status code and a message +// as specified by the OpenTelemetry specification for a span. +// Exclude 4xx for SERVER to set the appropriate status. 
+func SpanStatusFromHTTPStatusCodeAndSpanKind(code int, spanKind trace.SpanKind) (codes.Code, string) { + return internal.SpanStatusFromHTTPStatusCodeAndSpanKind(code, spanKind) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/resource.go new file mode 100644 index 00000000..404bd4e7 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/resource.go @@ -0,0 +1,906 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.4.0" + +import "go.opentelemetry.io/otel/attribute" + +// A cloud environment (e.g. GCP, Azure, AWS) +const ( + // Name of the cloud provider. + // + // Type: Enum + // Required: No + // Stability: stable + // Examples: 'gcp' + CloudProviderKey = attribute.Key("cloud.provider") + // The cloud account ID the resource is assigned to. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '111111111111', 'opentelemetry' + CloudAccountIDKey = attribute.Key("cloud.account.id") + // The geographical region the resource is running. 
Refer to your provider's docs + // to see the available regions, for example [AWS + // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), + // [Azure regions](https://azure.microsoft.com/en-us/global- + // infrastructure/geographies/), or [Google Cloud + // regions](https://cloud.google.com/about/locations). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'us-central1', 'us-east-1' + CloudRegionKey = attribute.Key("cloud.region") + // Cloud regions often have multiple, isolated locations known as zones to + // increase availability. Availability zone represents the zone where the resource + // is running. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'us-east-1c' + // Note: Availability zones are called "zones" on Google Cloud. + CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") + // The cloud platform in use. + // + // Type: Enum + // Required: No + // Stability: stable + // Examples: 'aws_ec2', 'azure_vm', 'gcp_compute_engine' + // Note: The prefix of the service SHOULD match the one specified in + // `cloud.provider`. 
+ CloudPlatformKey = attribute.Key("cloud.platform") +) + +var ( + // Amazon Web Services + CloudProviderAWS = CloudProviderKey.String("aws") + // Microsoft Azure + CloudProviderAzure = CloudProviderKey.String("azure") + // Google Cloud Platform + CloudProviderGCP = CloudProviderKey.String("gcp") +) + +var ( + // AWS Elastic Compute Cloud + CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") + // AWS Elastic Container Service + CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") + // AWS Elastic Kubernetes Service + CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") + // AWS Lambda + CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") + // AWS Elastic Beanstalk + CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") + // Azure Virtual Machines + CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") + // Azure Container Instances + CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") + // Azure Kubernetes Service + CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") + // Azure Functions + CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") + // Azure App Service + CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") + // Google Cloud Compute Engine (GCE) + CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") + // Google Cloud Run + CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") + // Google Cloud Kubernetes Engine (GKE) + CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") + // Google Cloud Functions (GCF) + CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") + // Google Cloud App Engine (GAE) + CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") +) + +// Resources used by AWS Elastic Container Service (ECS). 
+const ( + // The Amazon Resource Name (ARN) of an [ECS container instance](https://docs.aws. + // amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'arn:aws:ecs:us- + // west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' + AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") + // The ARN of an [ECS cluster](https://docs.aws.amazon.com/AmazonECS/latest/develo + // perguide/clusters.html). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") + // The [launch type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/l + // aunch_types.html) for an ECS task. + // + // Type: Enum + // Required: No + // Stability: stable + // Examples: 'ec2', 'fargate' + AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") + // The ARN of an [ECS task definition](https://docs.aws.amazon.com/AmazonECS/lates + // t/developerguide/task_definitions.html). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'arn:aws:ecs:us- + // west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") + // The task definition family this task definition is a member of. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry-family' + AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") + // The revision for this task definition. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '8', '26' + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") +) + +var ( + // ec2 + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // fargate + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// Resources used by AWS Elastic Kubernetes Service (EKS). 
+const ( + // The ARN of an EKS cluster. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") +) + +// Resources specific to Amazon Web Services. +const ( + // The name(s) of the AWS log group(s) an application is writing to. + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like multi-container + // applications, where a single application has sidecar containers, and each write + // to their own log group. + AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") + // The Amazon Resource Name(s) (ARN) of the AWS log group(s). + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the [log group ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam- + // access-control-overview-cwl.html#CWL_ARN_Format). + AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") + // The name(s) of the AWS log stream(s) an application is writing to. + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") + // The ARN(s) of the AWS log stream(s). + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log- + // stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the [log stream ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam- + // access-control-overview-cwl.html#CWL_ARN_Format). 
One log group can contain + // several log streams, so these ARNs necessarily identify both a log group and a + // log stream. + AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") +) + +// A container instance. +const ( + // Container name. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry-autoconf' + ContainerNameKey = attribute.Key("container.name") + // Container ID. Usually a UUID, as for example used to [identify Docker + // containers](https://docs.docker.com/engine/reference/run/#container- + // identification). The UUID might be abbreviated. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'a3bf90e006b2' + ContainerIDKey = attribute.Key("container.id") + // The container runtime managing this container. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'docker', 'containerd', 'rkt' + ContainerRuntimeKey = attribute.Key("container.runtime") + // Name of the image the container was built on. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'gcr.io/opentelemetry/operator' + ContainerImageNameKey = attribute.Key("container.image.name") + // Container image tag. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '0.1' + ContainerImageTagKey = attribute.Key("container.image.tag") +) + +// The software deployment. +const ( + // Name of the [deployment + // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka + // deployment tier). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'staging', 'production' + DeploymentEnvironmentKey = attribute.Key("deployment.environment") +) + +// The device on which the process represented by this resource is running. 
+const ( + // A unique identifier representing the device + // + // Type: string + // Required: No + // Stability: stable + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values outlined + // below. This value is not an advertising identifier and MUST NOT be used as + // such. On iOS (Swift or Objective-C), this value MUST be equal to the [vendor id + // entifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-iden + // tifierforvendor). On Android (Java or Kotlin), this value MUST be equal to the + // Firebase Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found + // [here](https://developer.android.com/training/articles/user-data-ids) on best + // practices and exact implementation details. Caution should be taken when + // storing personal data or anything which can identify a user. GDPR and data + // protection laws may apply, ensure you do your own due diligence. + DeviceIDKey = attribute.Key("device.id") + // The model identifier for the device + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine readable version of the + // model identifier rather than the market or consumer-friendly name of the + // device. + DeviceModelIdentifierKey = attribute.Key("device.model.identifier") + // The marketing name for the device model + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human readable version of the + // device model rather than a machine readable alternative. + DeviceModelNameKey = attribute.Key("device.model.name") +) + +// A serverless instance. +const ( + // The name of the function being executed. 
+ // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'my-function' + FaaSNameKey = attribute.Key("faas.name") + // The unique ID of the function being executed. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function' + // Note: For example, in AWS Lambda this field corresponds to the + // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and- + // namespaces.html) value, in GCP to the URI of the resource, and in Azure to the + // [FunctionDirectory](https://github.com/Azure/azure-functions- + // host/wiki/Retrieving-information-about-the-currently-running-function) field. + FaaSIDKey = attribute.Key("faas.id") + // The version string of the function being executed as defined in [Version + // Attributes](../../resource/semantic_conventions/README.md#version-attributes). + // + // Type: string + // Required: No + // Stability: stable + // Examples: '2.0.0' + FaaSVersionKey = attribute.Key("faas.version") + // The execution environment ID as a string. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'my-function:instance-0001' + FaaSInstanceKey = attribute.Key("faas.instance") + // The amount of memory available to the serverless function in MiB. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 128 + // Note: It's recommended to set this attribute since e.g. too little memory can + // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, + // the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this + // information. + FaaSMaxMemoryKey = attribute.Key("faas.max_memory") +) + +// A host is defined as a general computing instance. +const ( + // Unique host ID. For Cloud, this must be the instance_id assigned by the cloud + // provider. 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry-test' + HostIDKey = attribute.Key("host.id") + // Name of the host. On Unix systems, it may contain what the hostname command + // returns, or the fully qualified hostname, or another name specified by the + // user. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry-test' + HostNameKey = attribute.Key("host.name") + // Type of host. For Cloud, this must be the machine type. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'n1-standard-1' + HostTypeKey = attribute.Key("host.type") + // The CPU architecture the host system is running on. + // + // Type: Enum + // Required: No + // Stability: stable + HostArchKey = attribute.Key("host.arch") + // Name of the VM image or OS install the host was instantiated from. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' + HostImageNameKey = attribute.Key("host.image.name") + // VM image ID. For Cloud, this value is from the provider. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'ami-07b06b442921831e5' + HostImageIDKey = attribute.Key("host.image.id") + // The version string of the VM image as defined in [Version + // Attributes](README.md#version-attributes). 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: '0.1' + HostImageVersionKey = attribute.Key("host.image.version") +) + +var ( + // AMD64 + HostArchAMD64 = HostArchKey.String("amd64") + // ARM32 + HostArchARM32 = HostArchKey.String("arm32") + // ARM64 + HostArchARM64 = HostArchKey.String("arm64") + // Itanium + HostArchIA64 = HostArchKey.String("ia64") + // 32-bit PowerPC + HostArchPPC32 = HostArchKey.String("ppc32") + // 64-bit PowerPC + HostArchPPC64 = HostArchKey.String("ppc64") + // 32-bit x86 + HostArchX86 = HostArchKey.String("x86") +) + +// A Kubernetes Cluster. +const ( + // The name of the cluster. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry-cluster' + K8SClusterNameKey = attribute.Key("k8s.cluster.name") +) + +// A Kubernetes Node object. +const ( + // The name of the Node. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'node-1' + K8SNodeNameKey = attribute.Key("k8s.node.name") + // The UID of the Node. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' + K8SNodeUIDKey = attribute.Key("k8s.node.uid") +) + +// A Kubernetes Namespace. +const ( + // The name of the namespace that the pod is running in. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'default' + K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") +) + +// A Kubernetes Pod object. +const ( + // The UID of the Pod. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SPodUIDKey = attribute.Key("k8s.pod.uid") + // The name of the Pod. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry-pod-autoconf' + K8SPodNameKey = attribute.Key("k8s.pod.name") +) + +// A container in a [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). 
+const ( + // The name of the Container in a Pod template. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'redis' + K8SContainerNameKey = attribute.Key("k8s.container.name") +) + +// A Kubernetes ReplicaSet object. +const ( + // The UID of the ReplicaSet. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SReplicasetUIDKey = attribute.Key("k8s.replicaset.uid") + // The name of the ReplicaSet. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + K8SReplicasetNameKey = attribute.Key("k8s.replicaset.name") +) + +// A Kubernetes Deployment object. +const ( + // The UID of the Deployment. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + // The name of the Deployment. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") +) + +// A Kubernetes StatefulSet object. +const ( + // The UID of the StatefulSet. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SStatefulsetUIDKey = attribute.Key("k8s.statefulset.uid") + // The name of the StatefulSet. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + K8SStatefulsetNameKey = attribute.Key("k8s.statefulset.name") +) + +// A Kubernetes DaemonSet object. +const ( + // The UID of the DaemonSet. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDaemonsetUIDKey = attribute.Key("k8s.daemonset.uid") + // The name of the DaemonSet. 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + K8SDaemonsetNameKey = attribute.Key("k8s.daemonset.name") +) + +// A Kubernetes Job object. +const ( + // The UID of the Job. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SJobUIDKey = attribute.Key("k8s.job.uid") + // The name of the Job. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + K8SJobNameKey = attribute.Key("k8s.job.name") +) + +// A Kubernetes CronJob object. +const ( + // The UID of the CronJob. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") + // The name of the CronJob. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") +) + +// The operating system (OS) on which the process represented by this resource is running. +const ( + // The operating system type. + // + // Type: Enum + // Required: Always + // Stability: stable + OSTypeKey = attribute.Key("os.type") + // Human readable (not intended to be parsed) OS version information, like e.g. + // reported by `ver` or `lsb_release -a` commands. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 LTS' + OSDescriptionKey = attribute.Key("os.description") + // Human readable operating system name. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'iOS', 'Android', 'Ubuntu' + OSNameKey = attribute.Key("os.name") + // The version string of the operating system as defined in [Version + // Attributes](../../resource/semantic_conventions/README.md#version-attributes). 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: '14.2.1', '18.04.1' + OSVersionKey = attribute.Key("os.version") +) + +var ( + // Microsoft Windows + OSTypeWindows = OSTypeKey.String("windows") + // Linux + OSTypeLinux = OSTypeKey.String("linux") + // Apple Darwin + OSTypeDarwin = OSTypeKey.String("darwin") + // FreeBSD + OSTypeFreeBSD = OSTypeKey.String("freebsd") + // NetBSD + OSTypeNetBSD = OSTypeKey.String("netbsd") + // OpenBSD + OSTypeOpenBSD = OSTypeKey.String("openbsd") + // DragonFly BSD + OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") + // HP-UX (Hewlett Packard Unix) + OSTypeHPUX = OSTypeKey.String("hpux") + // AIX (Advanced Interactive eXecutive) + OSTypeAIX = OSTypeKey.String("aix") + // Oracle Solaris + OSTypeSolaris = OSTypeKey.String("solaris") + // IBM z/OS + OSTypeZOS = OSTypeKey.String("z_os") +) + +// An operating system process. +const ( + // Process identifier (PID). + // + // Type: int + // Required: No + // Stability: stable + // Examples: 1234 + ProcessPIDKey = attribute.Key("process.pid") + // The name of the process executable. On Linux based systems, can be set to the + // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name of + // `GetProcessImageFileNameW`. + // + // Type: string + // Required: See below + // Stability: stable + // Examples: 'otelcol' + ProcessExecutableNameKey = attribute.Key("process.executable.name") + // The full path to the process executable. On Linux based systems, can be set to + // the target of `proc/[pid]/exe`. On Windows, can be set to the result of + // `GetProcessImageFileNameW`. + // + // Type: string + // Required: See below + // Stability: stable + // Examples: '/usr/bin/cmd/otelcol' + ProcessExecutablePathKey = attribute.Key("process.executable.path") + // The command used to launch the process (i.e. the command name). On Linux based + // systems, can be set to the zeroth string in `proc/[pid]/cmdline`. 
On Windows, + // can be set to the first parameter extracted from `GetCommandLineW`. + // + // Type: string + // Required: See below + // Stability: stable + // Examples: 'cmd/otelcol' + ProcessCommandKey = attribute.Key("process.command") + // The full command used to launch the process as a single string representing the + // full command. On Windows, can be set to the result of `GetCommandLineW`. Do not + // set this if you have to assemble it just for monitoring; use + // `process.command_args` instead. + // + // Type: string + // Required: See below + // Stability: stable + // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' + ProcessCommandLineKey = attribute.Key("process.command_line") + // All the command arguments (including the command/executable itself) as received + // by the process. On Linux-based systems (and some other Unixoid systems + // supporting procfs), can be set according to the list of null-delimited strings + // extracted from `proc/[pid]/cmdline`. For libc-based executables, this would be + // the full argv vector passed to `main`. + // + // Type: string[] + // Required: See below + // Stability: stable + // Examples: 'cmd/otecol', '--config=config.yaml' + ProcessCommandArgsKey = attribute.Key("process.command_args") + // The username of the user that owns the process. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'root' + ProcessOwnerKey = attribute.Key("process.owner") +) + +// The single (language) runtime instance which is monitored. +const ( + // The name of the runtime of this process. For compiled native binaries, this + // SHOULD be the name of the compiler. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'OpenJDK Runtime Environment' + ProcessRuntimeNameKey = attribute.Key("process.runtime.name") + // The version of the runtime of this process, as returned by the runtime without + // modification. 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: '14.0.2' + ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") + // An additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") +) + +// A service instance. +const ( + // Logical name of the service. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'shoppingcart' + // Note: MUST be the same for all instances of horizontally scaled services. If + // the value was not specified, SDKs MUST fallback to `unknown_service:` + // concatenated with [`process.executable.name`](process.md#process), e.g. + // `unknown_service:bash`. If `process.executable.name` is not available, the + // value MUST be set to `unknown_service`. + ServiceNameKey = attribute.Key("service.name") + // A namespace for `service.name`. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Shop' + // Note: A string value having a meaning that helps to distinguish a group of + // services, for example the team name that owns a group of services. + // `service.name` is expected to be unique within the same namespace. If + // `service.namespace` is not specified in the Resource then `service.name` is + // expected to be unique for all services that have no explicit namespace defined + // (so the empty/unspecified namespace is simply one more valid namespace). Zero- + // length namespace string is assumed equal to unspecified namespace. + ServiceNamespaceKey = attribute.Key("service.namespace") + // The string ID of the service instance. 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: '627cc493-f310-47de-96bd-71410b7dec09' + // Note: MUST be unique for each instance of the same + // `service.namespace,service.name` pair (in other words + // `service.namespace,service.name,service.instance.id` triplet MUST be globally + // unique). The ID helps to distinguish instances of the same service that exist + // at the same time (e.g. instances of a horizontally scaled service). It is + // preferable for the ID to be persistent and stay the same for the lifetime of + // the service instance, however it is acceptable that the ID is ephemeral and + // changes during important lifetime events for the service (e.g. service + // restarts). If the service has no inherent unique ID that can be used as the + // value of this attribute it is recommended to generate a random Version 1 or + // Version 4 RFC 4122 UUID (services aiming for reproducible UUIDs may also use + // Version 5, see RFC 4122 for more recommendations). + ServiceInstanceIDKey = attribute.Key("service.instance.id") + // The version string of the service API or implementation. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '2.0.0' + ServiceVersionKey = attribute.Key("service.version") +) + +// The telemetry SDK used to capture data recorded by the instrumentation libraries. +const ( + // The name of the telemetry SDK as defined above. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + // The language of the telemetry SDK. + // + // Type: Enum + // Required: No + // Stability: stable + TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") + // The version string of the telemetry SDK. 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: '1.2.3' + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") + // The version string of the auto instrumentation agent, if used. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '1.2.3' + TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") +) + +var ( + // cpp + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // webjs + TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") +) + +// Resource describing the packaged software running the application code. Web engines are typically executed using process.runtime. +const ( + // The name of the web engine. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'WildFly' + WebEngineNameKey = attribute.Key("webengine.name") + // The version of the web engine. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '21.0.0' + WebEngineVersionKey = attribute.Key("webengine.version") + // Additional description of the web engine (e.g. detailed version and edition + // information). 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - 2.2.2.Final' + WebEngineDescriptionKey = attribute.Key("webengine.description") +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/schema.go new file mode 100644 index 00000000..a78f1bf4 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/schema.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.4.0" + +// SchemaURL is the schema URL that matches the version of the semantic conventions +// that this package defines. Semconv packages starting from v1.4.0 must declare +// non-empty schema URL in the form https://opentelemetry.io/schemas/ +const SchemaURL = "https://opentelemetry.io/schemas/1.4.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/trace.go new file mode 100644 index 00000000..805eadc9 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/trace.go @@ -0,0 +1,1378 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.4.0" + +import "go.opentelemetry.io/otel/attribute" + +// This document defines the attributes used to perform database client calls. +const ( + // An identifier for the database management system (DBMS) product being used. See + // below for a list of well-known identifiers. + // + // Type: Enum + // Required: Always + // Stability: stable + DBSystemKey = attribute.Key("db.system") + // The connection string used to connect to the database. It is recommended to + // remove embedded credentials. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' + DBConnectionStringKey = attribute.Key("db.connection_string") + // Username for accessing the database. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'readonly_user', 'reporting_user' + DBUserKey = attribute.Key("db.user") + // The fully-qualified class name of the [Java Database Connectivity + // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver + // used to connect. 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'org.postgresql.Driver', + // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' + DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") + // If no [tech-specific attribute](#call-level-attributes-for-specific- + // technologies) is defined, this attribute is used to report the name of the + // database being accessed. For commands that switch the database, this should be + // set to the target database (even if the command fails). + // + // Type: string + // Required: Required, if applicable and no more-specific attribute is defined. + // Stability: stable + // Examples: 'customers', 'main' + // Note: In some SQL databases, the database name to be used is called "schema + // name". + DBNameKey = attribute.Key("db.name") + // The database statement being executed. + // + // Type: string + // Required: Required if applicable and not explicitly disabled via + // instrumentation configuration. + // Stability: stable + // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' + // Note: The value may be sanitized to exclude sensitive information. + DBStatementKey = attribute.Key("db.statement") + // The name of the operation being executed, e.g. the [MongoDB command + // name](https://docs.mongodb.com/manual/reference/command/#database-operations) + // such as `findAndModify`, or the SQL keyword. + // + // Type: string + // Required: Required, if `db.statement` is not applicable. + // Stability: stable + // Examples: 'findAndModify', 'HMSET', 'SELECT' + // Note: When setting this to an SQL keyword, it is not recommended to attempt any + // client-side parsing of `db.statement` just to get this property, but it should + // be set if the operation name is provided by the library being instrumented. If + // the SQL statement has an ambiguous operation, or performs more than one + // operation, this value may be omitted. 
+ DBOperationKey = attribute.Key("db.operation") +) + +var ( + // Some other SQL database. Fallback only. See notes + DBSystemOtherSQL = DBSystemKey.String("other_sql") + // Microsoft SQL Server + DBSystemMSSQL = DBSystemKey.String("mssql") + // MySQL + DBSystemMySQL = DBSystemKey.String("mysql") + // Oracle Database + DBSystemOracle = DBSystemKey.String("oracle") + // IBM DB2 + DBSystemDB2 = DBSystemKey.String("db2") + // PostgreSQL + DBSystemPostgreSQL = DBSystemKey.String("postgresql") + // Amazon Redshift + DBSystemRedshift = DBSystemKey.String("redshift") + // Apache Hive + DBSystemHive = DBSystemKey.String("hive") + // Cloudscape + DBSystemCloudscape = DBSystemKey.String("cloudscape") + // HyperSQL DataBase + DBSystemHSQLDB = DBSystemKey.String("hsqldb") + // Progress Database + DBSystemProgress = DBSystemKey.String("progress") + // SAP MaxDB + DBSystemMaxDB = DBSystemKey.String("maxdb") + // SAP HANA + DBSystemHanaDB = DBSystemKey.String("hanadb") + // Ingres + DBSystemIngres = DBSystemKey.String("ingres") + // FirstSQL + DBSystemFirstSQL = DBSystemKey.String("firstsql") + // EnterpriseDB + DBSystemEDB = DBSystemKey.String("edb") + // InterSystems Caché + DBSystemCache = DBSystemKey.String("cache") + // Adabas (Adaptable Database System) + DBSystemAdabas = DBSystemKey.String("adabas") + // Firebird + DBSystemFirebird = DBSystemKey.String("firebird") + // Apache Derby + DBSystemDerby = DBSystemKey.String("derby") + // FileMaker + DBSystemFilemaker = DBSystemKey.String("filemaker") + // Informix + DBSystemInformix = DBSystemKey.String("informix") + // InstantDB + DBSystemInstantDB = DBSystemKey.String("instantdb") + // InterBase + DBSystemInterbase = DBSystemKey.String("interbase") + // MariaDB + DBSystemMariaDB = DBSystemKey.String("mariadb") + // Netezza + DBSystemNetezza = DBSystemKey.String("netezza") + // Pervasive PSQL + DBSystemPervasive = DBSystemKey.String("pervasive") + // PointBase + DBSystemPointbase = DBSystemKey.String("pointbase") + // SQLite + 
DBSystemSqlite = DBSystemKey.String("sqlite") + // Sybase + DBSystemSybase = DBSystemKey.String("sybase") + // Teradata + DBSystemTeradata = DBSystemKey.String("teradata") + // Vertica + DBSystemVertica = DBSystemKey.String("vertica") + // H2 + DBSystemH2 = DBSystemKey.String("h2") + // ColdFusion IMQ + DBSystemColdfusion = DBSystemKey.String("coldfusion") + // Apache Cassandra + DBSystemCassandra = DBSystemKey.String("cassandra") + // Apache HBase + DBSystemHBase = DBSystemKey.String("hbase") + // MongoDB + DBSystemMongoDB = DBSystemKey.String("mongodb") + // Redis + DBSystemRedis = DBSystemKey.String("redis") + // Couchbase + DBSystemCouchbase = DBSystemKey.String("couchbase") + // CouchDB + DBSystemCouchDB = DBSystemKey.String("couchdb") + // Microsoft Azure Cosmos DB + DBSystemCosmosDB = DBSystemKey.String("cosmosdb") + // Amazon DynamoDB + DBSystemDynamoDB = DBSystemKey.String("dynamodb") + // Neo4j + DBSystemNeo4j = DBSystemKey.String("neo4j") + // Apache Geode + DBSystemGeode = DBSystemKey.String("geode") + // Elasticsearch + DBSystemElasticsearch = DBSystemKey.String("elasticsearch") + // Memcached + DBSystemMemcached = DBSystemKey.String("memcached") + // CockroachDB + DBSystemCockroachdb = DBSystemKey.String("cockroachdb") +) + +// Connection-level attributes for Microsoft SQL Server +const ( + // The Microsoft SQL Server [instance name](https://docs.microsoft.com/en- + // us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) + // connecting to. This name is used to determine the port of a named instance. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'MSSQLSERVER' + // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no longer + // required (but still recommended if non-standard). + DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") +) + +// Call-level attributes for Cassandra +const ( + // The name of the keyspace being accessed. 
To be used instead of the generic + // `db.name` attribute. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'mykeyspace' + DBCassandraKeyspaceKey = attribute.Key("db.cassandra.keyspace") + // The fetch size used for paging, i.e. how many rows will be returned at once. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 5000 + DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") + // The consistency level of the query. Based on consistency values from + // [CQL](https://docs.datastax.com/en/cassandra- + // oss/3.0/cassandra/dml/dmlConfigConsistency.html). + // + // Type: Enum + // Required: No + // Stability: stable + DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") + // The name of the primary table that the operation is acting upon, including the + // schema name (if applicable). + // + // Type: string + // Required: Recommended if available. + // Stability: stable + // Examples: 'mytable' + // Note: This mirrors the db.sql.table attribute but references cassandra rather + // than sql. It is not recommended to attempt any client-side parsing of + // `db.statement` just to get this property, but it should be set if it is + // provided by the library being instrumented. If the operation is acting upon an + // anonymous table, or more than one table, this value MUST NOT be set. + DBCassandraTableKey = attribute.Key("db.cassandra.table") + // Whether or not the query is idempotent. + // + // Type: boolean + // Required: No + // Stability: stable + DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") + // The number of times a query was speculatively executed. Not set or `0` if the + // query was not executed speculatively. 
+ // + // Type: int + // Required: No + // Stability: stable + // Examples: 0, 2 + DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") + // The ID of the coordinating node for a query. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' + DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") + // The data center of the coordinating node for a query. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'us-west-2' + DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") +) + +var ( + // all + DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") + // each_quorum + DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") + // quorum + DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") + // local_quorum + DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") + // one + DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") + // two + DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") + // three + DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") + // local_one + DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") + // any + DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") + // serial + DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") + // local_serial + DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") +) + +// Call-level attributes for Apache HBase +const ( + // The [HBase namespace](https://hbase.apache.org/book.html#_namespace) being + // accessed. To be used instead of the generic `db.name` attribute. 
+ // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'default' + DBHBaseNamespaceKey = attribute.Key("db.hbase.namespace") +) + +// Call-level attributes for Redis +const ( + // The index of the database being accessed as used in the [`SELECT` + // command](https://redis.io/commands/select), provided as an integer. To be used + // instead of the generic `db.name` attribute. + // + // Type: int + // Required: Required, if other than the default database (`0`). + // Stability: stable + // Examples: 0, 1, 15 + DBRedisDBIndexKey = attribute.Key("db.redis.database_index") +) + +// Call-level attributes for MongoDB +const ( + // The collection being accessed within the database stated in `db.name`. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'customers', 'products' + DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") +) + +// Call-level attrbiutes for SQL databases +const ( + // The name of the primary table that the operation is acting upon, including the + // schema name (if applicable). + // + // Type: string + // Required: Recommended if available. + // Stability: stable + // Examples: 'public.users', 'customers' + // Note: It is not recommended to attempt any client-side parsing of + // `db.statement` just to get this property, but it should be set if it is + // provided by the library being instrumented. If the operation is acting upon an + // anonymous table, or more than one table, this value MUST NOT be set. + DBSQLTableKey = attribute.Key("db.sql.table") +) + +// This document defines the attributes used to report a single exception associated with a span. +const ( + // The type of the exception (its fully-qualified class name, if applicable). The + // dynamic type of the exception should be preferred over the static type in + // languages that support it. 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'java.net.ConnectException', 'OSError' + ExceptionTypeKey = attribute.Key("exception.type") + // The exception message. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Division by zero', "Can't convert 'int' object to str implicitly" + ExceptionMessageKey = attribute.Key("exception.message") + // A stacktrace as a string in the natural representation for the language + // runtime. The representation is to be determined and documented by each language + // SIG. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test + // exception\\n at ' + // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + ExceptionStacktraceKey = attribute.Key("exception.stacktrace") + // SHOULD be set to true if the exception event is recorded at a point where it is + // known that the exception is escaping the scope of the span. + // + // Type: boolean + // Required: No + // Stability: stable + // Note: An exception is considered to have escaped (or left) the scope of a span, + // if that span is ended while the exception is still logically "in flight". + // This may be actually "in flight" in some languages (e.g. if the exception + // is passed to a Context manager's `__exit__` method in Python) but will + // usually be caught at the point of recording the exception in most languages. + + // It is usually not possible to determine at the point where an exception is + // thrown + // whether it will escape the scope of a span. + // However, it is trivial to know that an exception + // will escape, if one checks for an active exception just before ending the span, + // as done in the [example above](#exception-end-example). 
+ + // It follows that an exception may still escape the scope of the span + // even if the `exception.escaped` attribute was not set or set to false, + // since the event might have been recorded at a time where it was not + // clear whether the exception will escape. + ExceptionEscapedKey = attribute.Key("exception.escaped") +) + +// This semantic convention describes an instance of a function that runs without provisioning or managing of servers (also known as serverless functions or Function as a Service (FaaS)) with spans. +const ( + // Type of the trigger on which the function is executed. + // + // Type: Enum + // Required: On FaaS instances, faas.trigger MUST be set on incoming invocations. + // Clients invoking FaaS instances MUST set `faas.trigger` on outgoing + // invocations, if it is known to the client. This is, for example, not the case, + // when the transport layer is abstracted in a FaaS client framework without + // access to its configuration. + // Stability: stable + FaaSTriggerKey = attribute.Key("faas.trigger") + // The execution ID of the current function execution. 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' + FaaSExecutionKey = attribute.Key("faas.execution") +) + +var ( + // A response to some data source operation such as a database or filesystem read/write + FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") + // To provide an answer to an inbound HTTP request + FaaSTriggerHTTP = FaaSTriggerKey.String("http") + // A function is set to be executed when messages are sent to a messaging system + FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") + // A function is scheduled to be executed regularly + FaaSTriggerTimer = FaaSTriggerKey.String("timer") + // If none of the others apply + FaaSTriggerOther = FaaSTriggerKey.String("other") +) + +// Semantic Convention for FaaS triggered as a response to some data source operation such as a database or filesystem read/write. +const ( + // The name of the source on which the triggering operation was performed. For + // example, in Cloud Storage or S3 corresponds to the bucket name, and in Cosmos + // DB to the database name. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'myBucketName', 'myDBName' + FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") + // Describes the type of the operation that was performed on the data. + // + // Type: Enum + // Required: Always + // Stability: stable + FaaSDocumentOperationKey = attribute.Key("faas.document.operation") + // A string containing the time when the data was accessed in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed + // in [UTC](https://www.w3.org/TR/NOTE-datetime). + // + // Type: string + // Required: Always + // Stability: stable + // Examples: '2020-01-23T13:47:06Z' + FaaSDocumentTimeKey = attribute.Key("faas.document.time") + // The document name/table subjected to the operation. 
For example, in Cloud + // Storage or S3 is the name of the file, and in Cosmos DB the table name. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'myFile.txt', 'myTableName' + FaaSDocumentNameKey = attribute.Key("faas.document.name") +) + +var ( + // When a new object is created + FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") + // When an object is modified + FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") + // When an object is deleted + FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") +) + +// Semantic Convention for FaaS scheduled to be executed regularly. +const ( + // A string containing the function invocation time in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed + // in [UTC](https://www.w3.org/TR/NOTE-datetime). + // + // Type: string + // Required: Always + // Stability: stable + // Examples: '2020-01-23T13:47:06Z' + FaaSTimeKey = attribute.Key("faas.time") + // A string containing the schedule period as [Cron Expression](https://docs.oracl + // e.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). + // + // Type: string + // Required: No + // Stability: stable + // Examples: '0/5 * * * ? *' + FaaSCronKey = attribute.Key("faas.cron") +) + +// Contains additional attributes for incoming FaaS spans. +const ( + // A boolean that is true if the serverless function is executed for the first + // time (aka cold-start). + // + // Type: boolean + // Required: No + // Stability: stable + FaaSColdstartKey = attribute.Key("faas.coldstart") +) + +// Contains additional attributes for outgoing FaaS spans. +const ( + // The name of the invoked function. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'my-function' + // Note: SHOULD be equal to the `faas.name` resource attribute of the invoked + // function. 
+ FaaSInvokedNameKey = attribute.Key("faas.invoked_name") + // The cloud provider of the invoked function. + // + // Type: Enum + // Required: Always + // Stability: stable + // Examples: 'aws' + // Note: SHOULD be equal to the `cloud.provider` resource attribute of the invoked + // function. + FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") + // The cloud region of the invoked function. + // + // Type: string + // Required: For some cloud providers, like AWS or GCP, the region in which a + // function is hosted is essential to uniquely identify the function and also part + // of its endpoint. Since it's part of the endpoint being called, the region is + // always known to clients. In these cases, `faas.invoked_region` MUST be set + // accordingly. If the region is unknown to the client or not required for + // identifying the invoked function, setting `faas.invoked_region` is optional. + // Stability: stable + // Examples: 'eu-central-1' + // Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked + // function. + FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") +) + +var ( + // Amazon Web Services + FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") + // Microsoft Azure + FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") + // Google Cloud Platform + FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") +) + +// These attributes may be used for any network related operation. +const ( + // Transport protocol used. See note below. + // + // Type: Enum + // Required: No + // Stability: stable + // Examples: 'ip_tcp' + NetTransportKey = attribute.Key("net.transport") + // Remote address of the peer (dotted decimal for IPv4 or + // [RFC5952](https://tools.ietf.org/html/rfc5952) for IPv6) + // + // Type: string + // Required: No + // Stability: stable + // Examples: '127.0.0.1' + NetPeerIPKey = attribute.Key("net.peer.ip") + // Remote port number. 
+ // + // Type: int + // Required: No + // Stability: stable + // Examples: 80, 8080, 443 + NetPeerPortKey = attribute.Key("net.peer.port") + // Remote hostname or similar, see note below. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'example.com' + NetPeerNameKey = attribute.Key("net.peer.name") + // Like `net.peer.ip` but for the host IP. Useful in case of a multi-IP host. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '192.168.0.1' + NetHostIPKey = attribute.Key("net.host.ip") + // Like `net.peer.port` but for the host port. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 35555 + NetHostPortKey = attribute.Key("net.host.port") + // Local hostname or similar, see note below. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'localhost' + NetHostNameKey = attribute.Key("net.host.name") +) + +var ( + // ip_tcp + NetTransportTCP = NetTransportKey.String("ip_tcp") + // ip_udp + NetTransportUDP = NetTransportKey.String("ip_udp") + // Another IP-based protocol + NetTransportIP = NetTransportKey.String("ip") + // Unix Domain socket. See below + NetTransportUnix = NetTransportKey.String("unix") + // Named or anonymous pipe. See note below + NetTransportPipe = NetTransportKey.String("pipe") + // In-process communication + NetTransportInProc = NetTransportKey.String("inproc") + // Something else (non IP-based) + NetTransportOther = NetTransportKey.String("other") +) + +// Operations that access some remote service. +const ( + // The [`service.name`](../../resource/semantic_conventions/README.md#service) of + // the remote service. SHOULD be equal to the actual `service.name` resource + // attribute of the remote service if any. 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'AuthTokenCache' + PeerServiceKey = attribute.Key("peer.service") +) + +// These attributes may be used for any operation with an authenticated and/or authorized enduser. +const ( + // Username or client_id extracted from the access token or + // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in the + // inbound request from outside the system. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'username' + EnduserIDKey = attribute.Key("enduser.id") + // Actual/assumed role the client is making the request under extracted from token + // or application security context. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'admin' + EnduserRoleKey = attribute.Key("enduser.role") + // Scopes or granted authorities the client currently possesses extracted from + // token or application security context. The value would come from the scope + // associated with an [OAuth 2.0 Access + // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute value + // in a [SAML 2.0 Assertion](http://docs.oasis- + // open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'read:message, write:files' + EnduserScopeKey = attribute.Key("enduser.scope") +) + +// These attributes may be used for any operation to store information about a thread that started a span. +const ( + // Current "managed" thread ID (as opposed to OS thread ID). + // + // Type: int + // Required: No + // Stability: stable + // Examples: 42 + ThreadIDKey = attribute.Key("thread.id") + // Current thread name. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'main' + ThreadNameKey = attribute.Key("thread.name") +) + +// These attributes allow to report this unit of code and therefore to provide more context about the span. 
+const ( + // The method or function name, or equivalent (usually rightmost part of the code + // unit's name). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'serveRequest' + CodeFunctionKey = attribute.Key("code.function") + // The "namespace" within which `code.function` is defined. Usually the qualified + // class or module name, such that `code.namespace` + some separator + + // `code.function` form a unique identifier for the code unit. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'com.example.MyHTTPService' + CodeNamespaceKey = attribute.Key("code.namespace") + // The source code file name that identifies the code unit as uniquely as possible + // (preferably an absolute file path). + // + // Type: string + // Required: No + // Stability: stable + // Examples: '/usr/local/MyApplication/content_root/app/index.php' + CodeFilepathKey = attribute.Key("code.filepath") + // The line number in `code.filepath` best representing the operation. It SHOULD + // point within the code unit named in `code.function`. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 42 + CodeLineNumberKey = attribute.Key("code.lineno") +) + +// This document defines semantic conventions for HTTP client and server Spans. +const ( + // HTTP request method. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'GET', 'POST', 'HEAD' + HTTPMethodKey = attribute.Key("http.method") + // Full HTTP request URL in the form `scheme://host[:port]/path?query[#fragment]`. + // Usually the fragment is not transmitted over HTTP, but if it is known, it + // should be included nevertheless. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' + // Note: `http.url` MUST NOT contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. 
In such case the attribute's + // value should be `https://www.example.com/`. + HTTPURLKey = attribute.Key("http.url") + // The full request target as passed in a HTTP request line or equivalent. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '/path/12314/?q=ddds#123' + HTTPTargetKey = attribute.Key("http.target") + // The value of the [HTTP host + // header](https://tools.ietf.org/html/rfc7230#section-5.4). When the header is + // empty or not present, this attribute should be the same. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'www.example.org' + HTTPHostKey = attribute.Key("http.host") + // The URI scheme identifying the used protocol. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'http', 'https' + HTTPSchemeKey = attribute.Key("http.scheme") + // [HTTP response status code](https://tools.ietf.org/html/rfc7231#section-6). + // + // Type: int + // Required: If and only if one was received/sent. + // Stability: stable + // Examples: 200 + HTTPStatusCodeKey = attribute.Key("http.status_code") + // Kind of HTTP protocol used. + // + // Type: Enum + // Required: No + // Stability: stable + // Examples: '1.0' + // Note: If `net.transport` is not specified, it can be assumed to be `IP.TCP` + // except if `http.flavor` is `QUIC`, in which case `IP.UDP` is assumed. + HTTPFlavorKey = attribute.Key("http.flavor") + // Value of the [HTTP User- + // Agent](https://tools.ietf.org/html/rfc7231#section-5.5.3) header sent by the + // client. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' + HTTPUserAgentKey = attribute.Key("http.user_agent") + // The size of the request payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as the + // [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. 
For + // requests using transport encoding, this should be the compressed size. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 3495 + HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") + // The size of the uncompressed request payload body after transport decoding. Not + // set if transport encoding not used. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 5493 + HTTPRequestContentLengthUncompressedKey = attribute.Key("http.request_content_length_uncompressed") + // The size of the response payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as the + // [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. For + // requests using transport encoding, this should be the compressed size. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 3495 + HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") + // The size of the uncompressed response payload body after transport decoding. + // Not set if transport encoding not used. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 5493 + HTTPResponseContentLengthUncompressedKey = attribute.Key("http.response_content_length_uncompressed") +) + +var ( + // HTTP 1.0 + HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0") + // HTTP 1.1 + HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1") + // HTTP 2 + HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0") + // SPDY protocol + HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY") + // QUIC protocol + HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC") +) + +// Semantic Convention for HTTP Server +const ( + // The primary server name of the matched virtual host. This should be obtained + // via configuration. If no such configuration can be obtained, this attribute + // MUST NOT be set ( `net.host.name` should be used instead). 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'example.com' + // Note: `http.url` is usually not readily available on the server side but would + // have to be assembled in a cumbersome and sometimes lossy process from other + // information (see e.g. open-telemetry/opentelemetry-python/pull/148). It is thus + // preferred to supply the raw data that is available. + HTTPServerNameKey = attribute.Key("http.server_name") + // The matched route (path template). + // + // Type: string + // Required: No + // Stability: stable + // Examples: '/users/:userID?' + HTTPRouteKey = attribute.Key("http.route") + // The IP address of the original client behind all proxies, if known (e.g. from + // [X-Forwarded-For](https://developer.mozilla.org/en- + // US/docs/Web/HTTP/Headers/X-Forwarded-For)). + // + // Type: string + // Required: No + // Stability: stable + // Examples: '83.164.160.102' + // Note: This is not necessarily the same as `net.peer.ip`, which would identify + // the network-level peer, which may be a proxy. + HTTPClientIPKey = attribute.Key("http.client_ip") +) + +// Attributes that exist for multiple DynamoDB request types. +const ( + // The keys in the `RequestItems` object field. + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: 'Users', 'Cats' + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") + // The JSON-serialized value of each item in the `ConsumedCapacity` response + // field. 
+ // + // Type: string[] + // Required: No + // Stability: stable + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : { + // "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, + // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": + // "string", "WriteCapacityUnits": number }' + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + // The JSON-serialized value of the `ItemCollectionMetrics` response field. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, + // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : + // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": + // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }' + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + // The value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter. + // + // Type: double + // Required: No + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + // The value of the `ProvisionedThroughput.WriteCapacityUnits` request parameter. + // + // Type: double + // Required: No + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + // The value of the `ConsistentRead` request parameter. 
+ // + // Type: boolean + // Required: No + // Stability: stable + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + // The value of the `ProjectionExpression` request parameter. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, RelatedItems, + // ProductReviews' + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + // The value of the `Limit` request parameter. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + // The value of the `AttributesToGet` request parameter. + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: 'lives', 'id' + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + // The value of the `IndexName` request parameter. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'name_to_group' + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + // The value of the `Select` request parameter. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") +) + +// DynamoDB.CreateTable +const ( + // The JSON-serialized value of each item of the `GlobalSecondaryIndexes` request + // field + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": "string", + // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], + // "ProjectionType": "string" }, "ProvisionedThroughput": { "ReadCapacityUnits": + // number, "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") + // The JSON-serialized value of each item of the `LocalSecondaryIndexes` request + // field. 
+ // + // Type: string[] + // Required: No + // Stability: stable + // Examples: '{ "IndexARN": "string", "IndexName": "string", "IndexSizeBytes": + // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string", + // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], + // "ProjectionType": "string" } }' + AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") +) + +// DynamoDB.ListTables +const ( + // The value of the `ExclusiveStartTableName` request parameter. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Users', 'CatsTable' + AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") + // The the number of items in the `TableNames` response parameter. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 20 + AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") +) + +// DynamoDB.Query +const ( + // The value of the `ScanIndexForward` request parameter. + // + // Type: boolean + // Required: No + // Stability: stable + AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") +) + +// DynamoDB.Scan +const ( + // The value of the `Segment` request parameter. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 10 + AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") + // The value of the `TotalSegments` request parameter. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 100 + AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") + // The value of the `Count` response parameter. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 10 + AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") + // The value of the `ScannedCount` response parameter. 
+ // + // Type: int + // Required: No + // Stability: stable + // Examples: 50 + AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") +) + +// DynamoDB.UpdateTable +const ( + // The JSON-serialized value of each item in the `AttributeDefinitions` request + // field. + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' + AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") + // The JSON-serialized value of each item in the the `GlobalSecondaryIndexUpdates` + // request field. + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }' + AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") +) + +// This document defines the attributes used in messaging systems. +const ( + // A string identifying the messaging system. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'kafka', 'rabbitmq', 'activemq', 'AmazonSQS' + MessagingSystemKey = attribute.Key("messaging.system") + // The message destination name. This might be equal to the span name but is + // required nevertheless. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'MyQueue', 'MyTopic' + MessagingDestinationKey = attribute.Key("messaging.destination") + // The kind of message destination + // + // Type: Enum + // Required: Required only if the message destination is either a `queue` or + // `topic`. 
+ // Stability: stable + MessagingDestinationKindKey = attribute.Key("messaging.destination_kind") + // A boolean that is true if the message destination is temporary. + // + // Type: boolean + // Required: If missing, it is assumed to be false. + // Stability: stable + MessagingTempDestinationKey = attribute.Key("messaging.temp_destination") + // The name of the transport protocol. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'AMQP', 'MQTT' + MessagingProtocolKey = attribute.Key("messaging.protocol") + // The version of the transport protocol. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '0.9.1' + MessagingProtocolVersionKey = attribute.Key("messaging.protocol_version") + // Connection string. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'tibjmsnaming://localhost:7222', + // 'https://queue.amazonaws.com/80398EXAMPLE/MyQueue' + MessagingURLKey = attribute.Key("messaging.url") + // A value used by the messaging system as an identifier for the message, + // represented as a string. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '452a7c7c7c7048c2f887f61572b18fc2' + MessagingMessageIDKey = attribute.Key("messaging.message_id") + // The [conversation ID](#conversations) identifying the conversation to which the + // message belongs, represented as a string. Sometimes called "Correlation ID". + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'MyConversationID' + MessagingConversationIDKey = attribute.Key("messaging.conversation_id") + // The (uncompressed) size of the message payload in bytes. Also use this + // attribute if it is unknown whether the compressed or uncompressed payload size + // is reported. 
+ // + // Type: int + // Required: No + // Stability: stable + // Examples: 2738 + MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message_payload_size_bytes") + // The compressed size of the message payload in bytes. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 2048 + MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message_payload_compressed_size_bytes") +) + +var ( + // A message sent to a queue + MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue") + // A message sent to a topic + MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic") +) + +// Semantic convention for a consumer of messages received from a messaging system +const ( + // A string identifying the kind of message consumption as defined in the + // [Operation names](#operation-names) section above. If the operation is "send", + // this attribute MUST NOT be set, since the operation can be inferred from the + // span kind in that case. + // + // Type: Enum + // Required: No + // Stability: stable + MessagingOperationKey = attribute.Key("messaging.operation") +) + +var ( + // receive + MessagingOperationReceive = MessagingOperationKey.String("receive") + // process + MessagingOperationProcess = MessagingOperationKey.String("process") +) + +// Attributes for RabbitMQ +const ( + // RabbitMQ message routing key. + // + // Type: string + // Required: Unless it is empty. + // Stability: stable + // Examples: 'myKey' + MessagingRabbitmqRoutingKeyKey = attribute.Key("messaging.rabbitmq.routing_key") +) + +// Attributes for Apache Kafka +const ( + // Message keys in Kafka are used for grouping alike messages to ensure they're + // processed on the same partition. They differ from `messaging.message_id` in + // that they're not unique. If the key is `null`, the attribute MUST NOT be set. 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'myKey' + // Note: If the key type is not string, it's string representation has to be + // supplied for the attribute. If the key has no unambiguous, canonical string + // form, don't include its value. + MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message_key") + // Name of the Kafka Consumer Group that is handling the message. Only applies to + // consumers, not producers. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'my-group' + MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer_group") + // Client ID for the Consumer or Producer that is handling the message. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'client-5' + MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id") + // Partition the message is sent to. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 2 + MessagingKafkaPartitionKey = attribute.Key("messaging.kafka.partition") + // A boolean that is true if the message is a tombstone. + // + // Type: boolean + // Required: If missing, it is assumed to be false. + // Stability: stable + MessagingKafkaTombstoneKey = attribute.Key("messaging.kafka.tombstone") +) + +// This document defines semantic conventions for remote procedure calls. +const ( + // A string identifying the remoting system. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'grpc', 'java_rmi', 'wcf' + RPCSystemKey = attribute.Key("rpc.system") + // The full name of the service being called, including its package name, if + // applicable. + // + // Type: string + // Required: No, but recommended + // Stability: stable + // Examples: 'myservice.EchoService' + RPCServiceKey = attribute.Key("rpc.service") + // The name of the method being called, must be equal to the $method part in the + // span name. 
+ // + // Type: string + // Required: No, but recommended + // Stability: stable + // Examples: 'exampleMethod' + RPCMethodKey = attribute.Key("rpc.method") +) + +// Tech-specific attributes for gRPC. +const ( + // The [numeric status + // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of the gRPC + // request. + // + // Type: Enum + // Required: Always + // Stability: stable + // Examples: 0, 1, 16 + RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") +) + +var ( + // OK + RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) + // CANCELLED + RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) + // UNKNOWN + RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) + // INVALID_ARGUMENT + RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) + // DEADLINE_EXCEEDED + RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) + // NOT_FOUND + RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) + // ALREADY_EXISTS + RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) + // PERMISSION_DENIED + RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) + // RESOURCE_EXHAUSTED + RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) + // FAILED_PRECONDITION + RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) + // ABORTED + RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) + // OUT_OF_RANGE + RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) + // UNIMPLEMENTED + RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) + // INTERNAL + RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) + // UNAVAILABLE + RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) + // DATA_LOSS + RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) + // UNAUTHENTICATED + RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) +) + +// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). 
+const ( + // Protocol version as in `jsonrpc` property of request/response. Since JSON-RPC + // 1.0 does not specify this, the value can be omitted. + // + // Type: string + // Required: If missing, it is assumed to be "1.0". + // Stability: stable + // Examples: '2.0', '1.0' + RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") + // `method` property from request. Unlike `rpc.method`, this may not relate to the + // actual method being called. Useful for client-side traces since client does not + // know what will be called on the server. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'users.create', 'get_users' + RPCJsonrpcMethodKey = attribute.Key("rpc.jsonrpc.method") + // `id` property of request or response. Since protocol allows id to be int, + // string, `null` or missing (for notifications), value is expected to be cast to + // string for simplicity. Use empty string in case of `null` value. Omit entirely + // if this is a notification. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '10', 'request-7', '' + RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") + // `error.code` property of response if it is an error response. + // + // Type: int + // Required: If missing, response is assumed to be successful. + // Stability: stable + // Examples: -32700, 100 + RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") + // `error.message` property of response if it is an error response. 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Parse error', 'User already exists' + RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") +) diff --git a/vendor/go.opentelemetry.io/otel/trace.go b/vendor/go.opentelemetry.io/otel/trace.go new file mode 100644 index 00000000..caf7249d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace.go @@ -0,0 +1,47 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otel // import "go.opentelemetry.io/otel" + +import ( + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/trace" +) + +// Tracer creates a named tracer that implements Tracer interface. +// If the name is an empty string then provider uses default name. +// +// This is short for GetTracerProvider().Tracer(name, opts...) +func Tracer(name string, opts ...trace.TracerOption) trace.Tracer { + return GetTracerProvider().Tracer(name, opts...) +} + +// GetTracerProvider returns the registered global trace provider. +// If none is registered then an instance of NoopTracerProvider is returned. +// +// Use the trace provider to create a named tracer. E.g. +// +// tracer := otel.GetTracerProvider().Tracer("example.com/foo") +// +// or +// +// tracer := otel.Tracer("example.com/foo") +func GetTracerProvider() trace.TracerProvider { + return global.TracerProvider() +} + +// SetTracerProvider registers `tp` as the global trace provider. 
+func SetTracerProvider(tp trace.TracerProvider) { + global.SetTracerProvider(tp) +} diff --git a/vendor/go.opentelemetry.io/otel/trace/LICENSE b/vendor/go.opentelemetry.io/otel/trace/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go new file mode 100644 index 00000000..cb3efbb9 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/config.go @@ -0,0 +1,333 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "time" + + "go.opentelemetry.io/otel/attribute" +) + +// TracerConfig is a group of options for a Tracer. +type TracerConfig struct { + instrumentationVersion string + // Schema URL of the telemetry emitted by the Tracer. + schemaURL string + attrs attribute.Set +} + +// InstrumentationVersion returns the version of the library providing instrumentation. +func (t *TracerConfig) InstrumentationVersion() string { + return t.instrumentationVersion +} + +// InstrumentationAttributes returns the attributes associated with the library +// providing instrumentation. +func (t *TracerConfig) InstrumentationAttributes() attribute.Set { + return t.attrs +} + +// SchemaURL returns the Schema URL of the telemetry emitted by the Tracer. +func (t *TracerConfig) SchemaURL() string { + return t.schemaURL +} + +// NewTracerConfig applies all the options to a returned TracerConfig. +func NewTracerConfig(options ...TracerOption) TracerConfig { + var config TracerConfig + for _, option := range options { + config = option.apply(config) + } + return config +} + +// TracerOption applies an option to a TracerConfig. +type TracerOption interface { + apply(TracerConfig) TracerConfig +} + +type tracerOptionFunc func(TracerConfig) TracerConfig + +func (fn tracerOptionFunc) apply(cfg TracerConfig) TracerConfig { + return fn(cfg) +} + +// SpanConfig is a group of options for a Span. +type SpanConfig struct { + attributes []attribute.KeyValue + timestamp time.Time + links []Link + newRoot bool + spanKind SpanKind + stackTrace bool +} + +// Attributes describe the associated qualities of a Span. +func (cfg *SpanConfig) Attributes() []attribute.KeyValue { + return cfg.attributes +} + +// Timestamp is a time in a Span life-cycle. 
+func (cfg *SpanConfig) Timestamp() time.Time { + return cfg.timestamp +} + +// StackTrace checks whether stack trace capturing is enabled. +func (cfg *SpanConfig) StackTrace() bool { + return cfg.stackTrace +} + +// Links are the associations a Span has with other Spans. +func (cfg *SpanConfig) Links() []Link { + return cfg.links +} + +// NewRoot identifies a Span as the root Span for a new trace. This is +// commonly used when an existing trace crosses trust boundaries and the +// remote parent span context should be ignored for security. +func (cfg *SpanConfig) NewRoot() bool { + return cfg.newRoot +} + +// SpanKind is the role a Span has in a trace. +func (cfg *SpanConfig) SpanKind() SpanKind { + return cfg.spanKind +} + +// NewSpanStartConfig applies all the options to a returned SpanConfig. +// No validation is performed on the returned SpanConfig (e.g. no uniqueness +// checking or bounding of data), it is left to the SDK to perform this +// action. +func NewSpanStartConfig(options ...SpanStartOption) SpanConfig { + var c SpanConfig + for _, option := range options { + c = option.applySpanStart(c) + } + return c +} + +// NewSpanEndConfig applies all the options to a returned SpanConfig. +// No validation is performed on the returned SpanConfig (e.g. no uniqueness +// checking or bounding of data), it is left to the SDK to perform this +// action. +func NewSpanEndConfig(options ...SpanEndOption) SpanConfig { + var c SpanConfig + for _, option := range options { + c = option.applySpanEnd(c) + } + return c +} + +// SpanStartOption applies an option to a SpanConfig. These options are applicable +// only when the span is created. +type SpanStartOption interface { + applySpanStart(SpanConfig) SpanConfig +} + +type spanOptionFunc func(SpanConfig) SpanConfig + +func (fn spanOptionFunc) applySpanStart(cfg SpanConfig) SpanConfig { + return fn(cfg) +} + +// SpanEndOption applies an option to a SpanConfig. These options are +// applicable only when the span is ended. 
+type SpanEndOption interface { + applySpanEnd(SpanConfig) SpanConfig +} + +// EventConfig is a group of options for an Event. +type EventConfig struct { + attributes []attribute.KeyValue + timestamp time.Time + stackTrace bool +} + +// Attributes describe the associated qualities of an Event. +func (cfg *EventConfig) Attributes() []attribute.KeyValue { + return cfg.attributes +} + +// Timestamp is a time in an Event life-cycle. +func (cfg *EventConfig) Timestamp() time.Time { + return cfg.timestamp +} + +// StackTrace checks whether stack trace capturing is enabled. +func (cfg *EventConfig) StackTrace() bool { + return cfg.stackTrace +} + +// NewEventConfig applies all the EventOptions to a returned EventConfig. If no +// timestamp option is passed, the returned EventConfig will have a Timestamp +// set to the call time, otherwise no validation is performed on the returned +// EventConfig. +func NewEventConfig(options ...EventOption) EventConfig { + var c EventConfig + for _, option := range options { + c = option.applyEvent(c) + } + if c.timestamp.IsZero() { + c.timestamp = time.Now() + } + return c +} + +// EventOption applies span event options to an EventConfig. +type EventOption interface { + applyEvent(EventConfig) EventConfig +} + +// SpanOption are options that can be used at both the beginning and end of a span. +type SpanOption interface { + SpanStartOption + SpanEndOption +} + +// SpanStartEventOption are options that can be used at the start of a span, or with an event. +type SpanStartEventOption interface { + SpanStartOption + EventOption +} + +// SpanEndEventOption are options that can be used at the end of a span, or with an event. +type SpanEndEventOption interface { + SpanEndOption + EventOption +} + +type attributeOption []attribute.KeyValue + +func (o attributeOption) applySpan(c SpanConfig) SpanConfig { + c.attributes = append(c.attributes, []attribute.KeyValue(o)...) 
+ return c +} +func (o attributeOption) applySpanStart(c SpanConfig) SpanConfig { return o.applySpan(c) } +func (o attributeOption) applyEvent(c EventConfig) EventConfig { + c.attributes = append(c.attributes, []attribute.KeyValue(o)...) + return c +} + +var _ SpanStartEventOption = attributeOption{} + +// WithAttributes adds the attributes related to a span life-cycle event. +// These attributes are used to describe the work a Span represents when this +// option is provided to a Span's start or end events. Otherwise, these +// attributes provide additional information about the event being recorded +// (e.g. error, state change, processing progress, system event). +// +// If multiple of these options are passed the attributes of each successive +// option will extend the attributes instead of overwriting. There is no +// guarantee of uniqueness in the resulting attributes. +func WithAttributes(attributes ...attribute.KeyValue) SpanStartEventOption { + return attributeOption(attributes) +} + +// SpanEventOption are options that can be used with an event or a span. +type SpanEventOption interface { + SpanOption + EventOption +} + +type timestampOption time.Time + +func (o timestampOption) applySpan(c SpanConfig) SpanConfig { + c.timestamp = time.Time(o) + return c +} +func (o timestampOption) applySpanStart(c SpanConfig) SpanConfig { return o.applySpan(c) } +func (o timestampOption) applySpanEnd(c SpanConfig) SpanConfig { return o.applySpan(c) } +func (o timestampOption) applyEvent(c EventConfig) EventConfig { + c.timestamp = time.Time(o) + return c +} + +var _ SpanEventOption = timestampOption{} + +// WithTimestamp sets the time of a Span or Event life-cycle moment (e.g. +// started, stopped, errored). 
+func WithTimestamp(t time.Time) SpanEventOption { + return timestampOption(t) +} + +type stackTraceOption bool + +func (o stackTraceOption) applyEvent(c EventConfig) EventConfig { + c.stackTrace = bool(o) + return c +} +func (o stackTraceOption) applySpan(c SpanConfig) SpanConfig { + c.stackTrace = bool(o) + return c +} +func (o stackTraceOption) applySpanEnd(c SpanConfig) SpanConfig { return o.applySpan(c) } + +// WithStackTrace sets the flag to capture the error with stack trace (e.g. true, false). +func WithStackTrace(b bool) SpanEndEventOption { + return stackTraceOption(b) +} + +// WithLinks adds links to a Span. The links are added to the existing Span +// links, i.e. this does not overwrite. Links with invalid span context are ignored. +func WithLinks(links ...Link) SpanStartOption { + return spanOptionFunc(func(cfg SpanConfig) SpanConfig { + cfg.links = append(cfg.links, links...) + return cfg + }) +} + +// WithNewRoot specifies that the Span should be treated as a root Span. Any +// existing parent span context will be ignored when defining the Span's trace +// identifiers. +func WithNewRoot() SpanStartOption { + return spanOptionFunc(func(cfg SpanConfig) SpanConfig { + cfg.newRoot = true + return cfg + }) +} + +// WithSpanKind sets the SpanKind of a Span. +func WithSpanKind(kind SpanKind) SpanStartOption { + return spanOptionFunc(func(cfg SpanConfig) SpanConfig { + cfg.spanKind = kind + return cfg + }) +} + +// WithInstrumentationVersion sets the instrumentation version. +func WithInstrumentationVersion(version string) TracerOption { + return tracerOptionFunc(func(cfg TracerConfig) TracerConfig { + cfg.instrumentationVersion = version + return cfg + }) +} + +// WithInstrumentationAttributes sets the instrumentation attributes. +// +// The passed attributes will be de-duplicated. 
+func WithInstrumentationAttributes(attr ...attribute.KeyValue) TracerOption { + return tracerOptionFunc(func(config TracerConfig) TracerConfig { + config.attrs = attribute.NewSet(attr...) + return config + }) +} + +// WithSchemaURL sets the schema URL for the Tracer. +func WithSchemaURL(schemaURL string) TracerOption { + return tracerOptionFunc(func(cfg TracerConfig) TracerConfig { + cfg.schemaURL = schemaURL + return cfg + }) +} diff --git a/vendor/go.opentelemetry.io/otel/trace/context.go b/vendor/go.opentelemetry.io/otel/trace/context.go new file mode 100644 index 00000000..76f9a083 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/context.go @@ -0,0 +1,61 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/trace" + +import "context" + +type traceContextKeyType int + +const currentSpanKey traceContextKeyType = iota + +// ContextWithSpan returns a copy of parent with span set as the current Span. +func ContextWithSpan(parent context.Context, span Span) context.Context { + return context.WithValue(parent, currentSpanKey, span) +} + +// ContextWithSpanContext returns a copy of parent with sc as the current +// Span. The Span implementation that wraps sc is non-recording and performs +// no operations other than to return sc as the SpanContext from the +// SpanContext method. 
+func ContextWithSpanContext(parent context.Context, sc SpanContext) context.Context { + return ContextWithSpan(parent, nonRecordingSpan{sc: sc}) +} + +// ContextWithRemoteSpanContext returns a copy of parent with rsc set explicly +// as a remote SpanContext and as the current Span. The Span implementation +// that wraps rsc is non-recording and performs no operations other than to +// return rsc as the SpanContext from the SpanContext method. +func ContextWithRemoteSpanContext(parent context.Context, rsc SpanContext) context.Context { + return ContextWithSpanContext(parent, rsc.WithRemote(true)) +} + +// SpanFromContext returns the current Span from ctx. +// +// If no Span is currently set in ctx an implementation of a Span that +// performs no operations is returned. +func SpanFromContext(ctx context.Context) Span { + if ctx == nil { + return noopSpan{} + } + if span, ok := ctx.Value(currentSpanKey).(Span); ok { + return span + } + return noopSpan{} +} + +// SpanContextFromContext returns the current Span's SpanContext. +func SpanContextFromContext(ctx context.Context) SpanContext { + return SpanFromContext(ctx).SpanContext() +} diff --git a/vendor/go.opentelemetry.io/otel/trace/doc.go b/vendor/go.opentelemetry.io/otel/trace/doc.go new file mode 100644 index 00000000..ab0346f9 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/doc.go @@ -0,0 +1,66 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +/* +Package trace provides an implementation of the tracing part of the +OpenTelemetry API. + +To participate in distributed traces a Span needs to be created for the +operation being performed as part of a traced workflow. In its simplest form: + + var tracer trace.Tracer + + func init() { + tracer = otel.Tracer("instrumentation/package/name") + } + + func operation(ctx context.Context) { + var span trace.Span + ctx, span = tracer.Start(ctx, "operation") + defer span.End() + // ... + } + +A Tracer is unique to the instrumentation and is used to create Spans. +Instrumentation should be designed to accept a TracerProvider from which it +can create its own unique Tracer. Alternatively, the registered global +TracerProvider from the go.opentelemetry.io/otel package can be used as +a default. + + const ( + name = "instrumentation/package/name" + version = "0.1.0" + ) + + type Instrumentation struct { + tracer trace.Tracer + } + + func NewInstrumentation(tp trace.TracerProvider) *Instrumentation { + if tp == nil { + tp = otel.TracerProvider() + } + return &Instrumentation{ + tracer: tp.Tracer(name, trace.WithInstrumentationVersion(version)), + } + } + + func operation(ctx context.Context, inst *Instrumentation) { + var span trace.Span + ctx, span = inst.tracer.Start(ctx, "operation") + defer span.End() + // ... + } +*/ +package trace // import "go.opentelemetry.io/otel/trace" diff --git a/vendor/go.opentelemetry.io/otel/trace/nonrecording.go b/vendor/go.opentelemetry.io/otel/trace/nonrecording.go new file mode 100644 index 00000000..88fcb816 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/nonrecording.go @@ -0,0 +1,27 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/trace" + +// nonRecordingSpan is a minimal implementation of a Span that wraps a +// SpanContext. It performs no operations other than to return the wrapped +// SpanContext. +type nonRecordingSpan struct { + noopSpan + + sc SpanContext +} + +// SpanContext returns the wrapped SpanContext. +func (s nonRecordingSpan) SpanContext() SpanContext { return s.sc } diff --git a/vendor/go.opentelemetry.io/otel/trace/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop.go new file mode 100644 index 00000000..73950f20 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/noop.go @@ -0,0 +1,89 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" +) + +// NewNoopTracerProvider returns an implementation of TracerProvider that +// performs no operations. 
The Tracer and Spans created from the returned +// TracerProvider also perform no operations. +func NewNoopTracerProvider() TracerProvider { + return noopTracerProvider{} +} + +type noopTracerProvider struct{} + +var _ TracerProvider = noopTracerProvider{} + +// Tracer returns noop implementation of Tracer. +func (p noopTracerProvider) Tracer(string, ...TracerOption) Tracer { + return noopTracer{} +} + +// noopTracer is an implementation of Tracer that preforms no operations. +type noopTracer struct{} + +var _ Tracer = noopTracer{} + +// Start carries forward a non-recording Span, if one is present in the context, otherwise it +// creates a no-op Span. +func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption) (context.Context, Span) { + span := SpanFromContext(ctx) + if _, ok := span.(nonRecordingSpan); !ok { + // span is likely already a noopSpan, but let's be sure + span = noopSpan{} + } + return ContextWithSpan(ctx, span), span +} + +// noopSpan is an implementation of Span that preforms no operations. +type noopSpan struct{} + +var _ Span = noopSpan{} + +// SpanContext returns an empty span context. +func (noopSpan) SpanContext() SpanContext { return SpanContext{} } + +// IsRecording always returns false. +func (noopSpan) IsRecording() bool { return false } + +// SetStatus does nothing. +func (noopSpan) SetStatus(codes.Code, string) {} + +// SetError does nothing. +func (noopSpan) SetError(bool) {} + +// SetAttributes does nothing. +func (noopSpan) SetAttributes(...attribute.KeyValue) {} + +// End does nothing. +func (noopSpan) End(...SpanEndOption) {} + +// RecordError does nothing. +func (noopSpan) RecordError(error, ...EventOption) {} + +// AddEvent does nothing. +func (noopSpan) AddEvent(string, ...EventOption) {} + +// SetName does nothing. +func (noopSpan) SetName(string) {} + +// TracerProvider returns a no-op TracerProvider. 
+func (noopSpan) TracerProvider() TracerProvider { return noopTracerProvider{} } diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go new file mode 100644 index 00000000..4aa94f79 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/trace.go @@ -0,0 +1,551 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "bytes" + "context" + "encoding/hex" + "encoding/json" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" +) + +const ( + // FlagsSampled is a bitmask with the sampled bit set. A SpanContext + // with the sampling bit set means the span is sampled. + FlagsSampled = TraceFlags(0x01) + + errInvalidHexID errorConst = "trace-id and span-id can only contain [0-9a-f] characters, all lowercase" + + errInvalidTraceIDLength errorConst = "hex encoded trace-id must have length equals to 32" + errNilTraceID errorConst = "trace-id can't be all zero" + + errInvalidSpanIDLength errorConst = "hex encoded span-id must have length equals to 16" + errNilSpanID errorConst = "span-id can't be all zero" +) + +type errorConst string + +func (e errorConst) Error() string { + return string(e) +} + +// TraceID is a unique identity of a trace. +// nolint:revive // revive complains about stutter of `trace.TraceID`. 
+type TraceID [16]byte + +var nilTraceID TraceID +var _ json.Marshaler = nilTraceID + +// IsValid checks whether the trace TraceID is valid. A valid trace ID does +// not consist of zeros only. +func (t TraceID) IsValid() bool { + return !bytes.Equal(t[:], nilTraceID[:]) +} + +// MarshalJSON implements a custom marshal function to encode TraceID +// as a hex string. +func (t TraceID) MarshalJSON() ([]byte, error) { + return json.Marshal(t.String()) +} + +// String returns the hex string representation form of a TraceID. +func (t TraceID) String() string { + return hex.EncodeToString(t[:]) +} + +// SpanID is a unique identity of a span in a trace. +type SpanID [8]byte + +var nilSpanID SpanID +var _ json.Marshaler = nilSpanID + +// IsValid checks whether the SpanID is valid. A valid SpanID does not consist +// of zeros only. +func (s SpanID) IsValid() bool { + return !bytes.Equal(s[:], nilSpanID[:]) +} + +// MarshalJSON implements a custom marshal function to encode SpanID +// as a hex string. +func (s SpanID) MarshalJSON() ([]byte, error) { + return json.Marshal(s.String()) +} + +// String returns the hex string representation form of a SpanID. +func (s SpanID) String() string { + return hex.EncodeToString(s[:]) +} + +// TraceIDFromHex returns a TraceID from a hex string if it is compliant with +// the W3C trace-context specification. See more at +// https://www.w3.org/TR/trace-context/#trace-id +// nolint:revive // revive complains about stutter of `trace.TraceIDFromHex`. +func TraceIDFromHex(h string) (TraceID, error) { + t := TraceID{} + if len(h) != 32 { + return t, errInvalidTraceIDLength + } + + if err := decodeHex(h, t[:]); err != nil { + return t, err + } + + if !t.IsValid() { + return t, errNilTraceID + } + return t, nil +} + +// SpanIDFromHex returns a SpanID from a hex string if it is compliant +// with the w3c trace-context specification. 
+// See more at https://www.w3.org/TR/trace-context/#parent-id +func SpanIDFromHex(h string) (SpanID, error) { + s := SpanID{} + if len(h) != 16 { + return s, errInvalidSpanIDLength + } + + if err := decodeHex(h, s[:]); err != nil { + return s, err + } + + if !s.IsValid() { + return s, errNilSpanID + } + return s, nil +} + +func decodeHex(h string, b []byte) error { + for _, r := range h { + switch { + case 'a' <= r && r <= 'f': + continue + case '0' <= r && r <= '9': + continue + default: + return errInvalidHexID + } + } + + decoded, err := hex.DecodeString(h) + if err != nil { + return err + } + + copy(b, decoded) + return nil +} + +// TraceFlags contains flags that can be set on a SpanContext. +type TraceFlags byte //nolint:revive // revive complains about stutter of `trace.TraceFlags`. + +// IsSampled returns if the sampling bit is set in the TraceFlags. +func (tf TraceFlags) IsSampled() bool { + return tf&FlagsSampled == FlagsSampled +} + +// WithSampled sets the sampling bit in a new copy of the TraceFlags. +func (tf TraceFlags) WithSampled(sampled bool) TraceFlags { // nolint:revive // sampled is not a control flag. + if sampled { + return tf | FlagsSampled + } + + return tf &^ FlagsSampled +} + +// MarshalJSON implements a custom marshal function to encode TraceFlags +// as a hex string. +func (tf TraceFlags) MarshalJSON() ([]byte, error) { + return json.Marshal(tf.String()) +} + +// String returns the hex string representation form of TraceFlags. +func (tf TraceFlags) String() string { + return hex.EncodeToString([]byte{byte(tf)}[:]) +} + +// SpanContextConfig contains mutable fields usable for constructing +// an immutable SpanContext. +type SpanContextConfig struct { + TraceID TraceID + SpanID SpanID + TraceFlags TraceFlags + TraceState TraceState + Remote bool +} + +// NewSpanContext constructs a SpanContext using values from the provided +// SpanContextConfig. 
+func NewSpanContext(config SpanContextConfig) SpanContext { + return SpanContext{ + traceID: config.TraceID, + spanID: config.SpanID, + traceFlags: config.TraceFlags, + traceState: config.TraceState, + remote: config.Remote, + } +} + +// SpanContext contains identifying trace information about a Span. +type SpanContext struct { + traceID TraceID + spanID SpanID + traceFlags TraceFlags + traceState TraceState + remote bool +} + +var _ json.Marshaler = SpanContext{} + +// IsValid returns if the SpanContext is valid. A valid span context has a +// valid TraceID and SpanID. +func (sc SpanContext) IsValid() bool { + return sc.HasTraceID() && sc.HasSpanID() +} + +// IsRemote indicates whether the SpanContext represents a remotely-created Span. +func (sc SpanContext) IsRemote() bool { + return sc.remote +} + +// WithRemote returns a copy of sc with the Remote property set to remote. +func (sc SpanContext) WithRemote(remote bool) SpanContext { + return SpanContext{ + traceID: sc.traceID, + spanID: sc.spanID, + traceFlags: sc.traceFlags, + traceState: sc.traceState, + remote: remote, + } +} + +// TraceID returns the TraceID from the SpanContext. +func (sc SpanContext) TraceID() TraceID { + return sc.traceID +} + +// HasTraceID checks if the SpanContext has a valid TraceID. +func (sc SpanContext) HasTraceID() bool { + return sc.traceID.IsValid() +} + +// WithTraceID returns a new SpanContext with the TraceID replaced. +func (sc SpanContext) WithTraceID(traceID TraceID) SpanContext { + return SpanContext{ + traceID: traceID, + spanID: sc.spanID, + traceFlags: sc.traceFlags, + traceState: sc.traceState, + remote: sc.remote, + } +} + +// SpanID returns the SpanID from the SpanContext. +func (sc SpanContext) SpanID() SpanID { + return sc.spanID +} + +// HasSpanID checks if the SpanContext has a valid SpanID. +func (sc SpanContext) HasSpanID() bool { + return sc.spanID.IsValid() +} + +// WithSpanID returns a new SpanContext with the SpanID replaced. 
+func (sc SpanContext) WithSpanID(spanID SpanID) SpanContext { + return SpanContext{ + traceID: sc.traceID, + spanID: spanID, + traceFlags: sc.traceFlags, + traceState: sc.traceState, + remote: sc.remote, + } +} + +// TraceFlags returns the flags from the SpanContext. +func (sc SpanContext) TraceFlags() TraceFlags { + return sc.traceFlags +} + +// IsSampled returns if the sampling bit is set in the SpanContext's TraceFlags. +func (sc SpanContext) IsSampled() bool { + return sc.traceFlags.IsSampled() +} + +// WithTraceFlags returns a new SpanContext with the TraceFlags replaced. +func (sc SpanContext) WithTraceFlags(flags TraceFlags) SpanContext { + return SpanContext{ + traceID: sc.traceID, + spanID: sc.spanID, + traceFlags: flags, + traceState: sc.traceState, + remote: sc.remote, + } +} + +// TraceState returns the TraceState from the SpanContext. +func (sc SpanContext) TraceState() TraceState { + return sc.traceState +} + +// WithTraceState returns a new SpanContext with the TraceState replaced. +func (sc SpanContext) WithTraceState(state TraceState) SpanContext { + return SpanContext{ + traceID: sc.traceID, + spanID: sc.spanID, + traceFlags: sc.traceFlags, + traceState: state, + remote: sc.remote, + } +} + +// Equal is a predicate that determines whether two SpanContext values are equal. +func (sc SpanContext) Equal(other SpanContext) bool { + return sc.traceID == other.traceID && + sc.spanID == other.spanID && + sc.traceFlags == other.traceFlags && + sc.traceState.String() == other.traceState.String() && + sc.remote == other.remote +} + +// MarshalJSON implements a custom marshal function to encode a SpanContext. +func (sc SpanContext) MarshalJSON() ([]byte, error) { + return json.Marshal(SpanContextConfig{ + TraceID: sc.traceID, + SpanID: sc.spanID, + TraceFlags: sc.traceFlags, + TraceState: sc.traceState, + Remote: sc.remote, + }) +} + +// Span is the individual component of a trace. 
It represents a single named +// and timed operation of a workflow that is traced. A Tracer is used to +// create a Span and it is then up to the operation the Span represents to +// properly end the Span when the operation itself ends. +// +// Warning: methods may be added to this interface in minor releases. +type Span interface { + // End completes the Span. The Span is considered complete and ready to be + // delivered through the rest of the telemetry pipeline after this method + // is called. Therefore, updates to the Span are not allowed after this + // method has been called. + End(options ...SpanEndOption) + + // AddEvent adds an event with the provided name and options. + AddEvent(name string, options ...EventOption) + + // IsRecording returns the recording state of the Span. It will return + // true if the Span is active and events can be recorded. + IsRecording() bool + + // RecordError will record err as an exception span event for this span. An + // additional call to SetStatus is required if the Status of the Span should + // be set to Error, as this method does not change the Span status. If this + // span is not being recorded or err is nil then this method does nothing. + RecordError(err error, options ...EventOption) + + // SpanContext returns the SpanContext of the Span. The returned SpanContext + // is usable even after the End method has been called for the Span. + SpanContext() SpanContext + + // SetStatus sets the status of the Span in the form of a code and a + // description, provided the status hasn't already been set to a higher + // value before (OK > Error > Unset). The description is only included in a + // status when the code is for an error. + SetStatus(code codes.Code, description string) + + // SetName sets the Span name. + SetName(name string) + + // SetAttributes sets kv as attributes of the Span. If a key from kv + // already exists for an attribute of the Span it will be overwritten with + // the value contained in kv. 
+	SetAttributes(kv ...attribute.KeyValue)
+
+	// TracerProvider returns a TracerProvider that can be used to generate
+	// additional Spans on the same telemetry pipeline as the current Span.
+	TracerProvider() TracerProvider
+}
+
+// Link is the relationship between two Spans. The relationship can be within
+// the same Trace or across different Traces.
+//
+// For example, a Link is used in the following situations:
+//
+//  1. Batch Processing: A batch of operations may contain operations
+//     associated with one or more traces/spans. Since there can only be one
+//     parent SpanContext, a Link is used to keep reference to the
+//     SpanContext of all operations in the batch.
+//  2. Public Endpoint: A SpanContext for an incoming client request on a
+//     public endpoint should be considered untrusted. In such a case, a new
+//     trace with its own identity and sampling decision needs to be created,
+//     but this new trace needs to be related to the original trace in some
+//     form. A Link is used to keep reference to the original SpanContext and
+//     track the relationship.
+type Link struct {
+	// SpanContext of the linked Span.
+	SpanContext SpanContext
+
+	// Attributes describe the aspects of the link.
+	Attributes []attribute.KeyValue
+}
+
+// LinkFromContext returns a link encapsulating the SpanContext in the provided ctx.
+func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link {
+	return Link{
+		SpanContext: SpanContextFromContext(ctx),
+		Attributes:  attrs,
+	}
+}
+
+// SpanKind is the role a Span plays in a Trace.
+type SpanKind int
+
+// As a convenience, these match the proto definition, see
+// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129
+//
+// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()`
+// to coerce a span kind to a valid value.
+const ( + // SpanKindUnspecified is an unspecified SpanKind and is not a valid + // SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal + // if it is received. + SpanKindUnspecified SpanKind = 0 + // SpanKindInternal is a SpanKind for a Span that represents an internal + // operation within an application. + SpanKindInternal SpanKind = 1 + // SpanKindServer is a SpanKind for a Span that represents the operation + // of handling a request from a client. + SpanKindServer SpanKind = 2 + // SpanKindClient is a SpanKind for a Span that represents the operation + // of client making a request to a server. + SpanKindClient SpanKind = 3 + // SpanKindProducer is a SpanKind for a Span that represents the operation + // of a producer sending a message to a message broker. Unlike + // SpanKindClient and SpanKindServer, there is often no direct + // relationship between this kind of Span and a SpanKindConsumer kind. A + // SpanKindProducer Span will end once the message is accepted by the + // message broker which might not overlap with the processing of that + // message. + SpanKindProducer SpanKind = 4 + // SpanKindConsumer is a SpanKind for a Span that represents the operation + // of a consumer receiving a message from a message broker. Like + // SpanKindProducer Spans, there is often no direct relationship between + // this Span and the Span that produced the message. + SpanKindConsumer SpanKind = 5 +) + +// ValidateSpanKind returns a valid span kind value. This will coerce +// invalid values into the default value, SpanKindInternal. +func ValidateSpanKind(spanKind SpanKind) SpanKind { + switch spanKind { + case SpanKindInternal, + SpanKindServer, + SpanKindClient, + SpanKindProducer, + SpanKindConsumer: + // valid + return spanKind + default: + return SpanKindInternal + } +} + +// String returns the specified name of the SpanKind in lower-case. 
+func (sk SpanKind) String() string { + switch sk { + case SpanKindInternal: + return "internal" + case SpanKindServer: + return "server" + case SpanKindClient: + return "client" + case SpanKindProducer: + return "producer" + case SpanKindConsumer: + return "consumer" + default: + return "unspecified" + } +} + +// Tracer is the creator of Spans. +// +// Warning: methods may be added to this interface in minor releases. +type Tracer interface { + // Start creates a span and a context.Context containing the newly-created span. + // + // If the context.Context provided in `ctx` contains a Span then the newly-created + // Span will be a child of that span, otherwise it will be a root span. This behavior + // can be overridden by providing `WithNewRoot()` as a SpanOption, causing the + // newly-created Span to be a root span even if `ctx` contains a Span. + // + // When creating a Span it is recommended to provide all known span attributes using + // the `WithAttributes()` SpanOption as samplers will only have access to the + // attributes provided when a Span is created. + // + // Any Span that is created MUST also be ended. This is the responsibility of the user. + // Implementations of this API may leak memory or other resources if Spans are not ended. + Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span) +} + +// TracerProvider provides Tracers that are used by instrumentation code to +// trace computational workflows. +// +// A TracerProvider is the collection destination of all Spans from Tracers it +// provides, it represents a unique telemetry collection pipeline. How that +// pipeline is defined, meaning how those Spans are collected, processed, and +// where they are exported, depends on its implementation. Instrumentation +// authors do not need to define this implementation, rather just use the +// provided Tracers to instrument code. 
+// +// Commonly, instrumentation code will accept a TracerProvider implementation +// at runtime from its users or it can simply use the globally registered one +// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider). +// +// Warning: methods may be added to this interface in minor releases. +type TracerProvider interface { + // Tracer returns a unique Tracer scoped to be used by instrumentation code + // to trace computational workflows. The scope and identity of that + // instrumentation code is uniquely defined by the name and options passed. + // + // The passed name needs to uniquely identify instrumentation code. + // Therefore, it is recommended that name is the Go package name of the + // library providing instrumentation (note: not the code being + // instrumented). Instrumentation libraries can have multiple versions, + // therefore, the WithInstrumentationVersion option should be used to + // distinguish these different codebases. Additionally, instrumentation + // libraries may sometimes use traces to communicate different domains of + // workflow data (i.e. using spans to communicate workflow events only). If + // this is the case, the WithScopeAttributes option should be used to + // uniquely identify Tracers that handle the different domains of workflow + // data. + // + // If the same name and options are passed multiple times, the same Tracer + // will be returned (it is up to the implementation if this will be the + // same underlying instance of that Tracer or not). It is not necessary to + // call this multiple times with the same name and options to get an + // up-to-date Tracer. All implementations will ensure any TracerProvider + // configuration changes are propagated to all provided Tracers. + // + // If name is empty, then an implementation defined default name will be + // used instead. + // + // This method is safe to call concurrently. 
+ Tracer(name string, options ...TracerOption) Tracer +} diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go new file mode 100644 index 00000000..ca68a82e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go @@ -0,0 +1,212 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "encoding/json" + "fmt" + "regexp" + "strings" +) + +const ( + maxListMembers = 32 + + listDelimiter = "," + + // based on the W3C Trace Context specification, see + // https://www.w3.org/TR/trace-context-1/#tracestate-header + noTenantKeyFormat = `[a-z][_0-9a-z\-\*\/]{0,255}` + withTenantKeyFormat = `[a-z0-9][_0-9a-z\-\*\/]{0,240}@[a-z][_0-9a-z\-\*\/]{0,13}` + valueFormat = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]{0,255}[\x21-\x2b\x2d-\x3c\x3e-\x7e]` + + errInvalidKey errorConst = "invalid tracestate key" + errInvalidValue errorConst = "invalid tracestate value" + errInvalidMember errorConst = "invalid tracestate list-member" + errMemberNumber errorConst = "too many list-members in tracestate" + errDuplicate errorConst = "duplicate list-member in tracestate" +) + +var ( + keyRe = regexp.MustCompile(`^((` + noTenantKeyFormat + `)|(` + withTenantKeyFormat + `))$`) + valueRe = regexp.MustCompile(`^(` + valueFormat + `)$`) + memberRe = regexp.MustCompile(`^\s*((` + noTenantKeyFormat + `)|(` + 
withTenantKeyFormat + `))=(` + valueFormat + `)\s*$`) +) + +type member struct { + Key string + Value string +} + +func newMember(key, value string) (member, error) { + if !keyRe.MatchString(key) { + return member{}, fmt.Errorf("%w: %s", errInvalidKey, key) + } + if !valueRe.MatchString(value) { + return member{}, fmt.Errorf("%w: %s", errInvalidValue, value) + } + return member{Key: key, Value: value}, nil +} + +func parseMember(m string) (member, error) { + matches := memberRe.FindStringSubmatch(m) + if len(matches) != 5 { + return member{}, fmt.Errorf("%w: %s", errInvalidMember, m) + } + + return member{ + Key: matches[1], + Value: matches[4], + }, nil +} + +// String encodes member into a string compliant with the W3C Trace Context +// specification. +func (m member) String() string { + return fmt.Sprintf("%s=%s", m.Key, m.Value) +} + +// TraceState provides additional vendor-specific trace identification +// information across different distributed tracing systems. It represents an +// immutable list consisting of key/value pairs, each pair is referred to as a +// list-member. +// +// TraceState conforms to the W3C Trace Context specification +// (https://www.w3.org/TR/trace-context-1). All operations that create or copy +// a TraceState do so by validating all input and will only produce TraceState +// that conform to the specification. Specifically, this means that all +// list-member's key/value pairs are valid, no duplicate list-members exist, +// and the maximum number of list-members (32) is not exceeded. +type TraceState struct { //nolint:revive // revive complains about stutter of `trace.TraceState` + // list is the members in order. + list []member +} + +var _ json.Marshaler = TraceState{} + +// ParseTraceState attempts to decode a TraceState from the passed +// string. It returns an error if the input is invalid according to the W3C +// Trace Context specification. 
+func ParseTraceState(tracestate string) (TraceState, error) { + if tracestate == "" { + return TraceState{}, nil + } + + wrapErr := func(err error) error { + return fmt.Errorf("failed to parse tracestate: %w", err) + } + + var members []member + found := make(map[string]struct{}) + for _, memberStr := range strings.Split(tracestate, listDelimiter) { + if len(memberStr) == 0 { + continue + } + + m, err := parseMember(memberStr) + if err != nil { + return TraceState{}, wrapErr(err) + } + + if _, ok := found[m.Key]; ok { + return TraceState{}, wrapErr(errDuplicate) + } + found[m.Key] = struct{}{} + + members = append(members, m) + if n := len(members); n > maxListMembers { + return TraceState{}, wrapErr(errMemberNumber) + } + } + + return TraceState{list: members}, nil +} + +// MarshalJSON marshals the TraceState into JSON. +func (ts TraceState) MarshalJSON() ([]byte, error) { + return json.Marshal(ts.String()) +} + +// String encodes the TraceState into a string compliant with the W3C +// Trace Context specification. The returned string will be invalid if the +// TraceState contains any invalid members. +func (ts TraceState) String() string { + members := make([]string, len(ts.list)) + for i, m := range ts.list { + members[i] = m.String() + } + return strings.Join(members, listDelimiter) +} + +// Get returns the value paired with key from the corresponding TraceState +// list-member if it exists, otherwise an empty string is returned. +func (ts TraceState) Get(key string) string { + for _, member := range ts.list { + if member.Key == key { + return member.Value + } + } + + return "" +} + +// Insert adds a new list-member defined by the key/value pair to the +// TraceState. If a list-member already exists for the given key, that +// list-member's value is updated. The new or updated list-member is always +// moved to the beginning of the TraceState as specified by the W3C Trace +// Context specification. 
+//
+// If key or value are invalid according to the W3C Trace Context
+// specification an error is returned with the original TraceState.
+//
+// If adding a new list-member means the TraceState would have more members
+// than is allowed, the new list-member will be inserted and the right-most
+// list-member will be dropped in the returned TraceState.
+func (ts TraceState) Insert(key, value string) (TraceState, error) {
+	m, err := newMember(key, value)
+	if err != nil {
+		return ts, err
+	}
+
+	cTS := ts.Delete(key)
+	if cTS.Len()+1 <= maxListMembers {
+		cTS.list = append(cTS.list, member{})
+	}
+	// When the number of members exceeds capacity, drop the "right-most".
+	copy(cTS.list[1:], cTS.list)
+	cTS.list[0] = m
+
+	return cTS, nil
+}
+
+// Delete returns a copy of the TraceState with the list-member identified by
+// key removed.
+func (ts TraceState) Delete(key string) TraceState {
+	members := make([]member, ts.Len())
+	copy(members, ts.list)
+	for i, member := range ts.list {
+		if member.Key == key {
+			members = append(members[:i], members[i+1:]...)
+			// TraceState should contain no duplicate members.
+			break
+		}
+	}
+	return TraceState{list: members}
+}
+
+// Len returns the number of list-members in the TraceState.
+func (ts TraceState) Len() int {
+	return len(ts.list)
+}
diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go
new file mode 100644
index 00000000..0e8e5e02
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/version.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otel // import "go.opentelemetry.io/otel" + +// Version is the current release version of OpenTelemetry in use. +func Version() string { + return "1.14.0" +} diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml new file mode 100644 index 00000000..40df1fae --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -0,0 +1,57 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +module-sets: + stable-v1: + version: v1.14.0 + modules: + - go.opentelemetry.io/otel + - go.opentelemetry.io/otel/bridge/opentracing + - go.opentelemetry.io/otel/bridge/opentracing/test + - go.opentelemetry.io/otel/example/fib + - go.opentelemetry.io/otel/example/jaeger + - go.opentelemetry.io/otel/example/namedtracer + - go.opentelemetry.io/otel/example/otel-collector + - go.opentelemetry.io/otel/example/passthrough + - go.opentelemetry.io/otel/example/zipkin + - go.opentelemetry.io/otel/exporters/jaeger + - go.opentelemetry.io/otel/exporters/zipkin + - go.opentelemetry.io/otel/exporters/otlp/otlptrace + - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc + - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp + - go.opentelemetry.io/otel/exporters/otlp/internal/retry + - go.opentelemetry.io/otel/exporters/stdout/stdouttrace + - go.opentelemetry.io/otel/trace + - go.opentelemetry.io/otel/sdk + experimental-metrics: + version: v0.37.0 + modules: + - go.opentelemetry.io/otel/example/opencensus + - go.opentelemetry.io/otel/example/prometheus + - go.opentelemetry.io/otel/exporters/otlp/otlpmetric + - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc + - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp + - go.opentelemetry.io/otel/exporters/prometheus + - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric + - go.opentelemetry.io/otel/metric + - go.opentelemetry.io/otel/sdk/metric + - go.opentelemetry.io/otel/bridge/opencensus + - go.opentelemetry.io/otel/bridge/opencensus/test + - go.opentelemetry.io/otel/example/view + experimental-schema: + version: v0.0.4 + modules: + - go.opentelemetry.io/otel/schema +excluded-modules: + - go.opentelemetry.io/otel/internal/tools diff --git a/vendor/golang.org/x/sync/semaphore/semaphore.go b/vendor/golang.org/x/sync/semaphore/semaphore.go new file mode 100644 index 00000000..30f632c5 --- /dev/null +++ b/vendor/golang.org/x/sync/semaphore/semaphore.go @@ -0,0 +1,136 @@ 
+// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package semaphore provides a weighted semaphore implementation. +package semaphore // import "golang.org/x/sync/semaphore" + +import ( + "container/list" + "context" + "sync" +) + +type waiter struct { + n int64 + ready chan<- struct{} // Closed when semaphore acquired. +} + +// NewWeighted creates a new weighted semaphore with the given +// maximum combined weight for concurrent access. +func NewWeighted(n int64) *Weighted { + w := &Weighted{size: n} + return w +} + +// Weighted provides a way to bound concurrent access to a resource. +// The callers can request access with a given weight. +type Weighted struct { + size int64 + cur int64 + mu sync.Mutex + waiters list.List +} + +// Acquire acquires the semaphore with a weight of n, blocking until resources +// are available or ctx is done. On success, returns nil. On failure, returns +// ctx.Err() and leaves the semaphore unchanged. +// +// If ctx is already done, Acquire may still succeed without blocking. +func (s *Weighted) Acquire(ctx context.Context, n int64) error { + s.mu.Lock() + if s.size-s.cur >= n && s.waiters.Len() == 0 { + s.cur += n + s.mu.Unlock() + return nil + } + + if n > s.size { + // Don't make other Acquire calls block on one that's doomed to fail. + s.mu.Unlock() + <-ctx.Done() + return ctx.Err() + } + + ready := make(chan struct{}) + w := waiter{n: n, ready: ready} + elem := s.waiters.PushBack(w) + s.mu.Unlock() + + select { + case <-ctx.Done(): + err := ctx.Err() + s.mu.Lock() + select { + case <-ready: + // Acquired the semaphore after we were canceled. Rather than trying to + // fix up the queue, just pretend we didn't notice the cancelation. + err = nil + default: + isFront := s.waiters.Front() == elem + s.waiters.Remove(elem) + // If we're at the front and there're extra tokens left, notify other waiters. 
+ if isFront && s.size > s.cur { + s.notifyWaiters() + } + } + s.mu.Unlock() + return err + + case <-ready: + return nil + } +} + +// TryAcquire acquires the semaphore with a weight of n without blocking. +// On success, returns true. On failure, returns false and leaves the semaphore unchanged. +func (s *Weighted) TryAcquire(n int64) bool { + s.mu.Lock() + success := s.size-s.cur >= n && s.waiters.Len() == 0 + if success { + s.cur += n + } + s.mu.Unlock() + return success +} + +// Release releases the semaphore with a weight of n. +func (s *Weighted) Release(n int64) { + s.mu.Lock() + s.cur -= n + if s.cur < 0 { + s.mu.Unlock() + panic("semaphore: released more than held") + } + s.notifyWaiters() + s.mu.Unlock() +} + +func (s *Weighted) notifyWaiters() { + for { + next := s.waiters.Front() + if next == nil { + break // No more waiters blocked. + } + + w := next.Value.(waiter) + if s.size-s.cur < w.n { + // Not enough tokens for the next waiter. We could keep going (to try to + // find a waiter with a smaller request), but under load that could cause + // starvation for large requests; instead, we leave all remaining waiters + // blocked. + // + // Consider a semaphore used as a read-write lock, with N tokens, N + // readers, and one writer. Each reader can Acquire(1) to obtain a read + // lock. The writer can Acquire(N) to obtain a write lock, excluding all + // of the readers. If we allow the readers to jump ahead in the queue, + // the writer will starve — there is always one token available for every + // reader. + break + } + + s.cur += w.n + s.waiters.Remove(next) + close(w.ready) + } +} diff --git a/vendor/golang.org/x/sys/windows/registry/key.go b/vendor/golang.org/x/sys/windows/registry/key.go new file mode 100644 index 00000000..6c8d97b6 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/key.go @@ -0,0 +1,206 @@ +// Copyright 2015 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows +// +build windows + +// Package registry provides access to the Windows registry. +// +// Here is a simple example, opening a registry key and reading a string value from it. +// +// k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) +// if err != nil { +// log.Fatal(err) +// } +// defer k.Close() +// +// s, _, err := k.GetStringValue("SystemRoot") +// if err != nil { +// log.Fatal(err) +// } +// fmt.Printf("Windows system root is %q\n", s) +package registry + +import ( + "io" + "runtime" + "syscall" + "time" +) + +const ( + // Registry key security and access rights. + // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms724878.aspx + // for details. + ALL_ACCESS = 0xf003f + CREATE_LINK = 0x00020 + CREATE_SUB_KEY = 0x00004 + ENUMERATE_SUB_KEYS = 0x00008 + EXECUTE = 0x20019 + NOTIFY = 0x00010 + QUERY_VALUE = 0x00001 + READ = 0x20019 + SET_VALUE = 0x00002 + WOW64_32KEY = 0x00200 + WOW64_64KEY = 0x00100 + WRITE = 0x20006 +) + +// Key is a handle to an open Windows registry key. +// Keys can be obtained by calling OpenKey; there are +// also some predefined root keys such as CURRENT_USER. +// Keys can be used directly in the Windows API. +type Key syscall.Handle + +const ( + // Windows defines some predefined root keys that are always open. + // An application can use these keys as entry points to the registry. + // Normally these keys are used in OpenKey to open new keys, + // but they can also be used anywhere a Key is required. + CLASSES_ROOT = Key(syscall.HKEY_CLASSES_ROOT) + CURRENT_USER = Key(syscall.HKEY_CURRENT_USER) + LOCAL_MACHINE = Key(syscall.HKEY_LOCAL_MACHINE) + USERS = Key(syscall.HKEY_USERS) + CURRENT_CONFIG = Key(syscall.HKEY_CURRENT_CONFIG) + PERFORMANCE_DATA = Key(syscall.HKEY_PERFORMANCE_DATA) +) + +// Close closes open key k. 
+func (k Key) Close() error { + return syscall.RegCloseKey(syscall.Handle(k)) +} + +// OpenKey opens a new key with path name relative to key k. +// It accepts any open key, including CURRENT_USER and others, +// and returns the new key and an error. +// The access parameter specifies desired access rights to the +// key to be opened. +func OpenKey(k Key, path string, access uint32) (Key, error) { + p, err := syscall.UTF16PtrFromString(path) + if err != nil { + return 0, err + } + var subkey syscall.Handle + err = syscall.RegOpenKeyEx(syscall.Handle(k), p, 0, access, &subkey) + if err != nil { + return 0, err + } + return Key(subkey), nil +} + +// OpenRemoteKey opens a predefined registry key on another +// computer pcname. The key to be opened is specified by k, but +// can only be one of LOCAL_MACHINE, PERFORMANCE_DATA or USERS. +// If pcname is "", OpenRemoteKey returns local computer key. +func OpenRemoteKey(pcname string, k Key) (Key, error) { + var err error + var p *uint16 + if pcname != "" { + p, err = syscall.UTF16PtrFromString(`\\` + pcname) + if err != nil { + return 0, err + } + } + var remoteKey syscall.Handle + err = regConnectRegistry(p, syscall.Handle(k), &remoteKey) + if err != nil { + return 0, err + } + return Key(remoteKey), nil +} + +// ReadSubKeyNames returns the names of subkeys of key k. +// The parameter n controls the number of returned names, +// analogous to the way os.File.Readdirnames works. +func (k Key) ReadSubKeyNames(n int) ([]string, error) { + // RegEnumKeyEx must be called repeatedly and to completion. + // During this time, this goroutine cannot migrate away from + // its current thread. See https://golang.org/issue/49320 and + // https://golang.org/issue/49466. 
+ runtime.LockOSThread() + defer runtime.UnlockOSThread() + + names := make([]string, 0) + // Registry key size limit is 255 bytes and described there: + // https://msdn.microsoft.com/library/windows/desktop/ms724872.aspx + buf := make([]uint16, 256) //plus extra room for terminating zero byte +loopItems: + for i := uint32(0); ; i++ { + if n > 0 { + if len(names) == n { + return names, nil + } + } + l := uint32(len(buf)) + for { + err := syscall.RegEnumKeyEx(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil) + if err == nil { + break + } + if err == syscall.ERROR_MORE_DATA { + // Double buffer size and try again. + l = uint32(2 * len(buf)) + buf = make([]uint16, l) + continue + } + if err == _ERROR_NO_MORE_ITEMS { + break loopItems + } + return names, err + } + names = append(names, syscall.UTF16ToString(buf[:l])) + } + if n > len(names) { + return names, io.EOF + } + return names, nil +} + +// CreateKey creates a key named path under open key k. +// CreateKey returns the new key and a boolean flag that reports +// whether the key already existed. +// The access parameter specifies the access rights for the key +// to be created. +func CreateKey(k Key, path string, access uint32) (newk Key, openedExisting bool, err error) { + var h syscall.Handle + var d uint32 + err = regCreateKeyEx(syscall.Handle(k), syscall.StringToUTF16Ptr(path), + 0, nil, _REG_OPTION_NON_VOLATILE, access, nil, &h, &d) + if err != nil { + return 0, false, err + } + return Key(h), d == _REG_OPENED_EXISTING_KEY, nil +} + +// DeleteKey deletes the subkey path of key k and its values. +func DeleteKey(k Key, path string) error { + return regDeleteKey(syscall.Handle(k), syscall.StringToUTF16Ptr(path)) +} + +// A KeyInfo describes the statistics of a key. It is returned by Stat. 
+type KeyInfo struct { + SubKeyCount uint32 + MaxSubKeyLen uint32 // size of the key's subkey with the longest name, in Unicode characters, not including the terminating zero byte + ValueCount uint32 + MaxValueNameLen uint32 // size of the key's longest value name, in Unicode characters, not including the terminating zero byte + MaxValueLen uint32 // longest data component among the key's values, in bytes + lastWriteTime syscall.Filetime +} + +// ModTime returns the key's last write time. +func (ki *KeyInfo) ModTime() time.Time { + return time.Unix(0, ki.lastWriteTime.Nanoseconds()) +} + +// Stat retrieves information about the open key k. +func (k Key) Stat() (*KeyInfo, error) { + var ki KeyInfo + err := syscall.RegQueryInfoKey(syscall.Handle(k), nil, nil, nil, + &ki.SubKeyCount, &ki.MaxSubKeyLen, nil, &ki.ValueCount, + &ki.MaxValueNameLen, &ki.MaxValueLen, nil, &ki.lastWriteTime) + if err != nil { + return nil, err + } + return &ki, nil +} diff --git a/vendor/golang.org/x/sys/windows/registry/mksyscall.go b/vendor/golang.org/x/sys/windows/registry/mksyscall.go new file mode 100644 index 00000000..ee74927d --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/mksyscall.go @@ -0,0 +1,10 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build generate +// +build generate + +package registry + +//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go syscall.go diff --git a/vendor/golang.org/x/sys/windows/registry/syscall.go b/vendor/golang.org/x/sys/windows/registry/syscall.go new file mode 100644 index 00000000..41733512 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/syscall.go @@ -0,0 +1,33 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build windows +// +build windows + +package registry + +import "syscall" + +const ( + _REG_OPTION_NON_VOLATILE = 0 + + _REG_CREATED_NEW_KEY = 1 + _REG_OPENED_EXISTING_KEY = 2 + + _ERROR_NO_MORE_ITEMS syscall.Errno = 259 +) + +func LoadRegLoadMUIString() error { + return procRegLoadMUIStringW.Find() +} + +//sys regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) = advapi32.RegCreateKeyExW +//sys regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) = advapi32.RegDeleteKeyW +//sys regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) = advapi32.RegSetValueExW +//sys regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) = advapi32.RegEnumValueW +//sys regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) = advapi32.RegDeleteValueW +//sys regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) = advapi32.RegLoadMUIStringW +//sys regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) = advapi32.RegConnectRegistryW + +//sys expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) = kernel32.ExpandEnvironmentStringsW diff --git a/vendor/golang.org/x/sys/windows/registry/value.go b/vendor/golang.org/x/sys/windows/registry/value.go new file mode 100644 index 00000000..2789f6f1 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/value.go @@ -0,0 +1,387 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build windows +// +build windows + +package registry + +import ( + "errors" + "io" + "syscall" + "unicode/utf16" + "unsafe" +) + +const ( + // Registry value types. + NONE = 0 + SZ = 1 + EXPAND_SZ = 2 + BINARY = 3 + DWORD = 4 + DWORD_BIG_ENDIAN = 5 + LINK = 6 + MULTI_SZ = 7 + RESOURCE_LIST = 8 + FULL_RESOURCE_DESCRIPTOR = 9 + RESOURCE_REQUIREMENTS_LIST = 10 + QWORD = 11 +) + +var ( + // ErrShortBuffer is returned when the buffer was too short for the operation. + ErrShortBuffer = syscall.ERROR_MORE_DATA + + // ErrNotExist is returned when a registry key or value does not exist. + ErrNotExist = syscall.ERROR_FILE_NOT_FOUND + + // ErrUnexpectedType is returned by Get*Value when the value's type was unexpected. + ErrUnexpectedType = errors.New("unexpected key value type") +) + +// GetValue retrieves the type and data for the specified value associated +// with an open key k. It fills up buffer buf and returns the retrieved +// byte count n. If buf is too small to fit the stored value it returns +// ErrShortBuffer error along with the required buffer size n. +// If no buffer is provided, it returns true and actual buffer size n. +// If no buffer is provided, GetValue returns the value's type only. +// If the value does not exist, the error returned is ErrNotExist. +// +// GetValue is a low level function. If value's type is known, use the appropriate +// Get*Value function instead. 
+func (k Key) GetValue(name string, buf []byte) (n int, valtype uint32, err error) { + pname, err := syscall.UTF16PtrFromString(name) + if err != nil { + return 0, 0, err + } + var pbuf *byte + if len(buf) > 0 { + pbuf = (*byte)(unsafe.Pointer(&buf[0])) + } + l := uint32(len(buf)) + err = syscall.RegQueryValueEx(syscall.Handle(k), pname, nil, &valtype, pbuf, &l) + if err != nil { + return int(l), valtype, err + } + return int(l), valtype, nil +} + +func (k Key) getValue(name string, buf []byte) (data []byte, valtype uint32, err error) { + p, err := syscall.UTF16PtrFromString(name) + if err != nil { + return nil, 0, err + } + var t uint32 + n := uint32(len(buf)) + for { + err = syscall.RegQueryValueEx(syscall.Handle(k), p, nil, &t, (*byte)(unsafe.Pointer(&buf[0])), &n) + if err == nil { + return buf[:n], t, nil + } + if err != syscall.ERROR_MORE_DATA { + return nil, 0, err + } + if n <= uint32(len(buf)) { + return nil, 0, err + } + buf = make([]byte, n) + } +} + +// GetStringValue retrieves the string value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetStringValue returns ErrNotExist. +// If value is not SZ or EXPAND_SZ, it will return the correct value +// type and ErrUnexpectedType. +func (k Key) GetStringValue(name string) (val string, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 64)) + if err2 != nil { + return "", typ, err2 + } + switch typ { + case SZ, EXPAND_SZ: + default: + return "", typ, ErrUnexpectedType + } + if len(data) == 0 { + return "", typ, nil + } + u := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[: len(data)/2 : len(data)/2] + return syscall.UTF16ToString(u), typ, nil +} + +// GetMUIStringValue retrieves the localized string value for +// the specified value name associated with an open key k. +// If the value name doesn't exist or the localized string value +// can't be resolved, GetMUIStringValue returns ErrNotExist. 
+// GetMUIStringValue panics if the system doesn't support +// regLoadMUIString; use LoadRegLoadMUIString to check if +// regLoadMUIString is supported before calling this function. +func (k Key) GetMUIStringValue(name string) (string, error) { + pname, err := syscall.UTF16PtrFromString(name) + if err != nil { + return "", err + } + + buf := make([]uint16, 1024) + var buflen uint32 + var pdir *uint16 + + err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) + if err == syscall.ERROR_FILE_NOT_FOUND { // Try fallback path + + // Try to resolve the string value using the system directory as + // a DLL search path; this assumes the string value is of the form + // @[path]\dllname,-strID but with no path given, e.g. @tzres.dll,-320. + + // This approach works with tzres.dll but may have to be revised + // in the future to allow callers to provide custom search paths. + + var s string + s, err = ExpandString("%SystemRoot%\\system32\\") + if err != nil { + return "", err + } + pdir, err = syscall.UTF16PtrFromString(s) + if err != nil { + return "", err + } + + err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) + } + + for err == syscall.ERROR_MORE_DATA { // Grow buffer if needed + if buflen <= uint32(len(buf)) { + break // Buffer not growing, assume race; break + } + buf = make([]uint16, buflen) + err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) + } + + if err != nil { + return "", err + } + + return syscall.UTF16ToString(buf), nil +} + +// ExpandString expands environment-variable strings and replaces +// them with the values defined for the current user. +// Use ExpandString to expand EXPAND_SZ strings. 
+func ExpandString(value string) (string, error) { + if value == "" { + return "", nil + } + p, err := syscall.UTF16PtrFromString(value) + if err != nil { + return "", err + } + r := make([]uint16, 100) + for { + n, err := expandEnvironmentStrings(p, &r[0], uint32(len(r))) + if err != nil { + return "", err + } + if n <= uint32(len(r)) { + return syscall.UTF16ToString(r[:n]), nil + } + r = make([]uint16, n) + } +} + +// GetStringsValue retrieves the []string value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetStringsValue returns ErrNotExist. +// If value is not MULTI_SZ, it will return the correct value +// type and ErrUnexpectedType. +func (k Key) GetStringsValue(name string) (val []string, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 64)) + if err2 != nil { + return nil, typ, err2 + } + if typ != MULTI_SZ { + return nil, typ, ErrUnexpectedType + } + if len(data) == 0 { + return nil, typ, nil + } + p := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[: len(data)/2 : len(data)/2] + if len(p) == 0 { + return nil, typ, nil + } + if p[len(p)-1] == 0 { + p = p[:len(p)-1] // remove terminating null + } + val = make([]string, 0, 5) + from := 0 + for i, c := range p { + if c == 0 { + val = append(val, string(utf16.Decode(p[from:i]))) + from = i + 1 + } + } + return val, typ, nil +} + +// GetIntegerValue retrieves the integer value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetIntegerValue returns ErrNotExist. +// If value is not DWORD or QWORD, it will return the correct value +// type and ErrUnexpectedType. 
+func (k Key) GetIntegerValue(name string) (val uint64, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 8)) + if err2 != nil { + return 0, typ, err2 + } + switch typ { + case DWORD: + if len(data) != 4 { + return 0, typ, errors.New("DWORD value is not 4 bytes long") + } + var val32 uint32 + copy((*[4]byte)(unsafe.Pointer(&val32))[:], data) + return uint64(val32), DWORD, nil + case QWORD: + if len(data) != 8 { + return 0, typ, errors.New("QWORD value is not 8 bytes long") + } + copy((*[8]byte)(unsafe.Pointer(&val))[:], data) + return val, QWORD, nil + default: + return 0, typ, ErrUnexpectedType + } +} + +// GetBinaryValue retrieves the binary value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetBinaryValue returns ErrNotExist. +// If value is not BINARY, it will return the correct value +// type and ErrUnexpectedType. +func (k Key) GetBinaryValue(name string) (val []byte, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 64)) + if err2 != nil { + return nil, typ, err2 + } + if typ != BINARY { + return nil, typ, ErrUnexpectedType + } + return data, typ, nil +} + +func (k Key) setValue(name string, valtype uint32, data []byte) error { + p, err := syscall.UTF16PtrFromString(name) + if err != nil { + return err + } + if len(data) == 0 { + return regSetValueEx(syscall.Handle(k), p, 0, valtype, nil, 0) + } + return regSetValueEx(syscall.Handle(k), p, 0, valtype, &data[0], uint32(len(data))) +} + +// SetDWordValue sets the data and type of a name value +// under key k to value and DWORD. +func (k Key) SetDWordValue(name string, value uint32) error { + return k.setValue(name, DWORD, (*[4]byte)(unsafe.Pointer(&value))[:]) +} + +// SetQWordValue sets the data and type of a name value +// under key k to value and QWORD. 
+func (k Key) SetQWordValue(name string, value uint64) error { + return k.setValue(name, QWORD, (*[8]byte)(unsafe.Pointer(&value))[:]) +} + +func (k Key) setStringValue(name string, valtype uint32, value string) error { + v, err := syscall.UTF16FromString(value) + if err != nil { + return err + } + buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[: len(v)*2 : len(v)*2] + return k.setValue(name, valtype, buf) +} + +// SetStringValue sets the data and type of a name value +// under key k to value and SZ. The value must not contain a zero byte. +func (k Key) SetStringValue(name, value string) error { + return k.setStringValue(name, SZ, value) +} + +// SetExpandStringValue sets the data and type of a name value +// under key k to value and EXPAND_SZ. The value must not contain a zero byte. +func (k Key) SetExpandStringValue(name, value string) error { + return k.setStringValue(name, EXPAND_SZ, value) +} + +// SetStringsValue sets the data and type of a name value +// under key k to value and MULTI_SZ. The value strings +// must not contain a zero byte. +func (k Key) SetStringsValue(name string, value []string) error { + ss := "" + for _, s := range value { + for i := 0; i < len(s); i++ { + if s[i] == 0 { + return errors.New("string cannot have 0 inside") + } + } + ss += s + "\x00" + } + v := utf16.Encode([]rune(ss + "\x00")) + buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[: len(v)*2 : len(v)*2] + return k.setValue(name, MULTI_SZ, buf) +} + +// SetBinaryValue sets the data and type of a name value +// under key k to value and BINARY. +func (k Key) SetBinaryValue(name string, value []byte) error { + return k.setValue(name, BINARY, value) +} + +// DeleteValue removes a named value from the key k. +func (k Key) DeleteValue(name string) error { + return regDeleteValue(syscall.Handle(k), syscall.StringToUTF16Ptr(name)) +} + +// ReadValueNames returns the value names of key k. 
+// The parameter n controls the number of returned names, +// analogous to the way os.File.Readdirnames works. +func (k Key) ReadValueNames(n int) ([]string, error) { + ki, err := k.Stat() + if err != nil { + return nil, err + } + names := make([]string, 0, ki.ValueCount) + buf := make([]uint16, ki.MaxValueNameLen+1) // extra room for terminating null character +loopItems: + for i := uint32(0); ; i++ { + if n > 0 { + if len(names) == n { + return names, nil + } + } + l := uint32(len(buf)) + for { + err := regEnumValue(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil) + if err == nil { + break + } + if err == syscall.ERROR_MORE_DATA { + // Double buffer size and try again. + l = uint32(2 * len(buf)) + buf = make([]uint16, l) + continue + } + if err == _ERROR_NO_MORE_ITEMS { + break loopItems + } + return names, err + } + names = append(names, syscall.UTF16ToString(buf[:l])) + } + if n > len(names) { + return names, io.EOF + } + return names, nil +} diff --git a/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go new file mode 100644 index 00000000..fc1835d8 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go @@ -0,0 +1,117 @@ +// Code generated by 'go generate'; DO NOT EDIT. + +package registry + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. 
(perhaps when running + // all.bat?) + return e +} + +var ( + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + + procRegConnectRegistryW = modadvapi32.NewProc("RegConnectRegistryW") + procRegCreateKeyExW = modadvapi32.NewProc("RegCreateKeyExW") + procRegDeleteKeyW = modadvapi32.NewProc("RegDeleteKeyW") + procRegDeleteValueW = modadvapi32.NewProc("RegDeleteValueW") + procRegEnumValueW = modadvapi32.NewProc("RegEnumValueW") + procRegLoadMUIStringW = modadvapi32.NewProc("RegLoadMUIStringW") + procRegSetValueExW = modadvapi32.NewProc("RegSetValueExW") + procExpandEnvironmentStringsW = modkernel32.NewProc("ExpandEnvironmentStringsW") +) + +func regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegConnectRegistryW.Addr(), 3, uintptr(unsafe.Pointer(machinename)), uintptr(key), uintptr(unsafe.Pointer(result))) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegCreateKeyExW.Addr(), 9, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(reserved), uintptr(unsafe.Pointer(class)), uintptr(options), uintptr(desired), uintptr(unsafe.Pointer(sa)), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition))) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegDeleteKeyW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(subkey)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegDeleteValueW.Addr(), 2, 
uintptr(key), uintptr(unsafe.Pointer(name)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegEnumValueW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegLoadMUIStringW.Addr(), 7, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(unsafe.Pointer(buflenCopied)), uintptr(flags), uintptr(unsafe.Pointer(dir)), 0, 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) { + r0, _, _ := syscall.Syscall6(procRegSetValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(valueName)), uintptr(reserved), uintptr(vtype), uintptr(unsafe.Pointer(buf)), uintptr(bufsize)) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procExpandEnvironmentStringsW.Addr(), 3, uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size)) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/modules.txt b/vendor/modules.txt index ddba095a..a41494fc 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -57,6 +57,12 @@ github.com/clbanning/mxj # 
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f ## explicit github.com/dgryski/go-rendezvous +# github.com/fatih/color v1.15.0 +## explicit; go 1.17 +github.com/fatih/color +# github.com/fsnotify/fsnotify v1.6.0 +## explicit; go 1.16 +github.com/fsnotify/fsnotify # github.com/gin-contrib/sse v0.1.0 ## explicit; go 1.12 github.com/gin-contrib/sse @@ -67,6 +73,16 @@ github.com/gin-gonic/gin/binding github.com/gin-gonic/gin/internal/bytesconv github.com/gin-gonic/gin/internal/json github.com/gin-gonic/gin/render +# github.com/go-co-op/gocron v1.19.0 +## explicit; go 1.19 +github.com/go-co-op/gocron +# github.com/go-logr/logr v1.2.3 +## explicit; go 1.16 +github.com/go-logr/logr +github.com/go-logr/logr/funcr +# github.com/go-logr/stdr v1.2.2 +## explicit; go 1.16 +github.com/go-logr/stdr # github.com/go-ole/go-ole v1.2.6 ## explicit; go 1.12 github.com/go-ole/go-ole @@ -99,6 +115,56 @@ github.com/goccy/go-json/internal/encoder/vm_color_indent github.com/goccy/go-json/internal/encoder/vm_indent github.com/goccy/go-json/internal/errors github.com/goccy/go-json/internal/runtime +# github.com/gogf/gf/v2 v2.3.3 +## explicit; go 1.15 +github.com/gogf/gf/v2/container/garray +github.com/gogf/gf/v2/container/glist +github.com/gogf/gf/v2/container/gmap +github.com/gogf/gf/v2/container/gpool +github.com/gogf/gf/v2/container/gqueue +github.com/gogf/gf/v2/container/gset +github.com/gogf/gf/v2/container/gtree +github.com/gogf/gf/v2/container/gtype +github.com/gogf/gf/v2/container/gvar +github.com/gogf/gf/v2/database/gredis +github.com/gogf/gf/v2/debug/gdebug +github.com/gogf/gf/v2/encoding/gbinary +github.com/gogf/gf/v2/encoding/gcompress +github.com/gogf/gf/v2/encoding/ghash +github.com/gogf/gf/v2/errors/gcode +github.com/gogf/gf/v2/errors/gerror +github.com/gogf/gf/v2/internal/command +github.com/gogf/gf/v2/internal/consts +github.com/gogf/gf/v2/internal/deepcopy +github.com/gogf/gf/v2/internal/empty +github.com/gogf/gf/v2/internal/intlog 
+github.com/gogf/gf/v2/internal/json +github.com/gogf/gf/v2/internal/reflection +github.com/gogf/gf/v2/internal/rwmutex +github.com/gogf/gf/v2/internal/tracing +github.com/gogf/gf/v2/internal/utils +github.com/gogf/gf/v2/net/gipv4 +github.com/gogf/gf/v2/net/gtrace +github.com/gogf/gf/v2/net/gtrace/internal/provider +github.com/gogf/gf/v2/os/gcache +github.com/gogf/gf/v2/os/gcron +github.com/gogf/gf/v2/os/gctx +github.com/gogf/gf/v2/os/gfile +github.com/gogf/gf/v2/os/gfpool +github.com/gogf/gf/v2/os/gfsnotify +github.com/gogf/gf/v2/os/glog +github.com/gogf/gf/v2/os/gmlock +github.com/gogf/gf/v2/os/gmutex +github.com/gogf/gf/v2/os/grpool +github.com/gogf/gf/v2/os/gstructs +github.com/gogf/gf/v2/os/gtime +github.com/gogf/gf/v2/os/gtimer +github.com/gogf/gf/v2/text/gregex +github.com/gogf/gf/v2/text/gstr +github.com/gogf/gf/v2/util/gconv +github.com/gogf/gf/v2/util/grand +github.com/gogf/gf/v2/util/gtag +github.com/gogf/gf/v2/util/gutil # github.com/golang/snappy v0.0.4 ## explicit github.com/golang/snappy @@ -154,6 +220,9 @@ github.com/leodido/go-urn github.com/lib/pq github.com/lib/pq/oid github.com/lib/pq/scram +# github.com/mattn/go-colorable v0.1.13 +## explicit; go 1.15 +github.com/mattn/go-colorable # github.com/mattn/go-isatty v0.0.18 ## explicit; go 1.15 github.com/mattn/go-isatty @@ -346,6 +415,30 @@ go.mongodb.org/mongo-driver/x/mongo/driver/operation go.mongodb.org/mongo-driver/x/mongo/driver/session go.mongodb.org/mongo-driver/x/mongo/driver/topology go.mongodb.org/mongo-driver/x/mongo/driver/wiremessage +# go.opentelemetry.io/otel v1.14.0 +## explicit; go 1.18 +go.opentelemetry.io/otel +go.opentelemetry.io/otel/attribute +go.opentelemetry.io/otel/baggage +go.opentelemetry.io/otel/codes +go.opentelemetry.io/otel/internal +go.opentelemetry.io/otel/internal/attribute +go.opentelemetry.io/otel/internal/baggage +go.opentelemetry.io/otel/internal/global +go.opentelemetry.io/otel/propagation +go.opentelemetry.io/otel/semconv/internal 
+go.opentelemetry.io/otel/semconv/v1.17.0 +go.opentelemetry.io/otel/semconv/v1.4.0 +# go.opentelemetry.io/otel/sdk v1.14.0 +## explicit; go 1.18 +go.opentelemetry.io/otel/sdk/instrumentation +go.opentelemetry.io/otel/sdk/internal +go.opentelemetry.io/otel/sdk/internal/env +go.opentelemetry.io/otel/sdk/resource +go.opentelemetry.io/otel/sdk/trace +# go.opentelemetry.io/otel/trace v1.14.0 +## explicit; go 1.18 +go.opentelemetry.io/otel/trace # go.uber.org/atomic v1.10.0 ## explicit; go 1.18 go.uber.org/atomic @@ -397,6 +490,7 @@ golang.org/x/net/idna # golang.org/x/sync v0.1.0 ## explicit golang.org/x/sync/errgroup +golang.org/x/sync/semaphore golang.org/x/sync/singleflight # golang.org/x/sys v0.6.0 ## explicit; go 1.17 @@ -405,6 +499,7 @@ golang.org/x/sys/execabs golang.org/x/sys/internal/unsafeheader golang.org/x/sys/unix golang.org/x/sys/windows +golang.org/x/sys/windows/registry # golang.org/x/text v0.8.0 ## explicit; go 1.17 golang.org/x/text/cases